├── .catwatch.yaml ├── .coveragerc ├── .gitignore ├── .travis.yml ├── .zappr.yaml ├── Dockerfile ├── LICENSE ├── MAINTAINERS ├── MANIFEST.in ├── README.rst ├── delivery.yaml ├── examples ├── account-values.yaml ├── autoscaling.yaml ├── cross-stack-output-refs.yaml ├── elastigroup.yaml ├── minimal-sqs-queue.yaml ├── passing-a-stage.yaml ├── rds-database.yaml └── timed-startup-and-shutdown.yaml ├── next-version ├── pylintrc ├── requirements.txt ├── senza ├── __init__.py ├── __main__.py ├── arguments.py ├── aws.py ├── cli.py ├── components │ ├── __init__.py │ ├── auto_scaling_group.py │ ├── configuration.py │ ├── coreos_auto_configuration.py │ ├── elastic_load_balancer.py │ ├── elastic_load_balancer_v2.py │ ├── elastigroup.py │ ├── iam_role.py │ ├── redis_cluster.py │ ├── redis_node.py │ ├── stups_auto_configuration.py │ ├── subnet_auto_configuration.py │ ├── taupage_auto_scaling_group.py │ ├── weighted_dns_elastic_load_balancer.py │ └── weighted_dns_elastic_load_balancer_v2.py ├── configuration.py ├── definitions.py ├── docker.py ├── error_handling.py ├── exceptions.py ├── manaus │ ├── __init__.py │ ├── acm.py │ ├── boto_proxy.py │ ├── cloudformation.py │ ├── ec2.py │ ├── elb.py │ ├── exceptions.py │ ├── iam.py │ ├── route53.py │ └── utils.py ├── patch.py ├── respawn.py ├── spotinst │ ├── __init__.py │ └── components │ │ ├── __init__.py │ │ └── elastigroup_api.py ├── stack_references.py ├── stups │ ├── __init__.py │ ├── piu.py │ └── taupage.py ├── subcommands │ ├── __init__.py │ ├── config.py │ └── root.py ├── templates │ ├── __init__.py │ ├── _helper.py │ ├── bgapp.py │ ├── postgresapp.py │ ├── rediscluster.py │ ├── redisnode.py │ └── webapp.py ├── traffic.py └── utils.py ├── setup-meta.py ├── setup.py ├── tests ├── fixtures.py ├── test_autoscaling.py ├── test_aws.py ├── test_cli.py ├── test_components.py ├── test_configuration.py ├── test_docker.py ├── test_elastigroup.py ├── test_elastigroup_api.py ├── test_error_handling.py ├── test_manaus │ ├── test_acm.py │ ├── test_boto_proxy.py │ ├── test_cloudformation.py │ ├── test_ec2.py │ ├── test_elb.py │ ├── test_iam.py │ └── test_route53.py ├── test_patch.py ├── test_respawn.py ├── test_stups │ └── test_piu.py ├── test_subcommands │ ├── __init__.py │ ├── test_config.py │ └── test_root.py ├── test_templates.py ├── test_traffic.py └── test_utils.py └── tox.ini /.catwatch.yaml: -------------------------------------------------------------------------------- 1 | title: Senza 2 | -------------------------------------------------------------------------------- /.coveragerc: -------------------------------------------------------------------------------- 1 | [report] 2 | exclude_lines = 3 | pragma: no cover 4 | def __repr__ 5 | def __str__ 6 | omit = 7 | senza/__main__.py 8 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # IntelliJ IDEA 2 | /.idea 3 | *.iml 4 | 5 | # test yaml files 6 | /*.yaml 7 | 8 | # Python 9 | *.pyc 10 | *.egg* 11 | coverage.xml 12 | junit.xml 13 | .coverage 14 | dist/ 15 | build/ 16 | htmlcov/ 17 | .cache/ 18 | 19 | # Vi 20 | *.sw* 21 | .tox/ 22 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: python 2 | python: 3 | - "3.8" 4 | install: 5 | - pip install -U setuptools && pip install tox tox-travis coveralls 6 | script: 7 | - tox 8 | after_success: 9 | - 
coveralls 10 | -------------------------------------------------------------------------------- /.zappr.yaml: -------------------------------------------------------------------------------- 1 | X-Zalando-Team: automata 2 | X-Zalando-Type: code 3 | 4 | # Every PR needs two approvals from Zalando employees. 5 | approvals: 6 | groups: 7 | zalando: 8 | minimum: 2 9 | from: 10 | orgs: 11 | - zalando 12 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | # Hack to upload version to Pypi 2 | 3 | FROM registry.opensource.zalan.do/stups/python AS builder 4 | ARG VERSION 5 | RUN apt-get update && \ 6 | apt-get install -q -y python3-pip && \ 7 | pip3 install -U tox setuptools 8 | COPY . /build 9 | WORKDIR /build 10 | RUN sed -i "s/__version__ = .*/__version__ = '${VERSION}'/" */__init__.py 11 | RUN python3 setup.py sdist bdist_wheel 12 | 13 | FROM pierone.stups.zalan.do/teapot/python-cdp-release:latest 14 | COPY --from=builder /build/dist /pydist 15 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright 2015 Zalando SE 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | -------------------------------------------------------------------------------- /MAINTAINERS: -------------------------------------------------------------------------------- 1 | Team Automata 2 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include *.txt 2 | include *.rst 3 | recursive-include senza *.py 4 | -------------------------------------------------------------------------------- /delivery.yaml: -------------------------------------------------------------------------------- 1 | version: "2017-09-20" 2 | notifications: 3 | hipchat: 4 | rooms: 5 | - cdp-notifications-test 6 | pipeline: 7 | - id: build 8 | type: script 9 | vm_config: 10 | type: linux 11 | image: cdp-runtime/python-3.9 12 | commands: 13 | - desc: "Install dependencies" 14 | cmd: pip install -r requirements.txt 15 | - desc: "Run Tests" 16 | cmd: python3 setup.py test 17 | - desc: "Check code style" 18 | cmd: python3 setup.py flake8 19 | - desc: "Build docker image that will upload package" 20 | cmd: | 21 | VERSION=$(./next-version) 22 | 23 | if [[ -z "${CDP_PULL_REQUEST_NUMBER}" ]]; then 24 | DOCKER_IMAGE="pierone.stups.zalan.do/automata/senza-release:${CDP_TARGET_REPOSITORY_COUNTER}" 25 | else 26 | DOCKER_IMAGE="pierone.stups.zalan.do/automata/senza-release-pr:snapshot" 27 | fi 28 | 29 | docker build --build-arg VERSION="$VERSION" -t "$DOCKER_IMAGE" . 
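          # Note: the image is pushed and the GitHub release is created below only
          # for mainline builds; pull-request builds only build the
          # senza-release-pr:snapshot image and never push or release it.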
30 | 31 | if [[ -z "${CDP_PULL_REQUEST_NUMBER}" ]]; then 32 | docker push "$DOCKER_IMAGE" 33 | git log -1 --pretty=%B > CHANGELOG 34 | # TODO upload the wheel package 35 | git gh-release --message-from-file CHANGELOG $VERSION 36 | fi 37 | # The actual release is done by a pipeline in Zalando's Internal Github Enterprise 38 | -------------------------------------------------------------------------------- /examples/account-values.yaml: -------------------------------------------------------------------------------- 1 | 2 | # basic information for generating and executing this definition 3 | SenzaInfo: 4 | StackName: hello-world 5 | Parameters: 6 | - ApplicationId: 7 | Description: "Application ID from kio( -hello-world )" 8 | Default: "{{AccountInfo.TeamID}}-hello-world" 9 | - ImageVersion: 10 | Description: "Docker image version of hello-world." 11 | Default: 2 12 | - MintBucket: 13 | Description: "Mint bucket for your team" 14 | Default: "mintbucket-{{AccountInfo.AccountID}}-{{AccountInfo.Region}}" 15 | 16 | # a list of senza components to apply to the definition 17 | SenzaComponents: 18 | 19 | # this basic configuration is required for the other components 20 | - Configuration: 21 | Type: Senza::StupsAutoConfiguration # auto-detect network setup 22 | 23 | # will create a launch configuration and auto scaling group with scaling triggers 24 | - AppServer: 25 | Type: Senza::TaupageAutoScalingGroup 26 | InstanceType: t2.micro 27 | SecurityGroups: 28 | - app-{{Arguments.ApplicationId}} 29 | IamRoles: 30 | - app-{{Arguments.ApplicationId}} 31 | AssociatePublicIpAddress: false # change for standalone deployment in default VPC 32 | TaupageConfig: 33 | application_version: "{{Arguments.ImageVersion}}" 34 | runtime: Docker 35 | source: "stups/hello-world:{{Arguments.ImageVersion}}" 36 | mint_bucket: "{{Arguments.MintBucket}}" 37 | domain: "{{AccountInfo.Domain}}" 38 | -------------------------------------------------------------------------------- /examples/autoscaling.yaml: -------------------------------------------------------------------------------- 1 | SenzaInfo: 2 | StackName: hello-world 3 | StackVersion: one 4 | 5 | SenzaComponents: 6 | - Configuration: 7 | Type: Senza::StupsAutoConfiguration # auto-detect network setup 8 | 9 | - AppServer: 10 | Type: Senza::TaupageAutoScalingGroup 11 | InstanceType: t2.micro 12 | IamRoles: 13 | - app-hello-world 14 | TaupageConfig: 15 | ports: 16 | 8080: 8080 17 | application_version: "one" 18 | runtime: Docker 19 | source: "stups/hello-world:one" 20 | AssociatePublicIpAddress: true 21 | AutoScaling: 22 | # also possible: CPU, NetworkOut 23 | MetricType: NetworkIn 24 | # at least two instances 25 | Minimum: 2 26 | # at most sixteen instances 27 | Maximum: 16 28 | # successful startup requirement for the ASG: 29 | # if there are not at least two healthy instances 30 | # within 5 minutes timeout, the ASG creation is 31 | # considered failed. 
32 | # default instance count is 1 33 | # default timeout is 15 minutes 34 | SuccessRequires: "2 within 5m" 35 | # scale two instances up/down at a time (if not overwritten by scale up/down specififc value) 36 | ScalingAdjustment: 2 37 | # scale three instances up at a time (if set, it overwrites the ScalingAdjustment value for scaling up) 38 | ScaleUpAdjustment: 3 39 | # scale one instance down at a time (if set, it overwrites the ScalingAdjustment value for scaling down) 40 | ScaleDownAdjustment: 1 41 | # after scaling, suspend further scaling activities 42 | # for 30 minutes (1800 seconds) 43 | # Can be overwritten by scale up/down specific values 44 | Cooldown: 1800 45 | # If set it overwrites the Cooldown value for scaling up 46 | ScaleUpCooldown: 300 47 | # If set it overwrites the Cooldown value for scaling down 48 | ScaleDownCooldown: 3600 49 | # time over which the threshold is measured in seconds 50 | Period: 300 51 | # how many periods the threshold must be hit before scaling 52 | # this will not scale earlier than 10 mins (2 * 300s) 53 | EvaluationPeriods: 2 54 | # will scale up once >= 20 GB inbound traffic 55 | # are hit over 10 mins on average (evaluation periods * period) 56 | ScaleUpThreshold: "20 GB" 57 | # will scale down once <= 1 GB inbound traffic 58 | # is hit over 10 mins on average (evaluation periods * period) 59 | ScaleDownThreshold: "1 GB" 60 | -------------------------------------------------------------------------------- /examples/cross-stack-output-refs.yaml: -------------------------------------------------------------------------------- 1 | SenzaInfo: 2 | StackName: hello-world 3 | StackVersion: one 4 | 5 | SenzaComponents: 6 | - Configuration: 7 | Type: Senza::StupsAutoConfiguration # auto-detect network setup 8 | 9 | - AppServer: 10 | Type: Senza::TaupageAutoScalingGroup 11 | InstanceType: t2.micro 12 | IamRoles: 13 | - app-hello-world 14 | TaupageConfig: 15 | ports: 16 | 8080: 8080 17 | application_version: "one" 18 | runtime: Docker 19 | source: "stups/hello-world:one" 20 | environment: 21 | DB_HOST: 22 | Stack: hello-world-database-1 # see rds-database.yaml 23 | Output: DatabaseHost 24 | -------------------------------------------------------------------------------- /examples/elastigroup.yaml: -------------------------------------------------------------------------------- 1 | SenzaInfo: 2 | StackName: hello-world 3 | StackVersion: latest 4 | SpotinstAccessToken: <5da...dc7> # Token created using the SpotInst console for the target account 5 | 6 | SenzaComponents: 7 | - Configuration: 8 | Type: Senza::StupsAutoConfiguration # auto-detect network setup 9 | 10 | - AppServerElastigroup: 11 | Type: Senza::Elastigroup 12 | InstanceType: m3.large 13 | SpotAlternatives: 14 | - m3.large 15 | - m4.large 16 | - c3.large 17 | - c4.large 18 | SecurityGroups: default # for this to work make sure the default security group allows ingress on TCP/8000 19 | ElasticLoadBalancerV2: AppLoadBalancer 20 | TaupageConfig: 21 | runtime: Docker 22 | source: "crccheck/hello-world:latest" 23 | health_check_path: / 24 | ports: 25 | 8000: 8000 26 | 27 | - AppLoadBalancer: 28 | Type: Senza::WeightedDnsElasticLoadBalancerV2 29 | HTTPPort: 8000 30 | HealthCheckPath: / 31 | SecurityGroups: 32 | - default # for this to work make sure the default security group allows ingress on TCP/443 33 | Scheme: internal 34 | 35 | -------------------------------------------------------------------------------- /examples/minimal-sqs-queue.yaml: 
-------------------------------------------------------------------------------- 1 | Description: "A minimal Cloud Formation stack creating a SQS queue" 2 | SenzaInfo: 3 | StackName: example 4 | Resources: 5 | MyQueue: 6 | Type: AWS::SQS::Queue 7 | -------------------------------------------------------------------------------- /examples/passing-a-stage.yaml: -------------------------------------------------------------------------------- 1 | 2 | # this is a senza.yaml describing a Location Service Demo (LSD) backend server. 3 | # use it as an example for passing a STAGE environment to your application (see 4 | # NOTEs below). to pass the stage to your application from your deployment, use 5 | # senza create Stage=, where 6 | # should be something like LIVE, STAGING, and so on. 7 | # example: senza create lsd.yaml 21 0.21 Stage=STAGING. 8 | 9 | # NOTE the Stage: section passing a stage parameter to the Taupage AMI. 10 | 11 | SenzaInfo: 12 | StackName: lsd 13 | Parameters: 14 | - ImageVersion: 15 | Description: "Docker image version of lsd." 16 | - Stage: 17 | Description: "Stage of the application (live, staging, ...)" 18 | 19 | # NOTE the environment: section under TaupageConfig: which passes the 20 | # argument of senza (create) to Taupage which turns it into a 21 | # runtime UNIX/LINUX environment variable which can be retrieved from 22 | # within your application, e.g. in scala as sys.env.get("STAGE"). 23 | 24 | SenzaComponents: 25 | 26 | # this basic configuration is required for the other components 27 | - Configuration: 28 | Type: Senza::StupsAutoConfiguration # auto-detect network setup 29 | 30 | # will create a launch configuration and auto scaling group with scaling triggers 31 | - AppServer: 32 | Type: Senza::TaupageAutoScalingGroup 33 | Minimum: 0 34 | Maximum: 1 35 | InstanceType: t2.micro 36 | SecurityGroups: 37 | - app-lsd 38 | IamRoles: 39 | - app-lsd 40 | ElasticLoadBalancer: AppLoadBalancer 41 | AssociatePublicIpAddress: false # change for standalone deployment in default VPC 42 | TaupageConfig: 43 | application_version: "{{Arguments.ImageVersion}}" 44 | runtime: Docker 45 | # NOTE change "/mentoring/lsd" to your team/application path 46 | source: "pierone.stups.zalan.do/mentoring/lsd:{{Arguments.ImageVersion}}" 47 | health_check_path: / 48 | ports: 49 | 8080: 9000 50 | # NOTE replace with your real mint bucket name 51 | mint_bucket: "" 52 | enhanced_cloudwatch_metrics: true 53 | # NOTE UNIX/LINUX environment vars created from senza create command line: 54 | environment: 55 | STAGE: "{{Arguments.Stage}}" 56 | 57 | # creates an ELB entry and Route53 domains to this ELB 58 | - AppLoadBalancer: 59 | Type: Senza::WeightedDnsElasticLoadBalancer 60 | HTTPPort: 8080 61 | HealthCheckPath: / 62 | SecurityGroups: 63 | - app-lsd-lb 64 | Scheme: internet-facing 65 | -------------------------------------------------------------------------------- /examples/rds-database.yaml: -------------------------------------------------------------------------------- 1 | SenzaInfo: 2 | StackName: hello-world-database 3 | Resources: 4 | Database: 5 | Type: AWS::RDS::DBInstance 6 | Properties: 7 | DBName: hello_world_db 8 | Engine: postgres 9 | EngineVersion: 9.4.7 10 | MasterUsername: postgres 11 | MasterUserPassword: postgres 12 | AllocatedStorage: "100" 13 | DeletionPolicy: Snapshot 14 | Outputs: 15 | DatabaseHost: 16 | Value: 17 | "Fn::GetAtt": [Database, Endpoint.Address] -------------------------------------------------------------------------------- /examples/timed-startup-and-shutdown.yaml: 
-------------------------------------------------------------------------------- 1 | 2 | # this is a senza.yaml describing a Location Service Demo (LSD) backend server. 3 | # use it as an example for scheduled shutdown and startup of your application 4 | # instances in a well known crontab (http://en.wikipedia.org/wiki/Cron) manner. 5 | # NOTE that Minimum: and Maximum: in your AppServer: section should conform to 6 | # the uppper and lower bound of your ScaleDownAction and ScaleUpAction. 7 | 8 | # search for NOTE within this document in order to navigate to the important spots 9 | 10 | SenzaInfo: 11 | StackName: lsd 12 | Parameters: 13 | - ImageVersion: 14 | Description: "Docker image version of lsd." 15 | 16 | SenzaComponents: 17 | 18 | # this basic configuration is required for the other components 19 | - Configuration: 20 | Type: Senza::StupsAutoConfiguration # auto-detect network setup 21 | 22 | # will create a launch configuration and auto scaling group with scaling triggers 23 | - AppServer: 24 | Type: Senza::TaupageAutoScalingGroup 25 | # NOTE to set Minimum and Maximum according to your ScaleUp- and ScaleDownAction 26 | Minimum: 0 27 | Maximum: 1 28 | InstanceType: t2.micro 29 | SecurityGroups: 30 | - app-lsd 31 | IamRoles: 32 | - app-lsd 33 | ElasticLoadBalancer: AppLoadBalancer 34 | AssociatePublicIpAddress: false # change for standalone deployment in default VPC 35 | TaupageConfig: 36 | application_version: "{{Arguments.ImageVersion}}" 37 | runtime: Docker 38 | # NOTE replace pierone.stups.zalan.do with your docker registry (if you are 39 | # not working at zalando ;-) and /mentoring/lsd with your real team/app path 40 | source: "pierone.stups.zalan.do/mentoring/lsd:{{Arguments.ImageVersion}}" 41 | health_check_path: / 42 | ports: 43 | 8080: 9000 44 | # NOTE replace this mint_bucket value with your real mint bucket 45 | mint_bucket: "zalando-stups-mint-123456789012-eu-west-1" 46 | 47 | # creates an ELB entry and Route53 domains to this ELB 48 | - AppLoadBalancer: 49 | Type: Senza::WeightedDnsElasticLoadBalancer 50 | HTTPPort: 8080 51 | HealthCheckPath: / 52 | SecurityGroups: 53 | - app-lsd-lb 54 | Scheme: internet-facing 55 | 56 | # NOTE this is the interesting part. per default, scale down to 0 instances (i.e., shut down) 57 | # at 7pm UTC (which is 8pm GMT+1), scale up to 1 instance again at 7am UTC (which is 8am GMT+1), 58 | # but only from monday till friday (1-5). adjust to your needs and keep in mind to configure a 59 | # reasonable offset for CET and CEST; the below configuration for example will startup your 60 | # instances at 9am and shutdown at 9pm in central european summer, which might still be ok. 61 | 62 | Resources: 63 | 64 | OfficeHourScaleDownAction: 65 | Type: AWS::AutoScaling::ScheduledAction 66 | Properties: 67 | AutoScalingGroupName: 68 | Ref: AppServer 69 | MinSize: 0 70 | MaxSize: 0 71 | Recurrence: "00 19 * * *" 72 | 73 | OfficeHourScaleUpAction: 74 | Type: AWS::AutoScaling::ScheduledAction 75 | Properties: 76 | AutoScalingGroupName: 77 | Ref: AppServer 78 | MinSize: 1 79 | MaxSize: 1 80 | Recurrence: "00 07 * * 1-5" 81 | -------------------------------------------------------------------------------- /next-version: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import subprocess 4 | 5 | MAJOR_VERSION = 2 6 | MINOR_VERSION = 1 7 | 8 | 9 | def get_latest_version() -> (int, int, int): 10 | """ 11 | Gets latest version based on Git Tags. 
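    Assumes every tag is a plain dotted-integer version (e.g. "2.1.3",
    parsed into the tuple (2, 1, 3)); a tag in any other format would make
    the int() conversion below fail.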
12 | """ 13 | proc = subprocess.run(['git', 'tag'], stdout=subprocess.PIPE) 14 | 15 | versions = sorted(map(lambda version: tuple(int(sub) 16 | for sub 17 | in version.split('.')), 18 | proc.stdout.decode().splitlines())) 19 | return versions[-1] 20 | 21 | 22 | if __name__ == '__main__': 23 | major, minor, build = get_latest_version() 24 | 25 | if major != MAJOR_VERSION or minor != MINOR_VERSION: 26 | new_build = 0 27 | else: 28 | new_build = build + 1 29 | 30 | print(f"{MAJOR_VERSION}.{MINOR_VERSION}.{new_build}") 31 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | requests 2 | arrow 3 | clickclick>=1.0 4 | pystache 5 | PyYAML 6 | dnspython>=1.15.0 7 | stups-pierone>=1.0.34 8 | boto3>=1.3.0 9 | botocore>=1.4.10 10 | pytest>=3.6.3 11 | raven 12 | typing 13 | -------------------------------------------------------------------------------- /senza/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Deploy immutable application stacks and create and execute AWS CloudFormation 3 | templates in a sane way 4 | """ 5 | 6 | __version__ = '0.91' 7 | -------------------------------------------------------------------------------- /senza/__main__.py: -------------------------------------------------------------------------------- 1 | import senza.cli 2 | 3 | if __name__ == '__main__': 4 | senza.cli.main() 5 | -------------------------------------------------------------------------------- /senza/arguments.py: -------------------------------------------------------------------------------- 1 | """ 2 | Functions and decorators related to command line arguments 3 | """ 4 | 5 | # invalid-name is disabled to match the style of other click options 6 | # pylint: disable=locally-disabled, invalid-name 7 | import boto3.session 8 | import click 9 | import configparser 10 | import os 11 | 12 | from .error_handling import HandleExceptions 13 | 14 | 15 | def get_region(region): 16 | """ 17 | Ensure region value. 18 | If region is not provided, get one from the config file. 19 | Raise error if still no region is set at the end. 20 | """ 21 | if not region: 22 | config = configparser.ConfigParser() 23 | try: 24 | config.read(os.path.expanduser("~/.aws/config")) 25 | if "default" in config: 26 | region = config["default"]["region"] 27 | except Exception: 28 | pass 29 | 30 | if not region: 31 | raise click.UsageError( 32 | "Please specify the AWS region on the " 33 | "command line (--region) or in ~/.aws/config" 34 | ) 35 | 36 | return region 37 | 38 | 39 | def validate_region(ctx, param, value): # pylint: disable=locally-disabled, unused-argument 40 | """Validate Click region parameter.""" 41 | 42 | value = get_region(value) # ensure region is set 43 | 44 | session = boto3.session.Session() 45 | valid_regions = session.get_available_regions('cloudformation') 46 | if value not in valid_regions: 47 | valid_regions.sort() 48 | raise click.BadParameter("'{}'. Region must be one of the " 49 | "following AWS regions:\n" 50 | " - {}".format(value, 51 | "\n - ".join(valid_regions))) 52 | 53 | return value 54 | 55 | 56 | def set_stacktrace_visible(ctx, param, value): # pylint: disable=locally-disabled, unused-argument 57 | """ 58 | Callback to define whether to display the stacktrace in case of an 59 | unhandled error. 
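    Used as the Click callback of the --stacktrace-visible option defined
    below; it simply stores the flag on HandleExceptions.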
60 | """ 61 | HandleExceptions.stacktrace_visible = value 62 | 63 | 64 | region_option = click.option('--region', 65 | envvar='AWS_DEFAULT_REGION', 66 | metavar='AWS_REGION_ID', 67 | help='AWS region ID (e.g. eu-west-1)', 68 | callback=validate_region) 69 | 70 | parameter_file_option = click.option('--parameter-file', 71 | help='Config file for params', 72 | metavar='PATH') 73 | 74 | output_option = click.option('-o', '--output', 75 | type=click.Choice(['text', 'json', 'tsv']), 76 | default='text', 77 | help='Use alternative output format') 78 | 79 | json_output_option = click.option('-o', '--output', 80 | type=click.Choice(['json', 'yaml']), 81 | default='json', 82 | help='Use alternative output format') 83 | 84 | stacktrace_visible_option = click.option('--stacktrace-visible', 85 | is_flag=True, 86 | callback=set_stacktrace_visible, 87 | expose_value=False, 88 | help='Show stack trace instead of ' 89 | 'storing it') 90 | 91 | watch_option = click.option('-W', 92 | is_flag=True, 93 | help='Auto update the screen every 2 seconds') 94 | 95 | watchrefresh_option = click.option('-w', '--watch', 96 | type=click.IntRange(1, 300), 97 | metavar='SECS', 98 | help='Auto update the screen every X seconds') 99 | 100 | field_option = click.option('--field', 101 | '-f', 102 | metavar='NAME', 103 | multiple=True, 104 | help='Specify field to be returned') 105 | -------------------------------------------------------------------------------- /senza/components/__init__.py: -------------------------------------------------------------------------------- 1 | import importlib 2 | 3 | from senza.utils import camel_case_to_underscore, pystache_render 4 | 5 | 6 | def get_component(componenttype: str): 7 | '''Get component function by type name (e.g. "Senza::MyComponent")''' 8 | 9 | prefix, _, componenttype = componenttype.partition('::') 10 | root_package = camel_case_to_underscore(prefix) 11 | module_name = camel_case_to_underscore(componenttype) 12 | try: 13 | module = importlib.import_module('{}.components.{}'.format(root_package, module_name)) 14 | except ImportError: 15 | # component (module) not found 16 | return None 17 | function_name = 'component_{}'.format(module_name) 18 | return getattr(module, function_name) 19 | 20 | 21 | def evaluate_template(template, info, components, args, account_info): 22 | data = {"SenzaInfo": info, 23 | "SenzaComponents": components, 24 | "Arguments": args, 25 | "AccountInfo": account_info} 26 | result = pystache_render(template, data) 27 | return result 28 | -------------------------------------------------------------------------------- /senza/components/configuration.py: -------------------------------------------------------------------------------- 1 | 2 | from senza.utils import ensure_keys, named_value 3 | 4 | 5 | def format_params(args): 6 | items = [(key, val) for key, val in sorted(args.__dict__.items()) if key not in ('region', 'version')] 7 | return ', '.join(['{}: {}'.format(key, val) for key, val in items]) 8 | 9 | 10 | def get_default_description(info, args): 11 | return '{} ({})'.format(info['StackName'].title().replace('-', ' '), format_params(args)) 12 | 13 | 14 | def component_configuration(definition, configuration, args, info, force, account_info): 15 | # define parameters 16 | # http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/parameters-section-structure.html 17 | if "Parameters" in info and configuration.get('DefineParameters', True): 18 | definition = ensure_keys(definition, "Parameters") 19 | default_parameter = { 20 | "Type": 
"String" 21 | } 22 | for parameter in info["Parameters"]: 23 | name, value = named_value(parameter) 24 | value_default = default_parameter.copy() 25 | value_default.update(value) 26 | definition["Parameters"][name] = value_default 27 | 28 | if 'Description' not in definition: 29 | # set some sane default stack description 30 | # we need to truncate at 1024 chars (should be Bytes actually) 31 | # see http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/template-description-structure.html 32 | definition['Description'] = get_default_description(info, args)[:1024] 33 | 34 | # ServerSubnets 35 | for region, subnets in configuration.get('ServerSubnets', {}).items(): 36 | definition = ensure_keys(definition, "Mappings", "ServerSubnets", region) 37 | definition["Mappings"]["ServerSubnets"][region]["Subnets"] = subnets 38 | 39 | # LoadBalancerSubnets 40 | for region, subnets in configuration.get('LoadBalancerSubnets', {}).items(): 41 | definition = ensure_keys(definition, "Mappings", "LoadBalancerSubnets", region) 42 | definition["Mappings"]["LoadBalancerSubnets"][region]["Subnets"] = subnets 43 | 44 | # LoadBalancerInternalSubnets 45 | for region, subnets in configuration.get('LoadBalancerInternalSubnets', {}).items(): 46 | definition = ensure_keys(definition, "Mappings", "LoadBalancerInternalSubnets", region) 47 | definition["Mappings"]["LoadBalancerInternalSubnets"][region]["Subnets"] = subnets 48 | 49 | # Images 50 | for name, image in configuration.get('Images', {}).items(): 51 | for region, ami in image.items(): 52 | definition = ensure_keys(definition, "Mappings", "Images", region, name) 53 | definition["Mappings"]["Images"][region][name] = ami 54 | 55 | return definition 56 | -------------------------------------------------------------------------------- /senza/components/coreos_auto_configuration.py: -------------------------------------------------------------------------------- 1 | import requests 2 | 3 | from senza.components.subnet_auto_configuration import component_subnet_auto_configuration 4 | from senza.utils import ensure_keys 5 | 6 | 7 | def find_coreos_image(release_channel: str, region: str): 8 | '''Find the latest CoreOS AMI''' 9 | 10 | response = requests.get('https://coreos.com/dist/aws/aws-{}.json'.format(release_channel), timeout=5) 11 | response.raise_for_status() 12 | data = response.json() 13 | return data[region]['hvm'] 14 | 15 | 16 | def component_coreos_auto_configuration(definition, configuration, args, info, force, account_info): 17 | ami_id = find_coreos_image(configuration.get('ReleaseChannel') or 'stable', args.region) 18 | configuration = ensure_keys(configuration, "Images", 'LatestCoreOSImage', args.region) 19 | configuration["Images"]['LatestCoreOSImage'][args.region] = ami_id 20 | 21 | component_subnet_auto_configuration(definition, configuration, args, info, force, account_info) 22 | 23 | return definition 24 | -------------------------------------------------------------------------------- /senza/components/elastic_load_balancer_v2.py: -------------------------------------------------------------------------------- 1 | import click 2 | from senza.aws import resolve_security_groups 3 | from senza.components.elastic_load_balancer import (ALLOWED_LOADBALANCER_SCHEMES, 4 | get_load_balancer_name, 5 | get_ssl_cert) 6 | from senza.utils import generate_valid_cloud_name 7 | from senza.definitions import AccountArguments 8 | 9 | from ..cli import TemplateArguments 10 | from ..manaus.route53 import convert_cname_records_to_alias 11 | 12 | 
SENZA_PROPERTIES = frozenset(['Domains', 'HealthCheckPath', 'HealthCheckPort', 'HealthCheckProtocol', 13 | 'HTTPPort', 'Name', 'SecurityGroups', 'SSLCertificateId', 'Type']) 14 | ALLOWED_HEALTH_CHECK_PROTOCOLS = frozenset(["HTTP", "HTTPS"]) 15 | 16 | 17 | def get_listeners(lb_name, target_group_name, subdomain, main_zone, configuration, 18 | account_info: AccountArguments): 19 | ssl_cert = configuration.get('SSLCertificateId') 20 | if ssl_cert is None: 21 | ssl_certs = [None] 22 | elif isinstance(ssl_cert, list): 23 | ssl_certs = ssl_cert 24 | else: 25 | ssl_certs = ssl_cert.split(',') 26 | 27 | return [{ 28 | 'Type': 'AWS::ElasticLoadBalancingV2::Listener', 29 | 'Properties': { 30 | "Certificates": 31 | [{'CertificateArn': get_ssl_cert(subdomain, main_zone, cert, account_info)} for cert in ssl_certs], 32 | "Protocol": "HTTPS", 33 | "DefaultActions": [{'Type': 'forward', 'TargetGroupArn': {'Ref': target_group_name}}], 34 | 'LoadBalancerArn': {'Ref': lb_name}, 35 | "Port": 443 36 | } 37 | }] 38 | 39 | 40 | def component_elastic_load_balancer_v2(definition, 41 | configuration: dict, 42 | args: TemplateArguments, 43 | info: dict, 44 | force, 45 | account_info: AccountArguments): 46 | lb_name = configuration["Name"] 47 | # domains pointing to the load balancer 48 | subdomain = '' 49 | main_zone = None 50 | for name, domain in configuration.get('Domains', {}).items(): 51 | name = '{}{}'.format(lb_name, name) 52 | 53 | domain_name = "{0}.{1}".format(domain["Subdomain"], domain["Zone"]) 54 | 55 | convert_cname_records_to_alias(domain_name) 56 | 57 | properties = {"Type": "A", 58 | "Name": domain_name, 59 | "HostedZoneName": domain["Zone"], 60 | "AliasTarget": {"HostedZoneId": {"Fn::GetAtt": [lb_name, 61 | "CanonicalHostedZoneID"]}, 62 | "DNSName": {"Fn::GetAtt": [lb_name, "DNSName"]}}} 63 | definition["Resources"][name] = {"Type": "AWS::Route53::RecordSet", 64 | "Properties": properties} 65 | 66 | if domain["Type"] == "weighted": 67 | definition["Resources"][name]["Properties"]['Weight'] = 0 68 | definition["Resources"][name]["Properties"]['SetIdentifier'] = "{0}-{1}".format(info["StackName"], 69 | info["StackVersion"]) 70 | subdomain = domain['Subdomain'] 71 | main_zone = domain['Zone'] # type: str 72 | 73 | target_group_name = lb_name + 'TargetGroup' 74 | listeners = configuration.get('Listeners') or get_listeners( 75 | lb_name, target_group_name, subdomain, main_zone, configuration, account_info) 76 | 77 | health_check_protocol = configuration.get('HealthCheckProtocol') or 'HTTP' 78 | 79 | if health_check_protocol not in ALLOWED_HEALTH_CHECK_PROTOCOLS: 80 | raise click.UsageError('Protocol "{}" is not supported for LoadBalancer'.format(health_check_protocol)) 81 | 82 | health_check_path = configuration.get("HealthCheckPath") or '/health' 83 | health_check_port = configuration.get("HealthCheckPort") or configuration["HTTPPort"] 84 | 85 | if configuration.get('LoadBalancerName'): 86 | loadbalancer_name = generate_valid_cloud_name(configuration["LoadBalancerName"], 32) 87 | elif configuration.get('NameSuffix'): 88 | version = '{}-{}'.format(info["StackVersion"], 89 | configuration['NameSuffix']) 90 | loadbalancer_name = get_load_balancer_name(info["StackName"], version) 91 | del (configuration['NameSuffix']) 92 | else: 93 | loadbalancer_name = get_load_balancer_name(info["StackName"], 94 | info["StackVersion"]) 95 | 96 | loadbalancer_scheme = configuration.get('Scheme') or 'internal' 97 | if loadbalancer_scheme == 'internet-facing': 98 | click.secho('You are deploying an internet-facing ELB that 
will be ' 99 | 'publicly accessible! You should have OAUTH2 and HTTPS ' 100 | 'in place!', bold=True, err=True) 101 | 102 | if loadbalancer_scheme not in ALLOWED_LOADBALANCER_SCHEMES: 103 | raise click.UsageError('Scheme "{}" is not supported for LoadBalancer'.format(loadbalancer_scheme)) 104 | 105 | if loadbalancer_scheme == "internal": 106 | loadbalancer_subnet_map = "LoadBalancerInternalSubnets" 107 | else: 108 | loadbalancer_subnet_map = "LoadBalancerSubnets" 109 | 110 | vpc_id = configuration.get("VpcId") or account_info.VpcID 111 | 112 | tags = [ 113 | # Tag "Name" 114 | { 115 | "Key": "Name", 116 | "Value": "{0}-{1}".format(info["StackName"], info["StackVersion"]) 117 | }, 118 | # Tag "StackName" 119 | { 120 | "Key": "StackName", 121 | "Value": info["StackName"], 122 | }, 123 | # Tag "StackVersion" 124 | { 125 | "Key": "StackVersion", 126 | "Value": info["StackVersion"] 127 | } 128 | ] 129 | 130 | # load balancer 131 | definition["Resources"][lb_name] = { 132 | "Type": "AWS::ElasticLoadBalancingV2::LoadBalancer", 133 | "Properties": { 134 | 'Name': loadbalancer_name, 135 | 'Scheme': loadbalancer_scheme, 136 | 'SecurityGroups': resolve_security_groups(configuration["SecurityGroups"], args.region), 137 | 'Subnets': {"Fn::FindInMap": [loadbalancer_subnet_map, {"Ref": "AWS::Region"}, "Subnets"]}, 138 | 'LoadBalancerAttributes': [ 139 | { 140 | "Key": "idle_timeout.timeout_seconds", 141 | "Value": "60" 142 | } 143 | ], 144 | "Tags": tags 145 | } 146 | } 147 | definition["Resources"][target_group_name] = { 148 | 'Type': 'AWS::ElasticLoadBalancingV2::TargetGroup', 149 | 'Properties': { 150 | 'Name': loadbalancer_name, 151 | 'HealthCheckIntervalSeconds': '10', 152 | 'HealthCheckPath': health_check_path, 153 | 'HealthCheckPort': health_check_port, 154 | 'HealthCheckProtocol': health_check_protocol, 155 | 'HealthCheckTimeoutSeconds': '5', 156 | 'HealthyThresholdCount': '2', 157 | 'Port': configuration['HTTPPort'], 158 | 'Protocol': 'HTTP', 159 | 'UnhealthyThresholdCount': '2', 160 | 'VpcId': vpc_id, 161 | 'Tags': tags, 162 | 'TargetGroupAttributes': [{'Key': 'deregistration_delay.timeout_seconds', 'Value': '60'}] 163 | } 164 | } 165 | resource_names = set([lb_name, target_group_name]) 166 | for i, listener in enumerate(listeners): 167 | if i == 0: 168 | suffix = '' 169 | else: 170 | suffix = str(i + 1) 171 | resource_name = lb_name + 'Listener' + suffix 172 | definition['Resources'][resource_name] = listener 173 | resource_names.add(resource_name) 174 | for key, val in configuration.items(): 175 | # overwrite any specified properties, but only properties which were defined by us already 176 | for res in resource_names: 177 | if key in definition['Resources'][res]['Properties'] and key not in SENZA_PROPERTIES: 178 | definition['Resources'][res]['Properties'][key] = val 179 | return definition 180 | -------------------------------------------------------------------------------- /senza/components/iam_role.py: -------------------------------------------------------------------------------- 1 | 2 | import boto3 3 | from senza.utils import ensure_keys 4 | 5 | 6 | def get_merged_policies(roles: list): 7 | iam = boto3.resource('iam') 8 | policies = [] 9 | for rolename in roles: 10 | role = iam.Role(rolename) 11 | for policy in role.policies.all(): 12 | policies.append({'PolicyName': policy.policy_name, 13 | 'PolicyDocument': policy.policy_document}) 14 | return policies 15 | 16 | 17 | def component_iam_role(definition, configuration, args, info, force, account_info): 18 | definition = 
ensure_keys(definition, "Resources") 19 | role_name = configuration['Name'] 20 | definition['Resources'][role_name] = { 21 | 'Type': 'AWS::IAM::Role', 22 | 'Properties': { 23 | "AssumeRolePolicyDocument": configuration.get('AssumeRolePolicyDocument', { 24 | "Version": "2012-10-17", 25 | "Statement": [ 26 | { 27 | "Effect": "Allow", 28 | "Principal": { 29 | "Service": ["ec2.amazonaws.com"] 30 | }, 31 | "Action": ["sts:AssumeRole"] 32 | } 33 | ] 34 | }), 35 | 'Path': configuration.get('Path', '/'), 36 | 'Policies': configuration.get('Policies', []) + get_merged_policies( 37 | configuration.get('MergePoliciesFromIamRoles', [])) 38 | } 39 | } 40 | return definition 41 | -------------------------------------------------------------------------------- /senza/components/redis_cluster.py: -------------------------------------------------------------------------------- 1 | 2 | from senza.aws import resolve_security_groups 3 | from senza.utils import ensure_keys 4 | 5 | 6 | def component_redis_cluster(definition, configuration, args, info, force, account_info): 7 | name = configuration["Name"] 8 | definition = ensure_keys(definition, "Resources") 9 | 10 | number_of_nodes = int(configuration.get('NumberOfNodes', '2')) 11 | 12 | definition["Resources"]["RedisReplicationGroup"] = { 13 | "Type": "AWS::ElastiCache::ReplicationGroup", 14 | "Properties": { 15 | "AutomaticFailoverEnabled": True, 16 | "CacheNodeType": configuration.get('CacheNodeType', 'cache.t2.small'), 17 | "CacheSubnetGroupName": { 18 | "Ref": "RedisSubnetGroup" 19 | }, 20 | "Engine": "redis", 21 | "EngineVersion": configuration.get('EngineVersion', '2.8.19'), 22 | "CacheParameterGroupName": configuration.get('CacheParameterGroupName', 'default.redis2.8'), 23 | "NumCacheClusters": number_of_nodes, 24 | "SecurityGroupIds": resolve_security_groups(configuration["SecurityGroups"], args.region), 25 | "ReplicationGroupDescription": "Redis replicated cache cluster: " + name, 26 | } 27 | } 28 | 29 | definition["Resources"]["RedisSubnetGroup"] = { 30 | "Type": "AWS::ElastiCache::SubnetGroup", 31 | "Properties": { 32 | "Description": "Redis cluster subnet group", 33 | "SubnetIds": {"Fn::FindInMap": ["ServerSubnets", {"Ref": "AWS::Region"}, "Subnets"]} 34 | } 35 | } 36 | 37 | return definition 38 | -------------------------------------------------------------------------------- /senza/components/redis_node.py: -------------------------------------------------------------------------------- 1 | 2 | from senza.aws import resolve_security_groups 3 | from senza.utils import ensure_keys 4 | 5 | 6 | def component_redis_node(definition, configuration, args, info, force, account_info): 7 | name = configuration["Name"] 8 | definition = ensure_keys(definition, "Resources") 9 | 10 | definition["Resources"]["RedisCacheCluster"] = { 11 | "Type": "AWS::ElastiCache::CacheCluster", 12 | "Properties": { 13 | "ClusterName": name, 14 | "Engine": "redis", 15 | "EngineVersion": configuration.get('EngineVersion', '2.8.19'), 16 | "CacheParameterGroupName": configuration.get('CacheParameterGroupName', 'default.redis2.8'), 17 | "NumCacheNodes": 1, 18 | "CacheNodeType": configuration.get('CacheNodeType', 'cache.t2.small'), 19 | "CacheSubnetGroupName": { 20 | "Ref": "RedisSubnetGroup" 21 | }, 22 | "VpcSecurityGroupIds": resolve_security_groups(configuration["SecurityGroups"], args.region) 23 | } 24 | } 25 | 26 | definition["Resources"]["RedisSubnetGroup"] = { 27 | "Type": "AWS::ElastiCache::SubnetGroup", 28 | "Properties": { 29 | "Description": "Redis cluster subnet group", 
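            # relies on the "ServerSubnets" mapping that the Configuration /
            # StupsAutoConfiguration component adds to the same definition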
30 | "SubnetIds": {"Fn::FindInMap": ["ServerSubnets", {"Ref": "AWS::Region"}, "Subnets"]} 31 | } 32 | } 33 | 34 | return definition 35 | -------------------------------------------------------------------------------- /senza/components/stups_auto_configuration.py: -------------------------------------------------------------------------------- 1 | import senza.stups.taupage as taupage 2 | 3 | from senza.components.subnet_auto_configuration import component_subnet_auto_configuration 4 | from senza.utils import ensure_keys 5 | 6 | 7 | def component_stups_auto_configuration(definition, configuration, args, info, force, account_info): 8 | for channel in taupage.CHANNELS.values(): 9 | most_recent_image = taupage.find_image(args.region, channel) 10 | if most_recent_image: 11 | configuration = ensure_keys(configuration, "Images", channel.image_mapping, args.region) 12 | configuration["Images"][channel.image_mapping][args.region] = most_recent_image.id 13 | elif channel == taupage.DEFAULT_CHANNEL: 14 | # Require at least one image from the stable channel 15 | raise Exception('No Taupage AMI found') 16 | 17 | component_subnet_auto_configuration(definition, configuration, args, info, force, account_info) 18 | 19 | return definition 20 | -------------------------------------------------------------------------------- /senza/components/subnet_auto_configuration.py: -------------------------------------------------------------------------------- 1 | import boto3 2 | 3 | from senza.components.configuration import component_configuration 4 | from senza.utils import ensure_keys 5 | from senza.aws import get_tag 6 | 7 | 8 | def component_subnet_auto_configuration(definition, configuration, args, info, force, account_info): 9 | ec2 = boto3.resource('ec2', args.region) 10 | 11 | vpc_id = configuration.get('VpcId', account_info.VpcID) 12 | availability_zones = configuration.get('AvailabilityZones') 13 | public_only = configuration.get('PublicOnly') 14 | 15 | server_subnets = [] 16 | lb_subnets = [] 17 | lb_internal_subnets = [] 18 | all_subnets = [] 19 | for subnet in ec2.subnets.filter(Filters=[{'Name': 'vpc-id', 'Values': [vpc_id]}]): 20 | name = get_tag(subnet.tags, 'Name', '') 21 | if availability_zones and subnet.availability_zone not in availability_zones: 22 | # skip subnet as it's not in one of the given AZs 23 | continue 24 | all_subnets.append(subnet.id) 25 | if public_only: 26 | if 'dmz' in name: 27 | lb_subnets.append(subnet.id) 28 | lb_internal_subnets.append(subnet.id) 29 | server_subnets.append(subnet.id) 30 | else: 31 | if 'dmz' in name: 32 | lb_subnets.append(subnet.id) 33 | elif 'internal' in name: 34 | lb_internal_subnets.append(subnet.id) 35 | server_subnets.append(subnet.id) 36 | elif 'nat' in name: 37 | # ignore creating listeners in NAT gateway subnets 38 | pass 39 | else: 40 | server_subnets.append(subnet.id) 41 | 42 | if not lb_subnets: 43 | if public_only: 44 | # assume default AWS VPC setup with all subnets being public 45 | lb_subnets = all_subnets 46 | lb_internal_subnets = all_subnets 47 | server_subnets = all_subnets 48 | else: 49 | # no DMZ subnets were found, just use the same set for both LB and instances 50 | lb_subnets = server_subnets 51 | 52 | configuration = ensure_keys(configuration, "ServerSubnets", args.region) 53 | configuration["ServerSubnets"][args.region] = server_subnets 54 | 55 | configuration = ensure_keys(configuration, "LoadBalancerSubnets", args.region) 56 | configuration["LoadBalancerSubnets"][args.region] = lb_subnets 57 | 58 | configuration = 
ensure_keys(configuration, "LoadBalancerInternalSubnets", args.region) 59 | configuration["LoadBalancerInternalSubnets"][args.region] = lb_internal_subnets 60 | 61 | component_configuration(definition, configuration, args, info, force, account_info) 62 | 63 | return definition 64 | -------------------------------------------------------------------------------- /senza/components/taupage_auto_scaling_group.py: -------------------------------------------------------------------------------- 1 | import boto3 2 | import json 3 | import re 4 | import sys 5 | import textwrap 6 | 7 | import click 8 | import pierone.api 9 | import pierone.cli 10 | import stups_cli.config 11 | import os 12 | import yaml 13 | import zign.api 14 | from senza.aws import resolve_referenced_resource 15 | from senza.components.auto_scaling_group import component_auto_scaling_group 16 | from senza.docker import docker_image_exists 17 | from senza.utils import ensure_keys 18 | from typing import Optional 19 | 20 | _AWS_FN_RE = re.compile(r"('[{]{2} (.*?) [}]{2}')", re.DOTALL) 21 | 22 | # from kio OpenAPI yaml 23 | APPLICATION_ID_RE = re.compile(r"^[a-z][a-z0-9-]*[a-z0-9]$") 24 | APPLICATION_VERSION_RE = re.compile(r"^[A-Za-z0-9](?:[A-Za-z0-9._-]*[A-Za-z0-9])?$") 25 | 26 | 27 | def check_application_id(app_id: str): 28 | if not APPLICATION_ID_RE.match(app_id): 29 | raise click.UsageError('Application id must satisfy regular ' 30 | 'expression pattern "{}"'.format(APPLICATION_ID_RE.pattern)) 31 | 32 | 33 | def check_application_version(version: str): 34 | if not APPLICATION_VERSION_RE.match(version): 35 | raise click.UsageError('Application version must satisfy regular ' 36 | 'expression pattern "{}"'.format(APPLICATION_VERSION_RE.pattern)) 37 | 38 | 39 | def get_token(docker_image: pierone.api.DockerImage) -> Optional[dict]: 40 | """ 41 | Attempt to get existing token. 42 | If that fails, try to login to pierone and then get the token. 43 | 44 | :return: {'access_token': ..., 45 | 'creation_time': ..., 46 | 'expires_in': ..., 47 | 'refresh_token': ..., 48 | 'scope': ..., 49 | 'token_type': ...} 50 | """ 51 | token = zign.api.get_existing_token('pierone') 52 | if token: 53 | return token 54 | 55 | config = stups_cli.config.load_config('pierone') 56 | url = docker_image.registry 57 | url = pierone.cli.set_pierone_url(config, url) 58 | user = zign.api.get_config().get('user') or os.getenv('USER') 59 | pierone.api.docker_login( 60 | url=url, realm=None, name='pierone', 61 | user=user, password=None, prompt=True 62 | ) 63 | return zign.api.get_existing_token('pierone') 64 | 65 | 66 | def check_docker_image_exists(docker_image: pierone.api.DockerImage): 67 | token = None 68 | if 'pierone' in docker_image.registry: 69 | token = get_token(docker_image) 70 | if not token: 71 | msg = textwrap.dedent(''' 72 | Unauthorized: Cannot check whether Docker image "{}" exists in Pier One Docker registry. 73 | Please generate a "pierone" OAuth access token using "pierone login". 74 | Alternatively you can skip this check using the "--force" option. 
75 | '''.format(docker_image)).strip() 76 | raise click.UsageError(msg) 77 | else: 78 | token = token['access_token'] 79 | exists = pierone.api.image_exists(docker_image, token) 80 | else: 81 | exists = docker_image_exists(str(docker_image)) 82 | 83 | if not exists: 84 | raise click.UsageError('Docker image "{}" does not exist'.format(docker_image)) 85 | 86 | return True 87 | 88 | 89 | def component_taupage_auto_scaling_group(definition, configuration, args, info, force, account_info): 90 | # inherit from the normal auto scaling group but discourage user info and replace with a Taupage config 91 | if 'Image' not in configuration: 92 | configuration['Image'] = 'LatestTaupageImage' 93 | definition = component_auto_scaling_group(definition, configuration, args, info, force, account_info) 94 | 95 | taupage_config = configuration['TaupageConfig'] 96 | 97 | if 'notify_cfn' not in taupage_config: 98 | taupage_config['notify_cfn'] = {'stack': '{}-{}'.format(info["StackName"], info["StackVersion"]), 99 | 'resource': configuration['Name']} 100 | 101 | if 'application_id' not in taupage_config: 102 | taupage_config['application_id'] = info['StackName'] 103 | 104 | if 'application_version' not in taupage_config: 105 | taupage_config['application_version'] = info['StackVersion'] 106 | 107 | check_application_id(taupage_config['application_id']) 108 | check_application_version(taupage_config['application_version']) 109 | 110 | runtime = taupage_config.get('runtime') 111 | if runtime != 'Docker': 112 | raise click.UsageError('Taupage only supports the "Docker" runtime currently') 113 | 114 | source = taupage_config.get('source') 115 | if not source: 116 | raise click.UsageError('The "source" property of TaupageConfig must be specified') 117 | 118 | docker_image = pierone.api.DockerImage.parse(source) 119 | 120 | if not force and docker_image.registry: 121 | check_docker_image_exists(docker_image) 122 | 123 | config_name = configuration["Name"] + "Config" 124 | ensure_keys(definition, "Resources", config_name, "Properties") 125 | properties = definition["Resources"][config_name]["Properties"] 126 | 127 | mappings = definition.get('Mappings', {}) 128 | server_subnets = set(mappings.get('ServerSubnets', {}).get(args.region, {}).get('Subnets', [])) 129 | 130 | # in dmz or public subnet but without public ip 131 | if server_subnets and not properties.get('AssociatePublicIpAddress') and server_subnets ==\ 132 | set(mappings.get('LoadBalancerInternalSubnets', {}).get(args.region, {}).get('Subnets', [])): 133 | # we need to extend taupage_config with the mapping subnet-id => net ip 134 | nat_gateways = {} 135 | ec2 = boto3.client('ec2', args.region) 136 | for nat_gateway in ec2.describe_nat_gateways()['NatGateways']: 137 | if nat_gateway['SubnetId'] in server_subnets: 138 | for address in nat_gateway['NatGatewayAddresses']: 139 | nat_gateways[nat_gateway['SubnetId']] = address['PrivateIp'] 140 | break 141 | if nat_gateways: 142 | taupage_config['nat_gateways'] = nat_gateways 143 | 144 | properties["UserData"] = {"Fn::Base64": generate_user_data(taupage_config, args.region)} 145 | 146 | return definition 147 | 148 | 149 | def generate_user_data(taupage_config, region): 150 | """ 151 | Generates the CloudFormation "UserData" field. 152 | It looks for AWS functions such as Fn:: and Ref and generates the appropriate UserData json field, 153 | It leaves nodes representing AWS functions or refs unmodified and converts into text everything else. 
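    Depending on whether any AWS function or Ref is present, the result is
    either a plain YAML string or a {"Fn::Join": ...} structure that stitches
    the text fragments and the functions back together.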
154 | Example:: 155 | environment: 156 | S3_BUCKET: {"Ref": "ExhibitorBucket"} 157 | S3_PREFIX: exhibitor 158 | 159 | transforms into:: 160 | {"Fn::Join": ["", "environment:\n S3_BUCKET: ", {"Ref": "ExhibitorBucket"}, "\n S3_PREFIX: exhibitor"]} 161 | 162 | :param taupage_config: 163 | :return: 164 | """ 165 | 166 | def is_aws_fn(name): 167 | try: 168 | return name == "Ref" or (isinstance(name, str) and name.startswith("Fn::")) 169 | except Exception: 170 | return False 171 | 172 | def transform(node): 173 | """Transform AWS functions and refs into an string representation for later split and substitution""" 174 | 175 | if isinstance(node, dict): 176 | num_keys = len(node) 177 | if 'Stack' in node and 'Output' in node: 178 | return resolve_referenced_resource(node, region) 179 | if num_keys > 0: 180 | key = next(iter(node.keys())) 181 | if num_keys == 1 and is_aws_fn(key): 182 | return "".join(["{{ ", json.dumps(node), " }}"]) 183 | else: 184 | return {key: transform(value) for key, value in node.items()} 185 | else: 186 | return node 187 | elif isinstance(node, list): 188 | return [transform(subnode) for subnode in node] 189 | else: 190 | return node 191 | 192 | def split(text): 193 | """Splits yaml text into text and AWS functions/refs""" 194 | 195 | parts = [] 196 | last_pos = 0 197 | for m in _AWS_FN_RE.finditer(text): 198 | parts += [text[last_pos:m.start(1)], json.loads(m.group(2))] 199 | last_pos = m.end(1) 200 | parts += [text[last_pos:]] 201 | return parts 202 | 203 | yaml_text = yaml.dump(transform(taupage_config), width=sys.maxsize, default_flow_style=False) 204 | 205 | parts = split("#taupage-ami-config\n" + yaml_text) 206 | 207 | if len(parts) == 1: 208 | return parts[0] 209 | else: 210 | return {"Fn::Join": ["", parts]} 211 | -------------------------------------------------------------------------------- /senza/components/weighted_dns_elastic_load_balancer.py: -------------------------------------------------------------------------------- 1 | from typing import Dict 2 | 3 | from senza.definitions import AccountArguments 4 | from senza.components.elastic_load_balancer import component_elastic_load_balancer 5 | from senza.manaus.route53 import Route53 6 | 7 | 8 | def component_weighted_dns_elastic_load_balancer(definition, 9 | configuration: Dict, 10 | args, 11 | info, 12 | force, 13 | account_info: AccountArguments, 14 | lb_component=component_elastic_load_balancer): 15 | if 'Domains' not in configuration: 16 | if 'MainDomain' in configuration: 17 | main_domain = configuration['MainDomain'] 18 | main_subdomain, fall_back_hz = account_info.split_domain(main_domain) 19 | try: 20 | hosted_zone = next(Route53.get_hosted_zones(domain_name=main_domain)) 21 | main_zone = hosted_zone.name 22 | except StopIteration: 23 | main_zone = fall_back_hz 24 | del configuration['MainDomain'] 25 | else: 26 | main_zone = account_info.Domain 27 | main_subdomain = info['StackName'] 28 | 29 | if 'VersionDomain' in configuration: 30 | version_domain = configuration['VersionDomain'] 31 | version_subdomain, fall_back_hz = account_info.split_domain(version_domain) 32 | try: 33 | hosted_zone = next(Route53.get_hosted_zones(domain_name=version_domain)) 34 | version_zone = hosted_zone.name 35 | except StopIteration: 36 | version_zone = fall_back_hz 37 | del configuration['VersionDomain'] 38 | else: 39 | version_zone = account_info.Domain 40 | version_subdomain = '{}-{}'.format(info['StackName'], info['StackVersion']) 41 | 42 | configuration['Domains'] = {'MainDomain': {'Type': 'weighted', 43 | 'Zone': 
'{}.'.format(main_zone.rstrip('.')), 44 | 'Subdomain': main_subdomain}, 45 | 'VersionDomain': {'Type': 'standalone', 46 | 'Zone': '{}.'.format(version_zone.rstrip('.')), 47 | 'Subdomain': version_subdomain}} 48 | return lb_component(definition, configuration, args, info, force, 49 | account_info) 50 | -------------------------------------------------------------------------------- /senza/components/weighted_dns_elastic_load_balancer_v2.py: -------------------------------------------------------------------------------- 1 | 2 | from senza.components.weighted_dns_elastic_load_balancer import component_weighted_dns_elastic_load_balancer 3 | from senza.components.elastic_load_balancer_v2 import component_elastic_load_balancer_v2 4 | 5 | 6 | def component_weighted_dns_elastic_load_balancer_v2(definition, configuration, args, info, force, account_info): 7 | return component_weighted_dns_elastic_load_balancer(definition, configuration, args, info, force, account_info, 8 | lb_component=component_elastic_load_balancer_v2) 9 | -------------------------------------------------------------------------------- /senza/configuration.py: -------------------------------------------------------------------------------- 1 | """ 2 | Class and instance to read and write senza configuration. 3 | 4 | Senza configuration consists of an hierarchical yaml file with 5 | sections > keys > values, which are represented in the form SECTION.KEY 6 | """ 7 | 8 | from collections.abc import MutableMapping 9 | from pathlib import Path 10 | from typing import Dict, Tuple 11 | 12 | import yaml 13 | from click import get_app_dir 14 | 15 | from .exceptions import InvalidConfigKey 16 | 17 | CONFIGURATION_PATH = Path(get_app_dir('senza')) / "config.yaml" 18 | 19 | 20 | class Configuration(MutableMapping): 21 | 22 | """ 23 | Class to read and write senza configuration as map. Keys take the form of 24 | SECTION.KEY 25 | """ 26 | 27 | def __init__(self, path: Path): 28 | self.config_path = path 29 | 30 | def __iter__(self): 31 | yield from self.raw_dict 32 | 33 | def __len__(self): 34 | return len(self.raw_dict) 35 | 36 | def __getitem__(self, key: str) -> str: 37 | section, sub_key = self.__split_key(key) 38 | return self.raw_dict[section][sub_key] 39 | 40 | def __setitem__(self, key: str, value): 41 | section, sub_key = self.__split_key(key) 42 | cfg = self.raw_dict 43 | 44 | if section not in self.raw_dict: 45 | cfg[section] = {} 46 | cfg[section][sub_key] = str(value) 47 | self.__save(cfg) 48 | 49 | def __delitem__(self, key): 50 | section, sub_key = self.__split_key(key) 51 | cfg = self.raw_dict 52 | del cfg[section][sub_key] 53 | self.__save(cfg) 54 | 55 | @staticmethod 56 | def __split_key(key: str) -> Tuple[str, str]: 57 | """ 58 | Splits the full key in section and subkey 59 | """ 60 | try: 61 | section, sub_key = key.split('.', 1) 62 | except ValueError: 63 | # error message inspired by git config 64 | raise InvalidConfigKey('key does not contain ' 65 | 'a section: {}'.format(key)) 66 | return section, sub_key 67 | 68 | def __save(self, cfg): 69 | """ 70 | Saves the configuration in the configuration path, creating the 71 | directory if necessary. 
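        The complete mapping is re-serialized with yaml.safe_dump, so sections
        other than the one being modified are written back unchanged.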
72 | """ 73 | try: 74 | self.config_path.parent.mkdir(parents=True) 75 | except FileExistsError: 76 | # this try...except can be replaced with exist_ok=True when 77 | # we drop python3.4 support 78 | pass 79 | with self.config_path.open('w+') as config_file: 80 | yaml.safe_dump(cfg, config_file, 81 | default_flow_style=False) 82 | 83 | @property 84 | def raw_dict(self) -> Dict[str, Dict[str, str]]: 85 | """ 86 | Returns a dict with the configuration data as stored in config.yaml 87 | """ 88 | try: 89 | with self.config_path.open() as config_file: 90 | cfg = yaml.safe_load(config_file) 91 | except FileNotFoundError: 92 | cfg = {} 93 | return cfg 94 | 95 | 96 | configuration = Configuration(CONFIGURATION_PATH) # pylint: disable=locally-disabled, invalid-name 97 | -------------------------------------------------------------------------------- /senza/definitions.py: -------------------------------------------------------------------------------- 1 | """ 2 | Functions and classes related to Senza Definition files 3 | """ 4 | 5 | # pylint: disable=locally-disabled, invalid-name 6 | 7 | from typing import Tuple 8 | import sys 9 | 10 | from clickclick import choice 11 | from senza.aws import get_account_alias, get_account_id 12 | from senza.manaus.ec2 import EC2 13 | from senza.manaus.exceptions import VPCError 14 | from senza.manaus.route53 import Route53 15 | from senza.templates._helper import get_mint_bucket_name 16 | 17 | 18 | class AccountArguments: 19 | """ 20 | Account arguments to use in the definitions 21 | """ 22 | def __init__(self, region): 23 | self.Region = region 24 | self.__AccountAlias = None 25 | self.__AccountID = None 26 | self.__Domain = None 27 | self.__MintBucket = None 28 | self.__TeamID = None 29 | self.__VpcID = None 30 | 31 | @property 32 | def AccountID(self) -> str: 33 | """ 34 | Returns the (non-human friendly) account id 35 | """ 36 | if self.__AccountID is None: 37 | self.__AccountID = get_account_id() 38 | return self.__AccountID 39 | 40 | @property 41 | def AccountAlias(self) -> str: 42 | """ 43 | Returns the human readable account alias 44 | """ 45 | if self.__AccountAlias is None: 46 | self.__AccountAlias = get_account_alias() 47 | return self.__AccountAlias 48 | 49 | @property 50 | def Domain(self) -> str: 51 | """ 52 | Return the domain name for the account. 53 | """ 54 | if self.__Domain is None: 55 | self.__setDomain() 56 | return self.__Domain.rstrip('.') 57 | 58 | def __setDomain(self, domain_name=None) -> str: 59 | """ 60 | Sets domain for account. If there's only one hosted zone matching the 61 | domain_name it will be used otherwise the user will be presented with 62 | a choice. 63 | """ 64 | domain_list = list(Route53.get_hosted_zones(domain_name)) 65 | if len(domain_list) == 0: 66 | raise AttributeError('No Domain configured') 67 | elif len(domain_list) > 1: 68 | domain = choice('Please select the domain', 69 | sorted(domain.domain_name 70 | for domain in domain_list)) 71 | else: 72 | domain = domain_list[0].domain_name 73 | self.__Domain = domain 74 | return domain 75 | 76 | def split_domain(self, domain_name) -> Tuple[str, str]: 77 | """ 78 | Splits domain_name in sub_domain and main_domain based on the account 79 | domain. 
80 | """ 81 | self.__setDomain(domain_name) 82 | if domain_name.endswith('.{}'.format(self.Domain)): 83 | return domain_name[:-len('.{}'.format(self.Domain))], self.Domain 84 | else: 85 | # default behaviour for unknown domains 86 | return domain_name.split('.', 1) 87 | 88 | @property 89 | def TeamID(self) -> str: 90 | """ 91 | Returns the team id based on the account name 92 | """ 93 | if self.__TeamID is None: 94 | self.__TeamID = self.AccountAlias.split('-', maxsplit=1)[-1] 95 | return self.__TeamID 96 | 97 | @property 98 | def VpcID(self) -> str: 99 | """ 100 | Returns the VPC ID to use. If a there's a default VPC it returns that 101 | one, otherwise it will provide the user a choice if running in an 102 | interactive terminal or raise an exception otherwise. 103 | """ 104 | if self.__VpcID is None: 105 | ec2 = EC2(self.Region) 106 | try: 107 | vpc = ec2.get_default_vpc() 108 | except VPCError as error: 109 | if sys.stdin.isatty() and error.number_of_vpcs: 110 | # if running in interactive terminal and there are VPCs 111 | # to choose from 112 | vpcs = ec2.get_all_vpcs() 113 | options = [(vpc.vpc_id, str(vpc)) for vpc in vpcs] 114 | print("Can't find a default VPC") 115 | vpc = choice("Select VPC to use", 116 | options=options) 117 | else: # if not running in interactive terminal (e.g Jenkins) 118 | raise 119 | self.__VpcID = vpc.vpc_id 120 | return self.__VpcID 121 | 122 | @property 123 | def MintBucket(self) -> str: 124 | """ 125 | Returns the mintbucket for the current account 126 | """ 127 | if self.__MintBucket is None: 128 | self.__MintBucket = get_mint_bucket_name(self.Region) 129 | return self.__MintBucket 130 | -------------------------------------------------------------------------------- /senza/docker.py: -------------------------------------------------------------------------------- 1 | """ 2 | Functions to interact with docker 3 | """ 4 | 5 | import requests 6 | 7 | 8 | def docker_image_exists(docker_image: str) -> bool: 9 | """ 10 | Check whether the docker image exists by calling the Docker registry REST API 11 | """ 12 | 13 | parts = docker_image.split('/') 14 | registry = parts[0] 15 | repo = '/'.join(parts[1:]) 16 | repo, tag = repo.split(':') 17 | 18 | for scheme in 'https', 'http': 19 | try: 20 | url = '{scheme}://{registry}/v2/{repo}/tags/list'.format(scheme=scheme, 21 | registry=registry, 22 | repo=repo) 23 | response = requests.get(url, timeout=5) 24 | result = response.json() 25 | return tag in result.get('tags', []) 26 | except requests.RequestException: 27 | pass 28 | return False 29 | -------------------------------------------------------------------------------- /senza/error_handling.py: -------------------------------------------------------------------------------- 1 | """ 2 | Functions to handle exceptions that bubble to the top, including Sentry 3 | integration 4 | """ 5 | 6 | import sys 7 | from tempfile import NamedTemporaryFile 8 | from traceback import format_exception 9 | from typing import Optional # noqa: F401 10 | 11 | import yaml.constructor 12 | from botocore.exceptions import ClientError, NoCredentialsError 13 | from clickclick import fatal_error 14 | from raven import Client 15 | 16 | import senza 17 | from .configuration import configuration 18 | from .exceptions import (InvalidDefinition, InvalidUserDataType, 19 | PiuNotFound, SecurityGroupNotFound, 20 | InvalidParameterFile) 21 | from .manaus.exceptions import (ELBNotFound, HostedZoneNotFound, InvalidState, 22 | RecordNotFound) 23 | from .manaus.utils import extract_client_error_code 24 
| 25 | 26 | def store_exception(exception: Exception) -> str: 27 | """ 28 | Stores the exception in a temporary file and returns its filename 29 | """ 30 | 31 | tracebacks = format_exception(etype=type(exception), 32 | value=exception, 33 | tb=exception.__traceback__) # type: [str] 34 | 35 | content = ''.join(tracebacks) 36 | 37 | with NamedTemporaryFile(prefix="senza-traceback-", 38 | delete=False) as error_file: 39 | file_name = error_file.name 40 | error_file.write(content.encode()) 41 | 42 | return file_name 43 | 44 | 45 | def is_credentials_expired_error(client_error: ClientError) -> bool: 46 | """Return true if the exception's error code is ExpiredToken or RequestExpired""" 47 | return extract_client_error_code(client_error) in ['ExpiredToken', 48 | 'RequestExpired'] 49 | 50 | 51 | def is_access_denied_error(client_error: ClientError) -> bool: 52 | """ 53 | Checks the ``ClientError`` details to find out if it is an 54 | Access Denied Error 55 | """ 56 | return extract_client_error_code(client_error) in ['AccessDenied'] 57 | 58 | 59 | def is_validation_error(client_error: ClientError) -> bool: 60 | """ 61 | Checks the ``ClientError`` details to find out if it is an 62 | Validation Error 63 | """ 64 | return extract_client_error_code(client_error) == 'ValidationError' 65 | 66 | 67 | def die_fatal_error(message): 68 | """Sent error message to stderr, in red, and exit""" 69 | fatal_error(message, err=True) 70 | 71 | 72 | class HandleExceptions: 73 | """Class HandleExceptions will display various error messages 74 | depending on the type of the exception and show the stacktrace for general exceptions 75 | depending on the value of stacktrace_visible""" 76 | 77 | stacktrace_visible = False 78 | 79 | def __init__(self, function): 80 | self.function = function 81 | 82 | def die_unknown_error(self, unknown_exception: Exception): 83 | """ 84 | Handles unknown exceptions, shipping them to sentry if it's configured. 85 | 86 | If stacktrace_visible the stacktrace will be printed otherwise the 87 | stacktrace will be stored in a temporary file or sent to sentry. 88 | """ 89 | if sentry: 90 | # The exception should always be sent to sentry if sentry is 91 | # configured 92 | sentry.captureException() 93 | if self.stacktrace_visible: 94 | raise unknown_exception 95 | elif sentry: 96 | die_fatal_error("Unknown Error: {e}.\n" 97 | "This error will be pushed to sentry ".format( 98 | e=unknown_exception)) 99 | elif not sentry: 100 | file_name = store_exception(unknown_exception) 101 | die_fatal_error('Unknown Error: {e}.\n' 102 | 'Please create an issue with the ' 103 | 'content of {fn}'.format(e=unknown_exception, 104 | fn=file_name)) 105 | 106 | def __call__(self, *args, **kwargs): 107 | try: 108 | self.function(*args, **kwargs) 109 | except NoCredentialsError: 110 | die_fatal_error( 111 | 'No AWS credentials found. 
Use the "zaws" command-line tool ' 112 | 'to get a temporary access key\n' 113 | 'or manually configure either ~/.aws/credentials ' 114 | 'or AWS_ACCESS_KEY_ID/AWS_SECRET_ACCESS_KEY.') 115 | except ClientError as client_error: 116 | sys.stdout.flush() 117 | if is_credentials_expired_error(client_error): 118 | die_fatal_error('AWS credentials have expired.\n' 119 | 'Use the "zaws" command line tool to get a new ' 120 | 'temporary access key.') 121 | elif is_access_denied_error(client_error): 122 | die_fatal_error( 123 | "AWS missing access rights.\n{}".format( 124 | client_error.response['Error']['Message'])) 125 | elif is_validation_error(client_error): 126 | die_fatal_error( 127 | "Validation Error: {}".format( 128 | client_error.response['Error']['Message'])) 129 | else: 130 | self.die_unknown_error(client_error) 131 | except yaml.constructor.ConstructorError as yaml_error: 132 | err_mesg = "Error parsing definition file:\n{}".format(yaml_error) 133 | if yaml_error.problem == "found unhashable key": 134 | err_mesg += "Please quote all variable values" 135 | die_fatal_error(err_mesg) 136 | except PiuNotFound as error: 137 | die_fatal_error( 138 | "{}\nYou can install piu with the following command:" 139 | "\nsudo pip3 install --upgrade stups-piu".format(error)) 140 | except (ELBNotFound, HostedZoneNotFound, RecordNotFound, 141 | InvalidDefinition, InvalidState, InvalidUserDataType, 142 | InvalidParameterFile) as error: 143 | die_fatal_error(error) 144 | except SecurityGroupNotFound as error: 145 | message = ("{}\nRun `senza init` to (re-)create " 146 | "the security group.").format(error) 147 | die_fatal_error(message) 148 | except Exception as unknown_exception: # pylint: disable=locally-disabled, broad-except 149 | # Catch All 150 | self.die_unknown_error(unknown_exception) 151 | 152 | 153 | def setup_sentry(sentry_endpoint: Optional[str]): 154 | """ 155 | This function setups sentry, this exists mostly to make sentry integration 156 | easier to test 157 | """ 158 | if sentry_endpoint is not None: 159 | sentry_client = Client(sentry_endpoint, 160 | release=senza.__version__) 161 | else: 162 | sentry_client = None 163 | 164 | return sentry_client 165 | 166 | 167 | sentry = setup_sentry(configuration.get( 168 | 'sentry.endpoint')) # pylint: disable=locally-disabled, invalid-name 169 | -------------------------------------------------------------------------------- /senza/exceptions.py: -------------------------------------------------------------------------------- 1 | class SenzaException(Exception): 2 | """ 3 | Base class for Senza exceptions 4 | """ 5 | 6 | 7 | class InvalidState(SenzaException): 8 | """ 9 | Exception raised when executing an action would try to change a stack 10 | to an invalid state 11 | """ 12 | 13 | 14 | class PiuNotFound(SenzaException, FileNotFoundError): 15 | """ 16 | Error raised when piu executable is not found 17 | """ 18 | 19 | def __init__(self): 20 | super().__init__('Command not found: piu') 21 | 22 | 23 | class InvalidConfigKey(SenzaException, ValueError): 24 | """ 25 | Error raised when trying to use an Invalid Config Key 26 | """ 27 | 28 | def __init__(self, message: str): 29 | super().__init__(message) 30 | 31 | 32 | class InvalidDefinition(SenzaException): 33 | """ 34 | Exception raised when trying to parse an invalid senza definition 35 | """ 36 | 37 | def __init__(self, path: str, reason: str): 38 | self.path = path 39 | self.reason = reason 40 | 41 | def __str__(self): 42 | return ("{path} is not a valid senza definition: " 43 | 
"{reason}".format_map(vars(self))) 44 | 45 | 46 | class InvalidParameterFile(SenzaException): 47 | """ 48 | Exception raised when trying to parse an invalid parameter 49 | """ 50 | 51 | def __init__(self, path: str, reason: str): 52 | self.path = path 53 | self.reason = reason 54 | 55 | def __str__(self): 56 | return ("{path} is not a valid parameter: " 57 | "{reason}".format_map(vars(self))) 58 | 59 | 60 | class SecurityGroupNotFound(SenzaException): 61 | """ 62 | Exception raised when a Security Group is not found 63 | """ 64 | 65 | def __init__(self, security_group: str): 66 | self.security_group = security_group 67 | 68 | def __str__(self): 69 | return 'Security Group "{}" does not exist.'.format(self.security_group) 70 | 71 | 72 | class InvalidUserDataType(SenzaException): 73 | """ 74 | Exception raised when the type of the new user data is different from the 75 | old user data 76 | """ 77 | 78 | def __init__(self, old_type: type, new_type: type): 79 | self.old_type = old_type 80 | self.new_type = new_type 81 | 82 | def __str__(self): 83 | return ('Current user data is a {} but provided user data ' 84 | 'is a {}.').format(self.__human_readable_type(self.old_type), 85 | self.__human_readable_type(self.new_type)) 86 | 87 | def __human_readable_type(self, t) -> str: 88 | if t is str: 89 | return "string" 90 | elif t is dict: 91 | return "map" 92 | elif t is int: 93 | return 'integer' 94 | else: 95 | return str(t) 96 | -------------------------------------------------------------------------------- /senza/manaus/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Package with high level classes to access aws components 3 | """ 4 | 5 | # import relevant exceptions 6 | from botocore.exceptions import ClientError # NOQA 7 | -------------------------------------------------------------------------------- /senza/manaus/acm.py: -------------------------------------------------------------------------------- 1 | from datetime import datetime, timezone 2 | from enum import Enum 3 | from functools import total_ordering 4 | from ssl import CertificateError, match_hostname 5 | from typing import Any, Dict, Iterator, List, Optional 6 | 7 | from .boto_proxy import BotoClientProxy 8 | 9 | 10 | class ACMCertificateStatus(str, Enum): 11 | expired = "EXPIRED" 12 | failed = "FAILED" 13 | inactive = "INACTIVE" 14 | issued = "ISSUED" 15 | pending_validation = "PENDING_VALIDATION" 16 | revoked = "REVOKED" 17 | validation_timed_out = "VALIDATION_TIMED_OUT" 18 | 19 | 20 | @total_ordering 21 | class ACMCertificate: 22 | """ 23 | See: 24 | http://boto3.readthedocs.io/en/latest/reference/services/acm.html#ACM.Client.list_certificates 25 | http://boto3.readthedocs.io/en/latest/reference/services/acm.html#ACM.Client.describe_certificate 26 | """ 27 | 28 | def __init__( 29 | self, 30 | domain_name: str, 31 | arn: str, 32 | subject_alternative_name: List[str], 33 | domain_validation_options: List[Dict], 34 | serial: str, 35 | subject: str, 36 | issuer: str, 37 | created_at: datetime, 38 | imported_at: datetime, 39 | issued_at: datetime, 40 | status: str, 41 | not_before: datetime, 42 | not_after: datetime, 43 | signature_algorithm: str, 44 | in_use_by: List[str], 45 | revoked_at: Optional[datetime], 46 | revocation_reason: Optional[str], 47 | ): 48 | self.domain_name = domain_name 49 | self.arn = arn 50 | self.subject_alternative_name = subject_alternative_name 51 | self.domain_validation_options = domain_validation_options 52 | self.serial = serial 53 | 
self.subject = subject 54 | self.issuer = issuer 55 | self.created_at = created_at 56 | self.imported_at = imported_at 57 | self.issued_at = issued_at 58 | self.status = ACMCertificateStatus(status) 59 | self.not_before = not_before 60 | self.not_after = not_after 61 | self.signature_algorithm = signature_algorithm 62 | self.in_use_by = in_use_by 63 | 64 | self.revoked_at = revoked_at 65 | self.revocation_reason = revocation_reason 66 | 67 | def __lt__(self, other: "ACMCertificate"): 68 | self_date = self.created_at or self.imported_at 69 | other_date = other.created_at or other.imported_at 70 | return self_date < other_date 71 | 72 | def __eq__(self, other: "ACMCertificate"): 73 | return self.arn == other.arn 74 | 75 | def __repr__(self): 76 | return "".format_map(vars(self)) 77 | 78 | @classmethod 79 | def from_boto_dict(cls, certificate: Dict[str, Any]) -> "ACMCertificate": 80 | """ 81 | Creates an ACMCertificate based on the dictionary returned by 82 | describe_certificate 83 | """ 84 | 85 | domain_name = certificate["DomainName"] 86 | arn = certificate["CertificateArn"] 87 | subject_alternative_name = certificate["SubjectAlternativeNames"] 88 | domain_validation_options = certificate["DomainValidationOptions"] 89 | subject = certificate["Subject"] 90 | created_at = certificate.get("CreatedAt") 91 | imported_at = certificate.get("ImportedAt") 92 | status = certificate["Status"] 93 | signature_algorithm = certificate["SignatureAlgorithm"] 94 | in_use_by = certificate["InUseBy"] 95 | serial = certificate.get("Serial") 96 | issuer = certificate.get("Issuer") 97 | issued_at = certificate.get("IssuedAt") 98 | not_before = certificate.get("NotBefore") 99 | not_after = certificate.get("NotAfter") 100 | 101 | revoked_at = certificate.get("RevokedAt") 102 | revocation_reason = certificate.get("RevocationReason") 103 | 104 | return cls( 105 | domain_name, 106 | arn, 107 | subject_alternative_name, 108 | domain_validation_options, 109 | serial, 110 | subject, 111 | issuer, 112 | created_at, 113 | imported_at, 114 | issued_at, 115 | status, 116 | not_before, 117 | not_after, 118 | signature_algorithm, 119 | in_use_by, 120 | revoked_at, 121 | revocation_reason, 122 | ) 123 | 124 | @classmethod 125 | def get_by_arn(cls, region: str, arn: str) -> "ACMCertificate": 126 | """ 127 | Gets a ACMCertificate based on ARN alone 128 | """ 129 | client = BotoClientProxy("acm", region) 130 | certificate = client.describe_certificate(CertificateArn=arn)["Certificate"] 131 | return cls.from_boto_dict(certificate) 132 | 133 | @staticmethod 134 | def arn_is_acm_certificate(arn: Optional[str] = None) -> bool: 135 | if arn is None: 136 | return False 137 | else: 138 | return arn.startswith("arn:aws:acm:") 139 | 140 | def is_valid(self, when: Optional[datetime] = None) -> bool: 141 | """ 142 | Checks if the certificate is still valid 143 | """ 144 | when = when if when is not None else datetime.now(timezone.utc) 145 | 146 | if self.status != ACMCertificateStatus.issued: 147 | return False 148 | 149 | return self.not_before < when < self.not_after 150 | 151 | def matches(self, domain_name: str) -> bool: 152 | """ 153 | Checks if certificate subject or alt names match the domain name. 
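For illustration, a sketch of how the ACM helpers in this module are typically consumed; the region, domain and ARN are made-up examples and the certificate listing requires AWS credentials::

    from senza.manaus.acm import ACM, ACMCertificate

    acm = ACM('eu-central-1')
    # yields only ISSUED, time-valid certificates whose subject or alt names match the domain
    for certificate in acm.get_certificates(domain_name='myapp.example.org'):
        print(certificate.arn, certificate.not_after)

    assert ACMCertificate.arn_is_acm_certificate(
        'arn:aws:acm:eu-central-1:123456789012:certificate/abcd1234')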
154 | """ 155 | # python ssl friendly certificate: 156 | subject = ((("commonName", self.domain_name),),) 157 | alt_name = [("DNS", name) for name in self.subject_alternative_name] 158 | certificate = {"subject": subject, "subjectAltName": alt_name} 159 | 160 | try: 161 | match_hostname(certificate, domain_name) 162 | except CertificateError: 163 | return False 164 | else: 165 | return True 166 | 167 | 168 | class ACM: 169 | """ 170 | From https://aws.amazon.com/certificate-manager/ 171 | 172 | AWS Certificate Manager is a service that lets you easily provision, 173 | manage, and deploy Secure Sockets Layer/Transport Layer Security (SSL/TLS) 174 | certificates for use with AWS services. 175 | 176 | See http://boto3.readthedocs.io/en/latest/reference/services/acm.html 177 | """ 178 | 179 | def __init__(self, region=str): 180 | self.region = region 181 | 182 | def get_certificates( 183 | self, *, valid_only: bool = True, domain_name: Optional[str] = None 184 | ) -> Iterator[ACMCertificate]: 185 | """ 186 | Gets certificates from ACM. By default it returns all valid certificates 187 | 188 | :param region: AWS region 189 | :param valid_only: Return only valid certificates 190 | :param domain_name: Return only certificates that match the domain 191 | """ 192 | # TODO implement pagination 193 | client = BotoClientProxy("acm", self.region) 194 | certificates = client.list_certificates()["CertificateSummaryList"] 195 | for summary in certificates: 196 | arn = summary["CertificateArn"] 197 | certificate = ACMCertificate.get_by_arn(self.region, arn) 198 | if valid_only and not certificate.is_valid(): 199 | pass 200 | elif domain_name is not None and not certificate.matches(domain_name): 201 | pass 202 | else: 203 | yield certificate 204 | -------------------------------------------------------------------------------- /senza/manaus/boto_proxy.py: -------------------------------------------------------------------------------- 1 | from time import sleep 2 | 3 | import boto3 4 | from botocore.exceptions import ClientError 5 | 6 | from .utils import extract_client_error_code 7 | 8 | __all__ = ['BotoClientProxy'] 9 | 10 | 11 | class BotoClientProxy: 12 | def __init__(self, *args, **kwargs): 13 | self.__client = boto3.client(*args, **kwargs) 14 | 15 | @staticmethod 16 | def __decorator(function, *args, **kwargs): 17 | def wrapper(*args, **kwargs): 18 | max_tries = 5 19 | sleep_time = 10 20 | last_error = None 21 | for _ in range(max_tries): 22 | try: 23 | return function(*args, **kwargs) 24 | except ClientError as error: 25 | last_error = error 26 | if extract_client_error_code(error) == "Throttling": 27 | sleep(sleep_time) 28 | else: 29 | raise 30 | else: 31 | if last_error: 32 | raise last_error 33 | return wrapper 34 | 35 | def __getattr__(self, item): 36 | client_attr = getattr(self.__client, item) 37 | if callable(client_attr): 38 | return self.__decorator(client_attr) 39 | else: 40 | return client_attr 41 | -------------------------------------------------------------------------------- /senza/manaus/ec2.py: -------------------------------------------------------------------------------- 1 | from collections import OrderedDict 2 | from typing import Dict, Iterator, List, Optional 3 | 4 | import boto3 5 | 6 | from .exceptions import VPCError 7 | 8 | 9 | class EC2VPC: 10 | 11 | """ 12 | See: 13 | http://boto3.readthedocs.io/en/latest/reference/services/ec2.html#vpc 14 | """ 15 | 16 | def __init__(self, 17 | vpc_id: str, 18 | is_default: bool, 19 | tags: Optional[List[Dict[str, str]]]): 20 | 
self.vpc_id = vpc_id 21 | self.is_default = is_default 22 | tags = tags or [] # type: List[Dict[str, str]] 23 | self.tags = OrderedDict([(t['Key'], t['Value']) for t in tags]) # type: Dict[str, str] 24 | 25 | self.name = self.tags.get('Name', self.vpc_id) 26 | 27 | def __str__(self): 28 | return '{name} ({vpc_id})'.format_map(vars(self)) 29 | 30 | def __repr__(self): 31 | return ''.format_map(vars(self)) 32 | 33 | @classmethod 34 | def from_boto_vpc(cls, vpc) -> "EC2VPC": 35 | """ 36 | Converts an ec2.VPC as returned by resource.vpcs.all() 37 | 38 | See: 39 | http://boto3.readthedocs.io/en/latest/reference/services/ec2.html#vpc 40 | """ 41 | 42 | return cls(vpc.vpc_id, vpc.is_default, vpc.tags) 43 | 44 | 45 | class EC2: 46 | 47 | def __init__(self, region: str): 48 | self.region = region 49 | 50 | def get_all_vpcs(self) -> Iterator[EC2VPC]: 51 | """ 52 | Get all VPCs from the account 53 | """ 54 | resource = boto3.resource('ec2', self.region) 55 | 56 | for vpc in resource.vpcs.all(): 57 | yield EC2VPC.from_boto_vpc(vpc) 58 | 59 | def get_default_vpc(self) -> EC2VPC: 60 | """ 61 | Get one VPC from the account, either the default or, if only one 62 | exists, that one. 63 | """ 64 | resource = boto3.resource('ec2', self.region) 65 | 66 | number_of_vpcs = 0 67 | # We shouldn't use the list with .all() because it has internal paging! 68 | for vpc_number, vpc in enumerate(resource.vpcs.all(), start=1): 69 | number_of_vpcs = vpc_number 70 | 71 | if vpc.is_default: 72 | return EC2VPC.from_boto_vpc(vpc) 73 | 74 | if vpc_number == 1: 75 | first_vpc = vpc 76 | 77 | if number_of_vpcs == 0: 78 | raise VPCError("Can't find any VPC!", number_of_vpcs) 79 | elif number_of_vpcs == 1: 80 | # Use the only one VPC if it's not the default VPC found 81 | return EC2VPC.from_boto_vpc(first_vpc) 82 | else: 83 | raise VPCError("Multiple VPCs are only supported if one " 84 | "VPC is the default VPC (IsDefault=true)!", 85 | number_of_vpcs) 86 | -------------------------------------------------------------------------------- /senza/manaus/elb.py: -------------------------------------------------------------------------------- 1 | """ 2 | ELB_ related classes and functions. 3 | 4 | For more information see the `ELB documentation`_ and the `boto3 documentation`_ 5 | 6 | .. _ELB: https://aws.amazon.com/elasticloadbalancing/ 7 | .. _ELB documentation: https://aws.amazon.com/documentation/elastic-load-balancing/ 8 | .. _boto3 documentation: http://boto3.readthedocs.io/en/latest/reference/services/elb.html 9 | """ 10 | 11 | from datetime import datetime 12 | from enum import Enum 13 | from typing import Any, Dict, List, Optional 14 | 15 | from .boto_proxy import BotoClientProxy 16 | from .exceptions import ELBNotFound 17 | from .route53 import Route53HostedZone 18 | 19 | 20 | class ELBScheme(str, Enum): 21 | """ 22 | DNS record type 23 | """ 24 | 25 | internet_facing = "internet-facing" 26 | internal = "internal" 27 | 28 | @classmethod 29 | def from_str(cls, value: str) -> "ELBScheme": 30 | """ 31 | Returns the an attribute based on a string as returned by boto 32 | """ 33 | value = value.replace("-", "_") 34 | return cls[value] 35 | 36 | 37 | class ELBHealthCheck: 38 | 39 | """ 40 | Represents the HealthCheck section of an ELB. 
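For illustration, a usage sketch of the ``EC2``/``EC2VPC`` helpers above; the region is a made-up example and the call needs AWS credentials::

    from senza.manaus.ec2 import EC2
    from senza.manaus.exceptions import VPCError

    ec2 = EC2('eu-central-1')
    try:
        vpc = ec2.get_default_vpc()   # the default VPC, or the only VPC in the account
    except VPCError as error:
        print('No usable VPC:', error, error.number_of_vpcs)
    else:
        print(vpc.vpc_id, vpc.name)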
41 | 42 | See `HealthCheck` key in: 43 | http://boto3.readthedocs.io/en/latest/reference/services/elb.html#ElasticLoadBalancing.Client.describe_load_balancers 44 | """ 45 | 46 | def __init__( 47 | self, 48 | target: str, 49 | interval: int, 50 | timeout: int, 51 | unhealthy_threshold: int, 52 | healthy_threshold: int, 53 | ): 54 | self.target = target 55 | self.interval = interval 56 | self.timeout = timeout 57 | self.unhealthy_threshold = unhealthy_threshold 58 | self.healthy_threshold = healthy_threshold 59 | 60 | @classmethod 61 | def from_boto_dict(cls, health_check: Dict[str, Any]) -> "ELBHealthCheck": 62 | """ 63 | Instantiates a `ELBHealthCheck` object from the dictionary returned 64 | by boto3. 65 | """ 66 | target = health_check["Target"] 67 | interval = health_check["Interval"] 68 | timeout = health_check["Timeout"] 69 | unhealthy_threshold = health_check["UnhealthyThreshold"] 70 | healthy_threshold = health_check["HealthyThreshold"] 71 | 72 | cls(target, interval, timeout, unhealthy_threshold, healthy_threshold) 73 | 74 | 75 | class ELBListener: 76 | 77 | """ 78 | Represents the ELBListener section of an ELB. 79 | 80 | See `ELBListener` key in: 81 | http://boto3.readthedocs.io/en/latest/reference/services/elb.html#ElasticLoadBalancing.Client.describe_load_balancers 82 | """ 83 | 84 | def __init__( 85 | self, 86 | protocol: str, 87 | load_balancer_port: int, 88 | instance_protocol: str, 89 | instance_port: int, 90 | ssl_certificate_id: str, 91 | ): 92 | self.protocol = protocol 93 | self.load_balancer_port = load_balancer_port 94 | self.instance_protocol = instance_protocol 95 | self.instance_port = instance_port 96 | self.ssl_certificate_id = ssl_certificate_id 97 | 98 | @classmethod 99 | def from_boto_dict(cls, listener) -> "ELBListener": 100 | """ 101 | Instantiates a `ELBListener` object from the dictionary returned 102 | by boto3. 103 | """ 104 | 105 | protocol = listener["Protocol"] 106 | load_balancer_port = listener["LoadBalancerPort"] 107 | instance_protocol = listener["InstanceProtocol"] 108 | instance_port = listener["InstancePort"] 109 | ssl_certificate_id = listener.get("SSLCertificateId") 110 | 111 | return cls( 112 | protocol, 113 | load_balancer_port, 114 | instance_protocol, 115 | instance_port, 116 | ssl_certificate_id, 117 | ) 118 | 119 | 120 | class ELB: 121 | 122 | """ 123 | Elastic Load Balancer. 
124 | 125 | See: 126 | http://boto3.readthedocs.io/en/latest/reference/services/elb.html#ElasticLoadBalancing.Client.describe_load_balancers 127 | """ 128 | 129 | def __init__( 130 | self, 131 | name: str, 132 | dns_name: str, 133 | hosted_zone_name: str, 134 | hosted_zone_id: str, 135 | listener_descriptions: dict, 136 | policies: dict, 137 | backend_server_descriptions: list, 138 | availability_zones: List[str], 139 | subnets: List[str], 140 | instance_ids: List[str], 141 | vpc_id: str, 142 | health_check: ELBHealthCheck, 143 | source_security_group: Dict[str, str], 144 | security_groups: List[str], 145 | created_time: datetime, 146 | scheme: ELBScheme, 147 | listeners: Optional[ELBListener] = None, 148 | region: Optional[str] = None, 149 | ): 150 | self.name = name 151 | self.dns_name = dns_name 152 | self.hosted_zone_name = hosted_zone_name 153 | self.hosted_zone_id = hosted_zone_id 154 | self.policies = policies 155 | self.backend_server_descriptions = backend_server_descriptions 156 | self.availability_zones = availability_zones 157 | self.subnets = subnets 158 | self.vpc_id = vpc_id 159 | self.instance_ids = instance_ids 160 | self.health_check = health_check 161 | self.source_security_group = source_security_group 162 | self.security_groups = security_groups 163 | self.created_time = created_time 164 | self.scheme = scheme 165 | 166 | self.hosted_zone = Route53HostedZone(name=hosted_zone_name, id=hosted_zone_id) 167 | 168 | if listeners is None: 169 | listeners = [ 170 | ELBListener.from_boto_dict(each["Listener"]) 171 | for each in listener_descriptions 172 | ] 173 | self.listeners = listeners 174 | 175 | if region is None: 176 | _, region, _ = dns_name.split(".", maxsplit=2) 177 | 178 | self.region = region 179 | 180 | @classmethod 181 | def from_boto_dict(cls, load_balancer) -> "ELB": 182 | """ 183 | Instantiates a `ELB` object from the dictionary returned 184 | by `boto3.client('elb').describe_load_balancers`. 185 | """ 186 | 187 | name = load_balancer["LoadBalancerName"] 188 | dns_name = load_balancer["DNSName"] 189 | hosted_zone_name = load_balancer.get("CanonicalHostedZoneName") 190 | hosted_zone_id = load_balancer["CanonicalHostedZoneNameID"] 191 | listener_descriptions = load_balancer["ListenerDescriptions"] 192 | policies = load_balancer["Policies"] 193 | backend_server_descriptions = load_balancer["BackendServerDescriptions"] 194 | availability_zones = load_balancer["AvailabilityZones"] 195 | subnets = load_balancer["Subnets"] 196 | vpc_id = load_balancer["VPCId"] 197 | instance_ids = load_balancer["Instances"] 198 | health_check = ELBHealthCheck.from_boto_dict(load_balancer["HealthCheck"]) 199 | source_security_group = load_balancer["SourceSecurityGroup"] 200 | security_groups = load_balancer["SecurityGroups"] 201 | created_time = load_balancer["CreatedTime"] 202 | scheme = ELBScheme.from_str(load_balancer["Scheme"]) 203 | 204 | return cls( 205 | name, 206 | dns_name, 207 | hosted_zone_name, 208 | hosted_zone_id, 209 | listener_descriptions, 210 | policies, 211 | backend_server_descriptions, 212 | availability_zones, 213 | subnets, 214 | vpc_id, 215 | instance_ids, 216 | health_check, 217 | source_security_group, 218 | security_groups, 219 | created_time, 220 | scheme, 221 | ) 222 | 223 | @classmethod 224 | def get_by_dns_name(cls, dns_name: str) -> "ELB": 225 | """ 226 | Gets a `ELB` by its DNS name and raises and `ELBNotFound` exception 227 | if such an ELB doesn't exist. 
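For illustration, a usage sketch of ``ELB.get_by_dns_name``; the DNS name is a made-up example (the region is derived from its second label) and the lookup needs AWS credentials::

    from senza.manaus.elb import ELB, ELBScheme
    from senza.manaus.exceptions import ELBNotFound

    try:
        elb = ELB.get_by_dns_name('myapp-123456789.eu-central-1.elb.amazonaws.com')
    except ELBNotFound:
        elb = None
    if elb is not None and elb.scheme is ELBScheme.internet_facing:
        print(elb.name, elb.vpc_id)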
228 | 229 | :raises: ELBNotFound 230 | """ 231 | _, region, _ = dns_name.split(".", maxsplit=2) 232 | client = BotoClientProxy("elb", region) 233 | 234 | response = client.describe_load_balancers() 235 | next_marker = response.get("NextMarker") 236 | load_balancers = response["LoadBalancerDescriptions"] # type: List 237 | while next_marker: 238 | response = client.describe_load_balancers(Marker=next_marker) 239 | next_marker = response.get("NextMarker") 240 | load_balancers.extend(response["LoadBalancerDescriptions"]) 241 | 242 | for load_balancer in load_balancers: 243 | if load_balancer["DNSName"] == dns_name: 244 | return cls.from_boto_dict(load_balancer) 245 | 246 | raise ELBNotFound(dns_name) 247 | -------------------------------------------------------------------------------- /senza/manaus/exceptions.py: -------------------------------------------------------------------------------- 1 | """ 2 | Exceptions to be raised by Manaus components 3 | """ 4 | 5 | 6 | class ManausException(Exception): 7 | """ 8 | Base class for Manaus exceptions 9 | """ 10 | 11 | 12 | class InvalidState(ManausException): 13 | """ 14 | Exception raised when executing an action would try to change a stack 15 | to an invalid state 16 | """ 17 | 18 | def __init__(self, reason: str): 19 | super().__init__("Invalid State: {}".format(reason)) 20 | 21 | 22 | class ELBNotFound(ManausException): 23 | """ 24 | Error raised when the ELB is not found 25 | """ 26 | 27 | def __init__(self, domain_name: str): 28 | super().__init__("ELB not found: {}".format(domain_name)) 29 | 30 | 31 | class HostedZoneNotFound(ManausException): 32 | """ 33 | Error raised when the Route53 hosted zone is not found 34 | """ 35 | 36 | def __init__(self, name: str): 37 | super().__init__("Hosted Zone not found: {}".format(name)) 38 | 39 | 40 | class RecordNotFound(ManausException): 41 | """ 42 | Error raised when the Route53 record is not found 43 | """ 44 | 45 | def __init__(self, name: str): 46 | super().__init__("Route 53 Record not found: {}".format(name)) 47 | 48 | 49 | class StackNotFound(ManausException): 50 | """ 51 | Error raised when the CloudFormation Stack is not found 52 | """ 53 | 54 | def __init__(self, name: str): 55 | super().__init__("CloudFormation Stack not found: {}".format(name)) 56 | 57 | 58 | class StackNotUpdated(ManausException): 59 | """ 60 | Error raised when the CloudFormation Stack is not updated because no changes 61 | are needed. 62 | """ 63 | 64 | def __init__(self, name: str): 65 | super().__init__("CloudFormation Stack not updated: {}".format(name)) 66 | 67 | 68 | class VPCError(ManausException, AttributeError): 69 | """ 70 | Error raised when there are issues with VPCs configuration 71 | """ 72 | 73 | def __init__(self, message: str, number_of_vpcs: int = None): 74 | super().__init__(message) 75 | self.number_of_vpcs = number_of_vpcs 76 | -------------------------------------------------------------------------------- /senza/manaus/iam.py: -------------------------------------------------------------------------------- 1 | """ 2 | IAM related classes and functions. 3 | 4 | For more information see the `IAM documentation`_ and the 5 | `boto3 documentation`_ 6 | 7 | .. _IAM: http://docs.aws.amazon.com/IAM/latest/UserGuide/introduction.html 8 | .. _IAM documentation: https://aws.amazon.com/documentation/iam/ 9 | .. 
_boto3 documentation: 10 | http://boto3.readthedocs.io/en/latest/reference/services/iam.html 11 | """ 12 | 13 | from datetime import datetime, timezone 14 | from typing import Any, Dict, Iterator, Optional, Union 15 | 16 | import boto3 17 | from botocore.exceptions import ClientError 18 | 19 | from .boto_proxy import BotoClientProxy 20 | 21 | 22 | class IAMServerCertificate: 23 | """ 24 | Server certificate stored in IAM. 25 | 26 | See: 27 | http://boto3.readthedocs.io/en/latest/reference/services/iam.html#IAM.Client.get_server_certificate 28 | """ 29 | 30 | def __init__( 31 | self, 32 | metadata: Dict[str, Union[str, datetime]], 33 | certificate_body: str, 34 | certificate_chain: str, 35 | ): 36 | 37 | self.metadata = metadata 38 | self.certificate_body = certificate_body 39 | self.certificate_chain = certificate_chain 40 | 41 | # metadata properties 42 | self.name = metadata["ServerCertificateName"] # type: str 43 | self.arn = metadata["Arn"] # type: str 44 | self.expiration = metadata["Expiration"] # type: datetime 45 | self.path = metadata["Path"] # type: str 46 | self.certificate_id = metadata["ServerCertificateId"] # type: str 47 | self.upload_date = metadata["UploadDate"] # type: datetime 48 | 49 | def __lt__(self, other: "IAMServerCertificate"): 50 | return self.upload_date < other.upload_date 51 | 52 | def __eq__(self, other: "IAMServerCertificate"): 53 | return self.arn == other.arn 54 | 55 | def __repr__(self): 56 | return "".format_map(vars(self)) 57 | 58 | @classmethod 59 | def from_boto_dict( 60 | cls, server_certificate: Dict[str, Any] 61 | ) -> "IAMServerCertificate": 62 | """ 63 | Converts the dict returned by ``boto3.client.get_server_certificate`` 64 | to a ``IAMServerCertificate`` instance. 65 | """ 66 | 67 | metadata = server_certificate["ServerCertificateMetadata"] 68 | certificate_body = server_certificate["CertificateBody"] 69 | certificate_chain = server_certificate["CertificateChain"] 70 | 71 | return cls(metadata, certificate_body, certificate_chain) 72 | 73 | @classmethod 74 | def from_boto_server_certificate(cls, server_certificate) -> "IAMServerCertificate": 75 | """ 76 | Converts an ServerCertificate as returned by server_certificates.all() 77 | """ 78 | metadata = server_certificate.server_certificate_metadata 79 | certificate_body = server_certificate.certificate_body 80 | certificate_chain = server_certificate.certificate_chain 81 | 82 | return cls(metadata, certificate_body, certificate_chain) 83 | 84 | @classmethod 85 | def get_by_name(cls, region: str, name: str) -> "IAMServerCertificate": 86 | """ 87 | Get IAMServerCertificate using the name of the server certificate 88 | """ 89 | client = BotoClientProxy("iam", region) 90 | iam = IAM(region) 91 | 92 | try: 93 | response = client.get_server_certificate(ServerCertificateName=name) 94 | server_certificate = response["ServerCertificate"] 95 | certificate = cls.from_boto_dict(server_certificate) 96 | except ClientError as error: 97 | # IAM.get_certificates can get certificates with a suffix 98 | certificates = sorted(iam.get_certificates(name=name), reverse=True) 99 | try: 100 | # try to return the latest certificate that matches the name 101 | certificate = certificates[0] 102 | except IndexError: 103 | raise error 104 | 105 | return certificate 106 | 107 | @staticmethod 108 | def arn_is_server_certificate(arn: Optional[str] = None): 109 | """ 110 | Checks if the Amazon Resource Name (ARN) refers to an iam 111 | server certificate. 
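For illustration, a usage sketch of the IAM server-certificate helpers; the region and certificate name are made-up examples::

    from senza.manaus.iam import IAM, IAMServerCertificate

    # exact name first, otherwise the newest certificate whose name starts with it
    certificate = IAMServerCertificate.get_by_name('eu-central-1', 'myapp-example-org')
    print(certificate.arn, certificate.is_valid())

    iam = IAM('eu-central-1')
    print([c.name for c in iam.get_certificates(name='myapp')])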
112 | 113 | See: 114 | http://docs.aws.amazon.com/IAM/latest/UserGuide/reference_identifiers.html 115 | """ 116 | if arn is None: 117 | return False 118 | else: 119 | return arn.startswith("arn:aws:iam:") and "server-certificate" in arn 120 | 121 | def is_valid(self, when: Optional[datetime] = None) -> bool: 122 | """ 123 | Checks if the certificate is still valid 124 | """ 125 | when = when if when is not None else datetime.now(timezone.utc) 126 | 127 | return when < self.expiration 128 | 129 | 130 | class IAM: 131 | 132 | """ 133 | Represents the IAM service. 134 | 135 | See: 136 | http://boto3.readthedocs.io/en/latest/reference/services/iam.html 137 | """ 138 | 139 | def __init__(self, region: str): 140 | self.region = region 141 | 142 | def get_certificates( 143 | self, *, valid_only: bool = True, name: Optional[str] = None 144 | ) -> Iterator[IAMServerCertificate]: 145 | """ 146 | Gets certificates from IAM. 147 | By default it will fetch all valid certificates, but it's also possible 148 | to return also invalid certificates and filtering by name. 149 | """ 150 | resource = boto3.resource("iam", self.region) 151 | 152 | for server_certificate in resource.server_certificates.all(): 153 | certificate = IAMServerCertificate.from_boto_server_certificate( 154 | server_certificate 155 | ) 156 | 157 | if name is not None and not certificate.name.startswith(name): 158 | continue 159 | 160 | if valid_only and not certificate.is_valid(): 161 | continue 162 | 163 | yield certificate 164 | -------------------------------------------------------------------------------- /senza/manaus/utils.py: -------------------------------------------------------------------------------- 1 | """ 2 | Generic functions related to AWS/Boto/Manaus but don't belong to any specific 3 | component 4 | """ 5 | 6 | from typing import Dict, Optional # noqa: F401 pylint: disable=locally-disabled, unused-import 7 | 8 | from botocore.exceptions import ClientError 9 | 10 | __all__ = ["extract_client_error_code"] 11 | 12 | 13 | def extract_client_error_code(exception: ClientError) -> Optional[str]: 14 | """ 15 | Extracts the client error code from a boto ClientError exception. Returns 16 | None if it fails. 17 | """ 18 | error = exception.response.get('Error', {}) # type: Dict[str, Optional[str]] 19 | error_code = error.get('Code') 20 | return error_code 21 | -------------------------------------------------------------------------------- /senza/patch.py: -------------------------------------------------------------------------------- 1 | 2 | import codecs 3 | import base64 4 | import datetime 5 | 6 | import yaml 7 | 8 | from .spotinst.components import elastigroup_api 9 | from .exceptions import InvalidUserDataType 10 | from .manaus.boto_proxy import BotoClientProxy 11 | 12 | LAUNCH_CONFIGURATION_PROPERTIES = set([ 13 | 'AssociatePublicIpAddress', 14 | 'BlockDeviceMappings', 15 | 'ClassicLinkVPCId', 16 | 'ClassicLinkVPCSecurityGroups', 17 | 'EbsOptimized', 18 | 'IamInstanceProfile', 19 | 'ImageId', 20 | 'InstanceId', 21 | 'InstanceMonitoring', 22 | 'InstanceType', 23 | 'KernelId', 24 | 'KeyName', 25 | 'LaunchConfigurationName', 26 | 'PlacementTenancy', 27 | 'RamdiskId', 28 | 'SecurityGroups', 29 | 'SpotPrice', 30 | 'UserData', 31 | ]) 32 | 33 | 34 | def should_patch_user_data(new_val, old_val): 35 | ''' 36 | Validate if User Data should be patched. 
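For illustration, a sketch of how ``patch_user_data`` below merges new keys into existing Taupage user data; the keys and values shown are made-up examples::

    import yaml
    from senza.patch import patch_user_data

    old = '#taupage-ami-config\napplication_id: myapp\napplication_version: "1"\n'
    merged = patch_user_data(old, {'application_version': '2'})
    # the first line is preserved, the YAML body is updated
    assert merged.startswith('#taupage-ami-config\n')
    assert yaml.safe_load(merged.split('\n', 1)[1])['application_version'] == '2'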
37 | ''' 38 | current_user_data = yaml.safe_load(old_val) 39 | if isinstance(new_val, dict): 40 | return True 41 | elif isinstance(current_user_data, dict): 42 | raise InvalidUserDataType(type(current_user_data), 43 | type(new_val)) 44 | return False 45 | 46 | 47 | def patch_user_data(old: str, new: dict): 48 | first_line, sep, data = old.partition('\n') 49 | data = yaml.safe_load(data) 50 | if not isinstance(data, dict): 51 | raise ValueError('Instance user data has invalid YAML: must be key/value pairs') 52 | data.update(**new) 53 | return first_line + sep + yaml.safe_dump(data, default_flow_style=False) 54 | 55 | 56 | def patch_auto_scaling_group(group: dict, region: str, properties: dict): 57 | asg = BotoClientProxy('autoscaling', region) 58 | result = asg.describe_launch_configurations(LaunchConfigurationNames=[group['LaunchConfigurationName']]) 59 | lcs = result['LaunchConfigurations'] 60 | changed = False 61 | for lc in lcs: 62 | lc_props = {k: lc[k] for k in properties} 63 | if properties != lc_props: 64 | # create new launch configuration with specified properties 65 | kwargs = {} 66 | for key in LAUNCH_CONFIGURATION_PROPERTIES: 67 | # NOTE: we only take non-empty values (otherwise the parameter validation will complain :-( ) 68 | val = lc.get(key) 69 | if val is not None and val != '': 70 | if key == 'UserData': 71 | val = codecs.decode(val.encode('utf-8'), 'base64').decode('utf-8') 72 | kwargs[key] = val 73 | now = datetime.datetime.utcnow().strftime('%Y%m%dT%H%M%S') 74 | kwargs['LaunchConfigurationName'] = '{}-{}'.format(kwargs['LaunchConfigurationName'][:64], now) 75 | for key, val in properties.items(): 76 | 77 | if key == 'UserData': 78 | if should_patch_user_data(val, kwargs['UserData']): 79 | kwargs[key] = patch_user_data(kwargs[key], val) 80 | else: 81 | kwargs[key] = val 82 | asg.create_launch_configuration(**kwargs) 83 | asg.update_auto_scaling_group(AutoScalingGroupName=group['AutoScalingGroupName'], 84 | LaunchConfigurationName=kwargs['LaunchConfigurationName']) 85 | changed = True 86 | return changed 87 | 88 | 89 | def patch_elastigroup(group, properties, elastigroup_id, spotinst_account_data): 90 | ''' 91 | Patch specific properties of an existing ElastiGroup 92 | ''' 93 | changed = False 94 | properties_to_patch = {} 95 | 96 | group_user_data = group['compute']['launchSpecification']['userData'] 97 | current_user_data = codecs.decode(group_user_data.encode('utf-8'), 'base64').decode('utf-8') 98 | 99 | current_properties = { 100 | 'ImageId': group['compute']['launchSpecification']['imageId'], 101 | 'InstanceType': group['compute']['instanceTypes']['ondemand'], 102 | 'UserData': current_user_data 103 | } 104 | 105 | for key, val in properties.items(): 106 | if key in current_properties: 107 | if key == 'UserData': 108 | if should_patch_user_data(val, current_properties[key]): 109 | patched_user_data = patch_user_data(current_properties[key], val) 110 | encoded_user_data = base64.b64encode(patched_user_data.encode('utf-8')).decode('utf-8') 111 | properties_to_patch[key] = encoded_user_data 112 | else: 113 | if current_properties[key] != val: 114 | properties_to_patch[key] = val 115 | 116 | if len(properties_to_patch) > 0: 117 | elastigroup_api.patch_elastigroup(properties_to_patch, elastigroup_id, spotinst_account_data) 118 | changed = True 119 | 120 | return changed 121 | -------------------------------------------------------------------------------- /senza/spotinst/__init__.py: -------------------------------------------------------------------------------- 1 | """ 
2 | Deploy Spotinst Elastigroups using AWS CloudFormation templates 3 | """ 4 | from senza.exceptions import SenzaException 5 | 6 | __version__ = '0.1' 7 | 8 | 9 | class MissingSpotinstAccount(SenzaException): 10 | """ 11 | Exception raised when failed to map the target cloud account to a spotinst account 12 | """ 13 | 14 | def __init__(self, cloud_account_id: str): 15 | self.cloud_account_id = cloud_account_id 16 | 17 | def __str__(self): 18 | return "{cloud_account_id} cloud account was not found in your Spotinst organization ".format_map(vars(self)) 19 | -------------------------------------------------------------------------------- /senza/spotinst/components/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zalando-stups/senza/0134404b04382fcb9fc0675aacfb30480a079947/senza/spotinst/components/__init__.py -------------------------------------------------------------------------------- /senza/spotinst/components/elastigroup_api.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Wrapper methods for ElastiGroup's API 3 | ''' 4 | import click 5 | import requests 6 | import json 7 | import boto3 8 | 9 | from senza.components.elastigroup import ELASTIGROUP_RESOURCE_TYPE 10 | 11 | SPOTINST_API_URL = 'https://api.spotinst.io' 12 | 13 | DEPLOY_STRATEGY_RESTART = 'RESTART_SERVER' 14 | DEPLOY_STRATEGY_REPLACE = 'REPLACE_SERVER' 15 | DEFAULT_CONNECT_TIMEOUT = 9 16 | DEFAULT_READ_TIMEOUT = 30 17 | STATEFUL_STATE_ACTIVE = 'ACTIVE' 18 | 19 | 20 | class SpotInstAccountData: 21 | ''' 22 | Data required to access SpotInst API 23 | ''' 24 | 25 | def __init__(self, account_id, access_token): 26 | self.account_id = account_id 27 | self.access_token = access_token 28 | 29 | 30 | def get_spotinst_account_data(region, stack_name): 31 | """ 32 | Extracts the Spotinst API access token and cloud account ID required to use the SpotInst API 33 | It returns those parameters from the first resource of Type ``Custom::elastigroup`` 34 | found in the stack with the name and region provided as arguments 35 | """ 36 | cf = boto3.client('cloudformation', region) 37 | template = cf.get_template(StackName=stack_name)['TemplateBody'] 38 | 39 | resources = template.get('Resources', []) 40 | for name, resource in resources.items(): 41 | if resource.get("Type", None) == ELASTIGROUP_RESOURCE_TYPE: 42 | spotinst_token = resource['Properties']['accessToken'] 43 | spotinst_account_id = resource['Properties']['accountId'] 44 | return SpotInstAccountData(spotinst_account_id, spotinst_token) 45 | 46 | raise click.Abort() 47 | 48 | 49 | def update_elastigroup(body, elastigroup_id, spotinst_account_data): 50 | ''' 51 | Performs the update ElastiGroup API call. 
52 | 53 | Note: Although this should only return one element in the list, 54 | it still returns the entire list to prevent some silent decision making 55 | 56 | For more details see https://api.spotinst.com/elastigroup/amazon-web-services/update/ 57 | ''' 58 | headers = { 59 | "Authorization": "Bearer {}".format(spotinst_account_data.access_token), 60 | "Content-Type": "application/json" 61 | } 62 | 63 | response = requests.put( 64 | '{}/aws/ec2/group/{}?accountId={}'.format(SPOTINST_API_URL, elastigroup_id, spotinst_account_data.account_id), 65 | headers=headers, timeout=(DEFAULT_CONNECT_TIMEOUT, DEFAULT_READ_TIMEOUT), data=json.dumps(body)) 66 | response.raise_for_status() 67 | data = response.json() 68 | groups = data.get("response", {}).get("items", []) 69 | 70 | return groups 71 | 72 | 73 | def update_capacity(minimum, maximum, target, elastigroup_id, spotinst_account_data): 74 | ''' 75 | Updates the capacity (number of instances) for an ElastiGroup by calling the SpotInst API. 76 | Returns the updated description of the ElastiGroup as a dict. 77 | Exceptions will be thrown for HTTP errors. 78 | ''' 79 | 80 | new_capacity = { 81 | 'target': target, 82 | 'minimum': minimum, 83 | 'maximum': maximum 84 | } 85 | 86 | body = {'group': {'capacity': new_capacity}} 87 | 88 | return update_elastigroup(body, elastigroup_id, spotinst_account_data) 89 | 90 | 91 | def get_elastigroup(elastigroup_id, spotinst_account_data): 92 | ''' 93 | Returns a list containing the description of an ElastiGroup as a dict. 94 | Exceptions will be thrown for HTTP errors. 95 | 96 | Note: Although this should only return one element in the list, 97 | it still returns the entire list to prevent some silent decision making 98 | 99 | For more details see https://api.spotinst.com/elastigroup/amazon-web-services/list-group/ 100 | ''' 101 | headers = { 102 | "Authorization": "Bearer {}".format(spotinst_account_data.access_token), 103 | "Content-Type": "application/json" 104 | } 105 | 106 | response = requests.get( 107 | '{}/aws/ec2/group/{}?accountId={}'.format(SPOTINST_API_URL, elastigroup_id, spotinst_account_data.account_id), 108 | headers=headers, timeout=(DEFAULT_CONNECT_TIMEOUT, DEFAULT_READ_TIMEOUT)) 109 | response.raise_for_status() 110 | data = response.json() 111 | groups = data.get("response", {}).get("items", []) 112 | 113 | return groups 114 | 115 | 116 | def get_stateful_instances(elastigroup_id, spotinst_account_data): 117 | ''' 118 | Returns a list containing the description of the stateful instances of an ElastiGroup. 119 | Exceptions will be thrown for HTTP errors. 120 | 121 | https://docs.spot.io/spotinst-api/elastigroup/amazon-web-services/stateful-api/list-stateful-instances/ 122 | ''' 123 | headers = { 124 | "Authorization": "Bearer {}".format(spotinst_account_data.access_token), 125 | "Content-Type": "application/json" 126 | } 127 | 128 | response = requests.get( 129 | '{}/aws/ec2/group/{}/statefulInstance?accountId={}'.format( 130 | SPOTINST_API_URL, elastigroup_id, spotinst_account_data.account_id 131 | ), 132 | headers=headers, timeout=(DEFAULT_CONNECT_TIMEOUT, DEFAULT_READ_TIMEOUT)) 133 | response.raise_for_status() 134 | data = response.json() 135 | stateful_instances = data.get("response", {}).get("items", []) 136 | 137 | return stateful_instances 138 | 139 | 140 | def recycle_stateful_instance(elastigroup_id, stateful_instance_id, spotinst_account_data): 141 | ''' 142 | Triggers recycling of a single stateful instance of an ElastiGroup. 143 | Returns operation status. 
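For illustration, a sketch of resizing an ElastiGroup through the wrappers in this module; the account ID, API token and group ID are made-up examples::

    from senza.spotinst.components import elastigroup_api

    account = elastigroup_api.SpotInstAccountData('act-12345', 'my-spotinst-token')
    # returns the updated group description(s) as reported by the SpotInst API
    groups = elastigroup_api.update_capacity(minimum=1, maximum=6, target=3,
                                             elastigroup_id='sig-abcdef',
                                             spotinst_account_data=account)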
144 | 145 | Exceptions will be thrown for HTTP errors. 146 | 147 | https://docs.spot.io/spotinst-api/elastigroup/amazon-web-services/stateful-api/recycle-stateful-instance/ 148 | ''' 149 | headers = { 150 | "Authorization": "Bearer {}".format(spotinst_account_data.access_token), 151 | "Content-Type": "application/json" 152 | } 153 | 154 | response = requests.put( 155 | '{}/aws/ec2/group/{}/statefulInstance/{}/recycle?accountId={}'.format( 156 | SPOTINST_API_URL, elastigroup_id, stateful_instance_id, spotinst_account_data.account_id 157 | ), 158 | headers=headers, timeout=(DEFAULT_CONNECT_TIMEOUT, DEFAULT_READ_TIMEOUT)) 159 | response.raise_for_status() 160 | data = response.json() 161 | status = data.get("response", {}).get("status", {}) 162 | 163 | return status 164 | 165 | 166 | def patch_elastigroup(properties, elastigroup_id, spotinst_account_data): 167 | ''' 168 | Patch specific properties of the ElastiGroup. 169 | ''' 170 | compute = {} 171 | if 'InstanceType' in properties: 172 | compute['instanceTypes'] = { 173 | 'ondemand': properties['InstanceType'], 174 | } 175 | 176 | if 'ImageId' in properties: 177 | compute.setdefault('launchSpecification', {})['imageId'] = properties['ImageId'] 178 | 179 | if 'UserData' in properties: 180 | compute.setdefault('launchSpecification', {})['userData'] = properties['UserData'] 181 | 182 | body = {'group': {'compute': compute}} 183 | return update_elastigroup(body, elastigroup_id, spotinst_account_data) 184 | 185 | 186 | def deploy(batch_size=20, grace_period=300, strategy=DEPLOY_STRATEGY_REPLACE, 187 | elastigroup_id=None, spotinst_account_data=None): 188 | ''' 189 | Triggers Blue/Green Deployment that replaces the existing instances in the Elastigroup 190 | 191 | For more details see https://api.spotinst.com/elastigroup/amazon-web-services/deploy/ 192 | ''' 193 | headers = { 194 | "Authorization": "Bearer {}".format(spotinst_account_data.access_token), 195 | "Content-Type": "application/json" 196 | } 197 | 198 | body = { 199 | 'batchSizePercentage': batch_size, 200 | 'gracePeriod': grace_period, 201 | 'strategy': { 202 | 'action': strategy 203 | } 204 | } 205 | 206 | response = requests.put( 207 | '{}/aws/ec2/group/{}/roll?accountId={}'.format(SPOTINST_API_URL, elastigroup_id, 208 | spotinst_account_data.account_id), 209 | headers=headers, timeout=(DEFAULT_CONNECT_TIMEOUT, DEFAULT_READ_TIMEOUT), data=json.dumps(body)) 210 | response.raise_for_status() 211 | data = response.json() 212 | deploys = data.get("response", {}).get("items", []) 213 | 214 | return deploys 215 | 216 | 217 | def deploy_status(deploy_id, elastigroup_id, spotinst_account_data): 218 | ''' 219 | Obtains the current status of a deployment. 
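For illustration, a sketch of triggering a blue/green deployment and polling it with ``deploy_status`` below; the credentials and IDs are made-up, and it is assumed that the items returned by the SpotInst roll API carry an ``'id'`` field::

    from senza.spotinst.components import elastigroup_api

    account = elastigroup_api.SpotInstAccountData('act-12345', 'my-spotinst-token')
    deploys = elastigroup_api.deploy(batch_size=50, grace_period=600,
                                     strategy=elastigroup_api.DEPLOY_STRATEGY_REPLACE,
                                     elastigroup_id='sig-abcdef',
                                     spotinst_account_data=account)
    if deploys:
        status = elastigroup_api.deploy_status(deploys[0]['id'], 'sig-abcdef', account)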
220 | 221 | For more details see https://api.spotinst.com/elastigroup/amazon-web-services/deploy-status/ 222 | ''' 223 | headers = { 224 | "Authorization": "Bearer {}".format(spotinst_account_data.access_token), 225 | "Content-Type": "application/json" 226 | } 227 | 228 | response = requests.get( 229 | '{}/aws/ec2/group/{}/roll/{}?accountId={}'.format(SPOTINST_API_URL, elastigroup_id, deploy_id, 230 | spotinst_account_data.account_id), 231 | headers=headers, timeout=(DEFAULT_CONNECT_TIMEOUT, DEFAULT_READ_TIMEOUT)) 232 | response.raise_for_status() 233 | data = response.json() 234 | deploys = data.get("response", {}).get("items", []) 235 | 236 | return deploys 237 | -------------------------------------------------------------------------------- /senza/stack_references.py: -------------------------------------------------------------------------------- 1 | def check_file_exceptions(stack_references: list): 2 | """ 3 | Check all stack references to see if any references that looks like a yaml 4 | filename wasn't matched and raises an filerror 5 | """ 6 | for stack in stack_references: 7 | stack.raise_file_exception() 8 | -------------------------------------------------------------------------------- /senza/stups/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zalando-stups/senza/0134404b04382fcb9fc0675aacfb30480a079947/senza/stups/__init__.py -------------------------------------------------------------------------------- /senza/stups/piu.py: -------------------------------------------------------------------------------- 1 | from subprocess import call 2 | from typing import Optional 3 | 4 | from ..exceptions import PiuNotFound 5 | from ..manaus.route53 import Route53, Route53Record # NOQA pylint: disable=locally-disabled, unused-import 6 | 7 | 8 | class Piu: 9 | """ 10 | Wrapper around `piu `_ 11 | 12 | For more information about `piu` see 13 | http://stups.readthedocs.io/en/latest/user-guide/ssh-access.html#ssh-access 14 | """ 15 | @staticmethod 16 | def request_access(instance: str, reason: str, odd_host: Optional[str], 17 | connect: bool): 18 | """ 19 | Request SSH access to a single host 20 | """ 21 | reason = '{} via senza'.format(reason) 22 | cmd = ['piu', 'request-access', 23 | instance, reason] 24 | 25 | if connect: 26 | cmd.append('--connect') 27 | 28 | if odd_host is not None: 29 | cmd.extend(['-O', odd_host]) 30 | 31 | try: 32 | call(cmd) 33 | except FileNotFoundError: 34 | raise PiuNotFound 35 | 36 | @staticmethod 37 | def find_odd_host(region: str) -> Optional[str]: 38 | """ 39 | Tries to find the odd host based on the region and route53 records 40 | """ 41 | route53 = Route53() 42 | hosted_zones = list(route53.get_hosted_zones()) 43 | for hosted_zone in hosted_zones: 44 | potential_name = 'odd-{region}.{domain}'.format(region=region, 45 | domain=hosted_zone.name) 46 | records = route53.get_records(name=potential_name) 47 | try: 48 | record = next(records) # type: Route53Record 49 | except StopIteration: 50 | # The domain name was not found 51 | pass 52 | else: 53 | odd_host = record.name[:-1] # remove the trailing dot 54 | return odd_host 55 | 56 | return None 57 | -------------------------------------------------------------------------------- /senza/stups/taupage.py: -------------------------------------------------------------------------------- 1 | import boto3 2 | from collections import namedtuple 3 | 4 | TaupageChannel = namedtuple("TaupageChannel", ("image_mapping", "ami_wildcard", 
"public_ami_wildcard")) 5 | 6 | 7 | def _channel(suffix): 8 | return TaupageChannel("LatestTaupage{}Image".format(suffix), 9 | "Taupage{}-AMI-*".format(suffix), 10 | "Taupage{}-Public-AMI-*".format(suffix)) 11 | 12 | 13 | DEFAULT_CHANNEL = _channel("") 14 | 15 | CHANNELS = { 16 | "latest": DEFAULT_CHANNEL, 17 | "staging": _channel("Staging"), 18 | "dev": _channel("Dev") 19 | } 20 | 21 | 22 | def find_image(region: str, channel: TaupageChannel = None): 23 | '''Find the latest Taupage AMI, first try private images, fallback to public''' 24 | 25 | if channel is None: 26 | channel = DEFAULT_CHANNEL 27 | 28 | ec2 = boto3.resource('ec2', region) 29 | filters = [{'Name': 'name', 'Values': [channel.ami_wildcard]}, 30 | {'Name': 'is-public', 'Values': ['false']}, 31 | {'Name': 'state', 'Values': ['available']}, 32 | {'Name': 'root-device-type', 'Values': ['ebs']}] 33 | images = list(ec2.images.filter(Filters=filters)) 34 | if not images: 35 | public_filters = [{'Name': 'name', 'Values': [channel.public_ami_wildcard]}, 36 | {'Name': 'is-public', 'Values': ['true']}, 37 | {'Name': 'state', 'Values': ['available']}, 38 | {'Name': 'root-device-type', 'Values': ['ebs']}] 39 | images = list(ec2.images.filter(Filters=public_filters)) 40 | 41 | if not images: 42 | return None 43 | 44 | most_recent_image = sorted(images, key=lambda i: i.name)[-1] 45 | return most_recent_image 46 | -------------------------------------------------------------------------------- /senza/subcommands/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zalando-stups/senza/0134404b04382fcb9fc0675aacfb30480a079947/senza/subcommands/__init__.py -------------------------------------------------------------------------------- /senza/subcommands/config.py: -------------------------------------------------------------------------------- 1 | from typing import Optional 2 | 3 | from click import argument, command 4 | from click.exceptions import BadArgumentUsage 5 | 6 | from ..configuration import configuration 7 | from ..exceptions import InvalidConfigKey 8 | 9 | 10 | @command('config') 11 | @argument('key') 12 | @argument('value', required=False) 13 | def cmd_config(key: str, value: Optional[str]): 14 | """ 15 | Get and set senza options. 
16 | """ 17 | if value is None: 18 | try: 19 | value = configuration[key] 20 | print(value) 21 | except InvalidConfigKey as e: 22 | raise BadArgumentUsage(e) 23 | except KeyError: 24 | exit(1) 25 | else: 26 | try: 27 | configuration[key] = value 28 | except InvalidConfigKey as e: 29 | raise BadArgumentUsage(e) 30 | -------------------------------------------------------------------------------- /senza/subcommands/root.py: -------------------------------------------------------------------------------- 1 | """ 2 | Senza's root command with tasks and flags common to all sub-commands 3 | """ 4 | 5 | import sys 6 | import time 7 | from distutils.version import LooseVersion 8 | from pathlib import Path 9 | from typing import Optional 10 | 11 | import click 12 | import requests 13 | import senza 14 | from clickclick import AliasedGroup, warning 15 | 16 | from ..error_handling import sentry 17 | 18 | CONTEXT_SETTINGS = dict(help_option_names=["-h", "--help"]) 19 | 20 | PYPI_URL = "https://pypi.python.org/pypi/stups-senza/json" 21 | ONE_DAY = 86400 # seconds 22 | 23 | 24 | def get_latest_version_from_disk() -> Optional[LooseVersion]: 25 | """ 26 | Tries to read a cached latest version from the disk returning None if the 27 | file doesn't exist or if it's older than 24 hours 28 | """ 29 | version_cache = Path(click.get_app_dir("senza")) / "pypi_version" 30 | now = time.time() 31 | latest_version = None 32 | if version_cache.exists() and now - version_cache.stat().st_mtime < ONE_DAY: 33 | with version_cache.open() as version_cache_file: 34 | str_version = version_cache_file.read() 35 | if str_version: 36 | latest_version = LooseVersion(str_version) 37 | return latest_version 38 | 39 | 40 | def get_latest_version_from_pypi() -> Optional[LooseVersion]: 41 | """ 42 | Gets the latest release version pypi api using distutils order 43 | to sort the releases (same as pip). 44 | """ 45 | try: 46 | pypi_response = requests.get(PYPI_URL, timeout=1) 47 | except requests.Timeout: 48 | return None 49 | 50 | # the potential exception is not caught here but it's caught in 51 | # check_senza_version and pushed to sentry if it is configured 52 | pypi_response.raise_for_status() 53 | pypi_data = pypi_response.json() 54 | releases = pypi_data["releases"] 55 | versions = [LooseVersion(version) for version in releases.keys()] 56 | return sorted(versions)[-1] 57 | 58 | 59 | def get_latest_version() -> Optional[LooseVersion]: 60 | """ 61 | Gets the latest version either from the file cache or from pip. 62 | 63 | If the file cache exists it will be valid for 24 hours. 64 | """ 65 | version_cache = Path(click.get_app_dir("senza")) / "pypi_version" 66 | latest_version = get_latest_version_from_disk() or get_latest_version_from_pypi() 67 | 68 | if latest_version is not None: 69 | try: 70 | version_cache.parent.mkdir(parents=True) 71 | except FileExistsError: 72 | # this try...except can be replaced with exist_ok=True when 73 | # we drop python3.4 support 74 | pass 75 | 76 | with version_cache.open("w") as version_cache_file: 77 | version_cache_file.write(str(latest_version)) 78 | return latest_version 79 | 80 | 81 | def check_senza_version(current_version: str): 82 | """ 83 | Checks if senza is updated and prints a warning with instructions to update 84 | if it's not. 
85 | """ 86 | if not sys.stdout.isatty(): 87 | return 88 | current_version = LooseVersion(current_version) 89 | try: 90 | latest_version = get_latest_version() 91 | except Exception: 92 | if sentry is not None: 93 | sentry.captureException() 94 | return 95 | 96 | if latest_version is not None and current_version < latest_version: 97 | if __file__.startswith("/home"): 98 | # if it's installed in the user folder 99 | cmd = "pip3 install --upgrade stups-senza" 100 | else: 101 | cmd = "sudo pip3 install --upgrade stups-senza" 102 | warning( 103 | "Your senza version ({current}) is outdated. " 104 | "Please install the new one using '{cmd}'".format( 105 | current=current_version, cmd=cmd 106 | ) 107 | ) 108 | 109 | 110 | def print_version(ctx, param, value): 111 | """ 112 | Prints current senza version and checks if it's the latest one. 113 | """ 114 | assert param.name == "version" 115 | if not value or ctx.resilient_parsing: 116 | return 117 | 118 | click.echo("Senza {}".format(senza.__version__)) 119 | # this needs to be here since when this is called cli() is not 120 | check_senza_version(senza.__version__) 121 | ctx.exit() 122 | 123 | 124 | @click.group(cls=AliasedGroup, context_settings=CONTEXT_SETTINGS) 125 | @click.option( 126 | "-V", 127 | "--version", 128 | is_flag=True, 129 | callback=print_version, 130 | expose_value=False, 131 | is_eager=True, 132 | help="Print the current version number and exit.", 133 | ) 134 | def cli(): 135 | """ 136 | Senza's root command. 137 | 138 | Checks the version. 139 | 140 | Sub command can be added by using `cli.add_command(SUB_COMMAND_FUNCTION)` 141 | or using the `@cli.command()` decorator 142 | """ 143 | check_senza_version(senza.__version__) 144 | -------------------------------------------------------------------------------- /senza/templates/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | Modules and functions for senza component templates 3 | """ 4 | 5 | from types import ModuleType, FunctionType 6 | import pkg_resources 7 | 8 | 9 | def get_template_description(name, module: ModuleType) -> str: 10 | """ 11 | Gets the human-readable template description based on the name of the 12 | component and the component's module docstring 13 | """ 14 | return "{}: {}".format(name, (module.__doc__ or "").strip()) 15 | 16 | 17 | def has_functions(module, names): 18 | return all( 19 | isinstance(getattr(module, function_name, None), FunctionType) 20 | for function_name in names 21 | ) 22 | 23 | 24 | def get_templates() -> dict: 25 | """ 26 | Returns a dict with all the template modules 27 | """ 28 | entry_points = pkg_resources.iter_entry_points("senza.templates") 29 | template_modules = {} 30 | for entry_point in entry_points: # type: pkg_resources.EntryPoint 31 | try: 32 | module = entry_point.resolve() 33 | except ImportError: 34 | # ignore bad entry points 35 | continue 36 | else: 37 | # make sure the entry point resolves to a module with the essential interface functions 38 | if isinstance(module, ModuleType) and has_functions( 39 | module, ("gather_user_variables", "generate_definition") 40 | ): 41 | template_modules[entry_point.name] = module 42 | return template_modules 43 | -------------------------------------------------------------------------------- /senza/templates/_helper.py: -------------------------------------------------------------------------------- 1 | import json 2 | import re 3 | 4 | import boto3 5 | import botocore.exceptions 6 | import click 7 | import clickclick 8 | from 
click import confirm 9 | from clickclick import Action 10 | from senza.aws import get_account_alias, get_account_id, get_security_group 11 | 12 | from ..manaus.boto_proxy import BotoClientProxy 13 | 14 | 15 | def prompt(variables: dict, var_name, *args, **kwargs): 16 | if var_name not in variables: 17 | if callable(kwargs.get("default")): 18 | # evaluate callable 19 | kwargs["default"] = kwargs["default"]() 20 | 21 | variables[var_name] = click.prompt(*args, **kwargs) 22 | elif "type" in kwargs: 23 | # ensure the variable as the right type 24 | type = kwargs["type"] 25 | variables[var_name] = type(variables[var_name]) 26 | 27 | 28 | def choice(variables: dict, var_name, *args, **kwargs): 29 | if var_name not in variables: 30 | if callable(kwargs.get("default")): 31 | # evaluate callable 32 | kwargs["default"] = kwargs["default"]() 33 | 34 | variables[var_name] = clickclick.choice(*args, **kwargs) 35 | elif "type" in kwargs: 36 | # ensure the variable as the right type 37 | type = kwargs["type"] 38 | variables[var_name] = type(variables[var_name]) 39 | 40 | 41 | def check_value(max_length: int, match_regex: str): 42 | def _value_checker(value: str): 43 | if len(value) <= max_length: 44 | if re.match(match_regex, value): 45 | return value 46 | else: 47 | raise click.UsageError("did not match regex {}.".format(match_regex)) 48 | else: 49 | raise click.UsageError( 50 | "Value is too long! {} > {} chars".format(len(value), max_length) 51 | ) 52 | 53 | return _value_checker 54 | 55 | 56 | def check_security_group(sg_name, rules, region, allow_from_self=False): 57 | rules_missing = set() 58 | for rule in rules: 59 | rules_missing.add(rule) 60 | 61 | with Action("Checking security group {}..".format(sg_name)): 62 | sg = get_security_group(region, sg_name) 63 | if sg: 64 | for rule in sg.ip_permissions: 65 | # NOTE: boto object has port as string! 66 | for proto, port in rules: 67 | if rule["IpProtocol"] == proto and rule["FromPort"] == int(port): 68 | rules_missing.remove((proto, port)) 69 | 70 | if sg: 71 | return rules_missing 72 | else: 73 | create_sg = click.confirm( 74 | "Security group {} does not exist. Do you want Senza to create it now?".format( 75 | sg_name 76 | ), 77 | default=True, 78 | ) 79 | if create_sg: 80 | ec2c = BotoClientProxy("ec2", region) 81 | # FIXME which vpc? 
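            # the first VPC returned by describe_vpcs() is picked blindly; in
            # accounts with more than one VPC this may not be the VPC you
            # expect (senza.manaus.ec2.EC2.get_default_vpc does a stricter
            # lookup)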
82 | vpc = ec2c.describe_vpcs()["Vpcs"][0] 83 | sg = ec2c.create_security_group( 84 | GroupName=sg_name, 85 | Description="Application security group", 86 | VpcId=vpc["VpcId"], 87 | ) 88 | ec2c.create_tags( 89 | Resources=[sg["GroupId"]], Tags=[{"Key": "Name", "Value": sg_name}] 90 | ) 91 | ip_permissions = [] 92 | for proto, port in rules: 93 | ip_permissions.append( 94 | { 95 | "IpProtocol": proto, 96 | "FromPort": port, 97 | "ToPort": port, 98 | "IpRanges": [{"CidrIp": "0.0.0.0/0"}], 99 | } 100 | ) 101 | if allow_from_self: 102 | ip_permissions.append( 103 | { 104 | "IpProtocol": "-1", 105 | "UserIdGroupPairs": [{"GroupId": sg["GroupId"]}], 106 | } 107 | ) 108 | ec2c.authorize_security_group_ingress( 109 | GroupId=sg["GroupId"], IpPermissions=ip_permissions 110 | ) 111 | return set() 112 | 113 | 114 | def get_mint_bucket_name(region: str): 115 | account_id = get_account_id() 116 | account_alias = get_account_alias() 117 | s3 = boto3.resource("s3") 118 | parts = account_alias.split("-") 119 | prefix = parts[0] 120 | bucket_name = "{}-stups-mint-{}-{}".format(prefix, account_id, region) 121 | bucket = s3.Bucket(bucket_name) 122 | try: 123 | bucket.load() 124 | return bucket.name 125 | except Exception: 126 | bucket = None 127 | for bucket in s3.buckets.all(): 128 | if bucket.name.startswith("{}-stups-mint-{}-".format(prefix, account_id)): 129 | return bucket.name 130 | return bucket_name 131 | 132 | 133 | def create_mint_read_policy_document(application_id: str, bucket_name: str, region: str): 134 | return { 135 | "Version": "2012-10-17", 136 | "Statement": [ 137 | { 138 | "Sid": "AllowMintRead", 139 | "Effect": "Allow", 140 | "Action": ["s3:GetObject"], 141 | "Resource": [ 142 | "arn:aws:s3:::{}/{}/*".format(bucket_name, application_id) 143 | ], 144 | } 145 | ], 146 | } 147 | 148 | 149 | def check_iam_role(application_id: str, bucket_name: str, region: str): 150 | role_name = "app-{}".format(application_id) 151 | with Action("Checking IAM role {}..".format(role_name)): 152 | iam = BotoClientProxy("iam") 153 | try: 154 | iam.get_role(RoleName=role_name) 155 | exists = True 156 | except botocore.exceptions.ClientError: 157 | exists = False 158 | 159 | assume_role_policy_document = { 160 | "Statement": [ 161 | { 162 | "Action": "sts:AssumeRole", 163 | "Effect": "Allow", 164 | "Principal": {"Service": "ec2.amazonaws.com"}, 165 | "Sid": "", 166 | } 167 | ], 168 | "Version": "2008-10-17", 169 | } 170 | 171 | create = False 172 | if not exists: 173 | create = confirm( 174 | "IAM role {} does not exist. " 175 | "Do you want Senza to create it now?".format(role_name), 176 | default=True, 177 | ) 178 | if create: 179 | with Action("Creating IAM role {}..".format(role_name)): 180 | iam.create_role( 181 | RoleName=role_name, 182 | AssumeRolePolicyDocument=json.dumps(assume_role_policy_document), 183 | ) 184 | 185 | attach_mint_read_policy = bucket_name is not None and ( 186 | (not exists and create) 187 | or ( 188 | exists and confirm( 189 | "IAM role {} already exists. ".format(role_name) 190 | + "Do you want Senza to overwrite the role policy?" 
191 | ) 192 | ) 193 | ) 194 | if attach_mint_read_policy: 195 | with Action("Updating IAM role policy of {}..".format(role_name)): 196 | mint_read_policy = create_mint_read_policy_document(application_id, bucket_name, region) 197 | iam.put_role_policy( 198 | RoleName=role_name, 199 | PolicyName=role_name, 200 | PolicyDocument=json.dumps(mint_read_policy), 201 | ) 202 | 203 | 204 | def check_s3_bucket(bucket_name: str, region: str): 205 | s3 = boto3.resource("s3", region) 206 | with Action("Checking S3 bucket {}..".format(bucket_name)): 207 | exists = False 208 | try: 209 | s3.meta.client.head_bucket(Bucket=bucket_name) 210 | exists = True 211 | except Exception: 212 | pass 213 | if not exists: 214 | with Action("Creating S3 bucket {}...".format(bucket_name)): 215 | s3.create_bucket( 216 | Bucket=bucket_name, 217 | CreateBucketConfiguration={"LocationConstraint": region}, 218 | ) 219 | -------------------------------------------------------------------------------- /senza/templates/bgapp.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Background app with single EC2 instance 3 | ''' 4 | 5 | from clickclick import warning 6 | from senza.utils import pystache_render 7 | from ._helper import prompt, confirm, check_security_group, check_iam_role, get_mint_bucket_name, check_value 8 | 9 | TEMPLATE = ''' 10 | # basic information for generating and executing this definition 11 | SenzaInfo: 12 | StackName: {{application_id}} 13 | Parameters: 14 | - ImageVersion: 15 | Description: "Docker image version of {{ application_id }}." 16 | 17 | # a list of senza components to apply to the definition 18 | SenzaComponents: 19 | 20 | # this basic configuration is required for the other components 21 | - Configuration: 22 | Type: Senza::StupsAutoConfiguration # auto-detect network setup 23 | 24 | # will create a launch configuration and auto scaling group with scaling triggers 25 | - AppServer: 26 | Type: Senza::TaupageAutoScalingGroup 27 | InstanceType: {{ instance_type }} 28 | SecurityGroups: 29 | - app-{{application_id}} 30 | IamRoles: 31 | - app-{{application_id}} 32 | AssociatePublicIpAddress: false # change for standalone deployment in default VPC 33 | TaupageConfig: 34 | application_version: "{{=<% %>=}}{{Arguments.ImageVersion}}<%={{ }}=%>" 35 | runtime: Docker 36 | source: "{{ docker_image }}:{{=<% %>=}}{{Arguments.ImageVersion}}<%={{ }}=%>" 37 | {{#mint_bucket}} 38 | mint_bucket: "{{ mint_bucket }}" 39 | {{/mint_bucket}} 40 | ''' 41 | 42 | 43 | def gather_user_variables(variables, region, account_info): 44 | prompt(variables, 'application_id', 'Application ID', default='hello-world', 45 | value_proc=check_value(60, '^[a-zA-Z][-a-zA-Z0-9]*$')) 46 | prompt(variables, 'docker_image', 'Docker image without tag/version (e.g. 
"pierone.example.org/myteam/myapp")', 47 | default='stups/hello-world') 48 | prompt(variables, 'instance_type', 'EC2 instance type', default='t2.micro') 49 | if 'pierone' in variables['docker_image'] or confirm('Did you need OAuth-Credentials from Mint?'): 50 | prompt(variables, 'mint_bucket', 'Mint S3 bucket name', default=lambda: get_mint_bucket_name(region)) 51 | else: 52 | variables['mint_bucket'] = None 53 | 54 | sg_name = 'app-{}'.format(variables['application_id']) 55 | rules_missing = check_security_group(sg_name, [('tcp', 22)], region, allow_from_self=True) 56 | 57 | if ('tcp', 22) in rules_missing: 58 | warning('Security group {} does not allow SSH access, you will not be able to ssh into your servers'.format( 59 | sg_name)) 60 | 61 | check_iam_role(variables['application_id'], variables['mint_bucket'], region) 62 | 63 | return variables 64 | 65 | 66 | def generate_definition(variables): 67 | definition_yaml = pystache_render(TEMPLATE, variables) 68 | return definition_yaml 69 | -------------------------------------------------------------------------------- /senza/templates/rediscluster.py: -------------------------------------------------------------------------------- 1 | ''' 2 | Elasticache cluster running multiple redis nodes, with replication / HA 3 | ''' 4 | 5 | from clickclick import warning 6 | from senza.utils import pystache_render 7 | 8 | from ._helper import prompt, check_security_group, check_value 9 | 10 | 11 | TEMPLATE = ''' 12 | # basic information for generating and executing this definition 13 | SenzaInfo: 14 | StackName: {{ application_id }} 15 | 16 | # a list of senza components to apply to the definition 17 | SenzaComponents: 18 | 19 | # this basic configuraation is required for the other components 20 | - Configuration: 21 | Type: Senza::StupsAutoConfiguration # auto-detect network setup 22 | 23 | - {{ application_id }}: 24 | Type: Senza::RedisCluster 25 | CacheNodeType: {{ instance_type }} 26 | NumberOfNodes: {{ number_of_nodes }} 27 | SecurityGroups: 28 | - redis-{{ application_id }} 29 | 30 | ''' 31 | 32 | 33 | def gather_user_variables(variables, region, account_info): 34 | # maximal 32 characters because of the loadbalancer-name 35 | prompt(variables, 'application_id', 'Application ID', default='hello-world', 36 | value_proc=check_value(18, '^[a-zA-Z][-a-zA-Z0-9]*$')) 37 | prompt(variables, 'instance_type', 'EC2 instance type', default='cache.m3.medium') 38 | prompt(variables, 'number_of_nodes', 'Number of nodes in cluster', default='2', 39 | value_proc=check_value(1, '^[2-5]$')) 40 | 41 | sg_name = 'redis-{}'.format(variables['application_id']) 42 | 43 | rules_missing = check_security_group(sg_name, [('tcp', 6379)], region, allow_from_self=True) 44 | if ('tcp', 6379) in rules_missing: 45 | warning('Security group {} does not allow tcp/6379 access yet, you will not be able to access redis'.format( 46 | sg_name)) 47 | 48 | return variables 49 | 50 | 51 | def generate_definition(variables): 52 | definition_yaml = pystache_render(TEMPLATE, variables) 53 | return definition_yaml 54 | -------------------------------------------------------------------------------- /senza/templates/redisnode.py: -------------------------------------------------------------------------------- 1 | """ 2 | Elasticache node running redis, without replication / HA (for caching) 3 | """ 4 | 5 | from clickclick import warning 6 | from senza.utils import pystache_render 7 | 8 | from ._helper import prompt, check_security_group, check_value 9 | 10 | 11 | TEMPLATE = ''' 12 | # basic 
information for generating and executing this definition 13 | SenzaInfo: 14 | StackName: {{ application_id }} 15 | 16 | # a list of senza components to apply to the definition 17 | SenzaComponents: 18 | 19 | # this basic configuration is required for the other components 20 | - Configuration: 21 | Type: Senza::StupsAutoConfiguration # auto-detect network setup 22 | 23 | - {{ application_id }}: 24 | Type: Senza::RedisNode 25 | CacheNodeType: {{ instance_type }} 26 | SecurityGroups: 27 | - redis-{{ application_id }} 28 | ''' 29 | 30 | 31 | def gather_user_variables(variables, region, account_info):  # pylint: disable=locally-disabled, unused-argument 32 | """ 33 | Gather all the variables needed to create the redis node 34 | """ 35 | # maximum 32 characters because of the loadbalancer name 36 | prompt(variables, 'application_id', 'Application ID', default='hello-world', 37 | value_proc=check_value(18, '^[a-zA-Z][-a-zA-Z0-9]*$')) 38 | prompt(variables, 'instance_type', 'EC2 instance type', default='cache.t2.small') 39 | 40 | sg_name = 'redis-{}'.format(variables['application_id']) 41 | 42 | rules_missing = check_security_group(sg_name, [('tcp', 6379)], region, allow_from_self=True) 43 | if ('tcp', 6379) in rules_missing: 44 | warning('Security group {} does not allow tcp/6379 access, ' 45 | 'you will not be able to access your redis'.format(sg_name)) 46 | 47 | return variables 48 | 49 | 50 | def generate_definition(variables): 51 | """ 52 | Generates the redis node definition yaml from template 53 | """ 54 | definition_yaml = pystache_render(TEMPLATE, variables) 55 | return definition_yaml 56 | -------------------------------------------------------------------------------- /senza/templates/webapp.py: -------------------------------------------------------------------------------- 1 | ''' 2 | HTTP app with auto scaling, ELB and DNS 3 | ''' 4 | 5 | from clickclick import warning, error 6 | from senza.utils import pystache_render 7 | from ._helper import prompt, choice, confirm, check_security_group, check_iam_role, get_mint_bucket_name, check_value 8 | 9 | TEMPLATE = ''' 10 | # basic information for generating and executing this definition 11 | SenzaInfo: 12 | StackName: {{application_id}} 13 | Parameters: 14 | - ImageVersion: 15 | Description: "Docker image version of {{ application_id }}."
16 | 17 | # a list of senza components to apply to the definition 18 | SenzaComponents: 19 | 20 | # this basic configuration is required for the other components 21 | - Configuration: 22 | Type: Senza::StupsAutoConfiguration # auto-detect network setup 23 | 24 | # will create a launch configuration and auto scaling group with scaling triggers 25 | - AppServer: 26 | Type: Senza::TaupageAutoScalingGroup 27 | InstanceType: {{ instance_type }} 28 | SecurityGroups: 29 | - app-{{application_id}} 30 | IamRoles: 31 | - app-{{application_id}} 32 | ElasticLoadBalancer: AppLoadBalancer 33 | AssociatePublicIpAddress: false # change for standalone deployment in default VPC 34 | TaupageConfig: 35 | application_version: "{{=<% %>=}}{{Arguments.ImageVersion}}<%={{ }}=%>" 36 | runtime: Docker 37 | source: "{{ docker_image }}:{{=<% %>=}}{{Arguments.ImageVersion}}<%={{ }}=%>" 38 | health_check_path: {{http_health_check_path}} 39 | ports: 40 | {{http_port}}: {{http_port}} 41 | {{#mint_bucket}} 42 | mint_bucket: "{{ mint_bucket }}" 43 | {{/mint_bucket}} 44 | 45 | # creates an ELB entry and Route53 domains to this ELB 46 | - AppLoadBalancer: 47 | Type: Senza::WeightedDnsElasticLoadBalancer 48 | HTTPPort: {{http_port}} 49 | HealthCheckPath: {{http_health_check_path}} 50 | SecurityGroups: 51 | - app-{{application_id}}-lb 52 | Scheme: {{loadbalancer_scheme}} 53 | ''' 54 | 55 | 56 | def gather_user_variables(variables, region, account_info): 57 | # maximum 32 characters because of the loadbalancer name 58 | prompt(variables, 'application_id', 'Application ID', default='hello-world', 59 | value_proc=check_value(60, '^[a-zA-Z][-a-zA-Z0-9]*$')) 60 | prompt(variables, 'docker_image', 'Docker image without tag/version (e.g. "pierone.example.org/myteam/myapp")', 61 | default='stups/hello-world') 62 | prompt(variables, 'http_port', 'HTTP port', default=8080, type=int) 63 | prompt(variables, 'http_health_check_path', 'HTTP health check path', default='/') 64 | prompt(variables, 'instance_type', 'EC2 instance type', default='t2.micro') 65 | if 'pierone' in variables['docker_image'] or confirm('Do you need OAuth credentials from Mint?'): 66 | prompt(variables, 'mint_bucket', 'Mint S3 bucket name', default=lambda: get_mint_bucket_name(region)) 67 | else: 68 | variables['mint_bucket'] = None 69 | choice(variables, 'loadbalancer_scheme', 70 | prompt='Please select the load balancer scheme', 71 | options=[('internal', 72 | 'internal: only accessible from within the VPC'), 73 | ('internet-facing', 74 | 'internet-facing: accessible from the public internet')], 75 | default='internal') 76 | http_port = variables['http_port'] 77 | 78 | sg_name = 'app-{}'.format(variables['application_id']) 79 | rules_missing = check_security_group(sg_name, [('tcp', 22), ('tcp', http_port)], region, allow_from_self=True) 80 | 81 | if ('tcp', 22) in rules_missing: 82 | warning('Security group {} does not allow SSH access, you will not be able to ssh into your servers'.format( 83 | sg_name)) 84 | 85 | if ('tcp', http_port) in rules_missing: 86 | error('Security group {} does not allow inbound TCP traffic on the specified HTTP port ({})'.format( 87 | sg_name, http_port 88 | )) 89 | 90 | rules_missing = check_security_group(sg_name + '-lb', [('tcp', 443)], region) 91 | 92 | if rules_missing: 93 | error('Load balancer security group {} does not allow inbound HTTPS traffic'.format(sg_name + '-lb')) 94 | 95 | check_iam_role(variables['application_id'], variables['mint_bucket'], region) 96 | 97 | return variables 98 | 99 | 100 | def generate_definition(variables): 101
| definition_yaml = pystache_render(TEMPLATE, variables) 102 | return definition_yaml 103 | -------------------------------------------------------------------------------- /senza/utils.py: -------------------------------------------------------------------------------- 1 | """ 2 | Miscellaneous functions that are useful in several places and don't fall under 3 | the domain of any other module. 4 | """ 5 | 6 | import re 7 | import pystache 8 | 9 | 10 | def named_value(dictionary): 11 | """ 12 | Gets the name and value of a dict with a single key (for example SenzaInfo 13 | parameters or Senza Components) 14 | """ 15 | return next(iter(dictionary.items())) 16 | 17 | 18 | def ensure_keys(dict_obj, *keys): 19 | """ 20 | Ensure ``dict_obj`` has the hierarchy ``{keys[0]: {keys[1]: {...}}}`` 21 | 22 | The innermost key will get ``{}`` as value if it didn't exist already. 23 | """ 24 | if len(keys) == 0: 25 | return dict_obj 26 | else: 27 | first, rest = keys[0], keys[1:] 28 | if first not in dict_obj: 29 | dict_obj[first] = {} 30 | dict_obj[first] = ensure_keys(dict_obj[first], *rest) 31 | return dict_obj 32 | 33 | 34 | def camel_case_to_underscore(name): 35 | """ 36 | Converts name from CamelCase to snake_case 37 | """ 38 | # the two steps are needed to support words with sequences of more than 39 | # one uppercase character 40 | step1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name) 41 | return re.sub('([a-z0-9])([A-Z])', r'\1_\2', step1).lower() 42 | 43 | 44 | def pystache_render(*args, **kwargs): 45 | """ 46 | Render pystache template with strict mode 47 | """ 48 | render = pystache.Renderer(missing_tags='strict') 49 | return render.render(*args, **kwargs) 50 | 51 | 52 | def generate_valid_cloud_name(name: str, length: int): 53 | """ 54 | Truncate the name to that length, collapse repeated - signs and 55 | remove a starting or trailing - 56 | """ 57 | return re.sub(r'(-(?=-{1,})|^-|-$)', '', name[:length]) 58 | 59 | 60 | def get_load_balancer_name(stack_name: str, stack_version: str): 61 | """ 62 | Returns the name of the load balancer for the stack name and version, 63 | truncating the name if necessary. 64 | """ 65 | # Loadbalancer name cannot exceed 32 characters, try to shorten 66 | nchars = 32 - len(stack_version) - 1 67 | return '{}-{}'.format(generate_valid_cloud_name(stack_name, nchars), stack_version) 68 | -------------------------------------------------------------------------------- /setup-meta.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | """ 5 | Additional setup to register the convenience meta package on PyPI 6 | """ 7 | 8 | import setuptools 9 | import setup 10 | 11 | from setup import VERSION, DESCRIPTION, LICENSE, URL, AUTHOR, EMAIL, KEYWORDS, CLASSIFIERS 12 | 13 | 14 | NAME = 'senza' 15 | 16 | 17 | def setup_package(): 18 | version = VERSION 19 | 20 | install_reqs = [setup.NAME] 21 | 22 | setuptools.setup( 23 | name=NAME, 24 | version=version, 25 | url=URL, 26 | description=DESCRIPTION, 27 | author=AUTHOR, 28 | author_email=EMAIL, 29 | license=LICENSE, 30 | keywords=KEYWORDS, 31 | long_description='This is just a meta package.
Please use https://pypi.python.org/pypi/{}'.format(setup.NAME), 32 | classifiers=CLASSIFIERS, 33 | packages=[], 34 | install_requires=install_reqs, 35 | ) 36 | 37 | 38 | if __name__ == '__main__': 39 | setup_package() 40 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | import sys 5 | import os 6 | import inspect 7 | 8 | import setuptools 9 | from setuptools.command.test import test as TestCommand 10 | from setuptools import setup, Command 11 | 12 | if sys.version_info < (3, 4, 0): 13 | sys.stderr.write('FATAL: STUPS Senza needs to be run with Python 3.4+\n') 14 | sys.exit(1) 15 | 16 | __location__ = os.path.join(os.getcwd(), os.path.dirname(inspect.getfile(inspect.currentframe()))) 17 | 18 | 19 | def read_version(package): 20 | with open(os.path.join(package, '__init__.py'), 'r') as fd: 21 | for line in fd: 22 | if line.startswith('__version__ = '): 23 | return line.split()[-1].strip().strip("'") 24 | 25 | 26 | NAME = 'stups-senza' 27 | MAIN_PACKAGE = 'senza' 28 | VERSION = read_version(MAIN_PACKAGE) 29 | DESCRIPTION = 'AWS Cloud Formation deployment CLI' 30 | LICENSE = 'Apache License 2.0' 31 | URL = 'https://github.com/zalando-stups/senza' 32 | AUTHOR = 'Henning Jacobs' 33 | EMAIL = 'henning.jacobs@zalando.de' 34 | KEYWORDS = 'aws cloud formation cf elb ec2 stups immutable stacks route53 boto' 35 | 36 | COVERAGE_XML = True 37 | COVERAGE_HTML = False 38 | JUNIT_XML = True 39 | 40 | # Add here all kinds of additional classifiers as defined under 41 | # https://pypi.python.org/pypi?%3Aaction=list_classifiers 42 | CLASSIFIERS = [ 43 | 'Development Status :: 4 - Beta', 44 | 'Environment :: Console', 45 | 'Intended Audience :: Developers', 46 | 'Intended Audience :: System Administrators', 47 | 'License :: OSI Approved :: Apache Software License', 48 | 'Operating System :: POSIX :: Linux', 49 | 'Programming Language :: Python', 50 | 'Programming Language :: Python :: 3.4', 51 | 'Programming Language :: Python :: Implementation :: CPython', 52 | ] 53 | 54 | CONSOLE_SCRIPTS = ['senza = senza.cli:main'] 55 | 56 | 57 | class PyTest(TestCommand): 58 | 59 | user_options = [('cov=', None, 'Run coverage'), ('cov-xml=', None, 'Generate junit xml report'), ('cov-html=', 60 | None, 'Generate junit html report'), ('junitxml=', None, 'Generate xml of test results')] 61 | 62 | def initialize_options(self): 63 | TestCommand.initialize_options(self) 64 | self.cov = None 65 | self.cov_xml = False 66 | self.cov_html = False 67 | self.junitxml = None 68 | 69 | def finalize_options(self): 70 | TestCommand.finalize_options(self) 71 | if self.cov is not None: 72 | self.cov = ['--cov', self.cov, '--cov-report', 'term-missing'] 73 | if self.cov_xml: 74 | self.cov.extend(['--cov-report', 'xml']) 75 | if self.cov_html: 76 | self.cov.extend(['--cov-report', 'html']) 77 | if self.junitxml is not None: 78 | self.junitxml = ['--junitxml', self.junitxml] 79 | 80 | def run_tests(self): 81 | try: 82 | import pytest 83 | except Exception: 84 | raise RuntimeError('py.test is not installed, run: pip install pytest') 85 | params = {'args': self.test_args} 86 | if self.cov: 87 | params['args'] += self.cov 88 | if self.junitxml: 89 | params['args'] += self.junitxml 90 | errno = pytest.main(**params) 91 | sys.exit(errno) 92 | 93 | 94 | class Flake8(Command): 95 | 96 | user_options = [] 97 | 98 | def initialize_options(self): 99 | pass 100 | 101 | 
def finalize_options(self): 102 | pass 103 | 104 | def run(self): 105 | from flake8.main.cli import main 106 | 107 | errno = main(['senza']) 108 | sys.exit(errno) 109 | 110 | 111 | def get_install_requirements(path): 112 | content = open(os.path.join(__location__, path)).read() 113 | return [req for req in content.split('\\n') if req != ''] 114 | 115 | 116 | def read(fname): 117 | return open(os.path.join(__location__, fname)).read() 118 | 119 | 120 | def setup_package(): 121 | # Assemble additional setup commands 122 | cmdclass = {} 123 | cmdclass['test'] = PyTest 124 | cmdclass['flake8'] = Flake8 125 | 126 | install_reqs = get_install_requirements('requirements.txt') 127 | 128 | command_options = {'test': {'test_suite': ('setup.py', 'tests'), 'cov': ('setup.py', MAIN_PACKAGE)}} 129 | if JUNIT_XML: 130 | command_options['test']['junitxml'] = 'setup.py', 'junit.xml' 131 | if COVERAGE_XML: 132 | command_options['test']['cov_xml'] = 'setup.py', True 133 | if COVERAGE_HTML: 134 | command_options['test']['cov_html'] = 'setup.py', True 135 | 136 | setup( 137 | name=NAME, 138 | version=VERSION, 139 | url=URL, 140 | description=DESCRIPTION, 141 | author=AUTHOR, 142 | author_email=EMAIL, 143 | license=LICENSE, 144 | keywords=KEYWORDS, 145 | long_description=read('README.rst'), 146 | classifiers=CLASSIFIERS, 147 | test_suite='tests', 148 | packages=setuptools.find_packages(exclude=['tests', 'tests.*']), 149 | install_requires=install_reqs, 150 | setup_requires=['flake8'], 151 | cmdclass=cmdclass, 152 | tests_require=['pytest-cov', 'pytest>=3.6.3', 'mock', 'responses'], 153 | command_options=command_options, 154 | entry_points={'console_scripts': CONSOLE_SCRIPTS, 155 | 'senza.templates': ['bgapp = senza.templates.bgapp', 156 | 'postgresapp = senza.templates.postgresapp', 157 | 'rediscluster = senza.templates.rediscluster', 158 | 'redisnode = senza.templates.redisnode', 159 | 'webapp = senza.templates.webapp']}, 160 | ) 161 | 162 | 163 | if __name__ == '__main__': 164 | setup_package() 165 | -------------------------------------------------------------------------------- /tests/test_autoscaling.py: -------------------------------------------------------------------------------- 1 | import copy 2 | import click 3 | import pytest 4 | 5 | from unittest.mock import MagicMock 6 | 7 | from senza.components.auto_scaling_group import component_auto_scaling_group 8 | 9 | 10 | def test_resource_overrides_autoscaling_policy(): 11 | definition = { 12 | "Resources": { 13 | "FooScaleUp": { 14 | "Properties": { 15 | "AdjustmentType": "ChangeInCapacity", 16 | "AutoScalingGroupName": { 17 | "Ref": "Foo" 18 | }, 19 | "Cooldown": "180", 20 | "ScalingAdjustment": "2" 21 | }, 22 | "Type": "AWS::AutoScaling::ScalingPolicy" 23 | }, 24 | "FooScaleDown": { 25 | "Properties": { 26 | "AdjustmentType": "ChangeInCapacity", 27 | "AutoScalingGroupName": { 28 | "Ref": "Foo" 29 | }, 30 | "Cooldown": "90", 31 | "ScalingAdjustment": "-3" 32 | }, 33 | "Type": "AWS::AutoScaling::ScalingPolicy" 34 | }, 35 | } 36 | } 37 | 38 | expected = copy.copy(definition["Resources"]) 39 | 40 | configuration = { 41 | 'Name': 'Foo', 42 | 'InstanceType': 't2.micro', 43 | 'Image': 'foo', 44 | 'AutoScaling': { 45 | 'Minimum': 2, 46 | 'Maximum': 10, 47 | } 48 | } 49 | 50 | args = MagicMock() 51 | args.region = "foo" 52 | 53 | info = { 54 | 'StackName': 'FooStack', 55 | 'StackVersion': 'FooVersion' 56 | } 57 | 58 | result = component_auto_scaling_group( 59 | definition, configuration, args, info, False, MagicMock()) 60 | 61 | assert 
result["Resources"]["FooScaleUp"] == expected["FooScaleUp"] 62 | assert result["Resources"]["FooScaleDown"] == expected["FooScaleDown"] 63 | 64 | 65 | def test_resource_overrides_autoscaling_policy_with_incorrect_ref(): 66 | definition = { 67 | "Resources": { 68 | "FooScaleUp": { 69 | "Properties": { 70 | "AdjustmentType": "ChangeInCapacity", 71 | "AutoScalingGroupName": { 72 | "Ref": "NotFoo" 73 | }, 74 | "Cooldown": "180", 75 | "ScalingAdjustment": "2" 76 | }, 77 | "Type": "AWS::AutoScaling::ScalingPolicy" 78 | }, 79 | } 80 | } 81 | 82 | expected = copy.copy(definition["Resources"]["FooScaleUp"]) 83 | 84 | configuration = { 85 | 'Name': 'Foo', 86 | 'InstanceType': 't2.micro', 87 | 'Image': 'foo', 88 | 'AutoScaling': { 89 | 'Minimum': 2, 90 | 'Maximum': 10, 91 | } 92 | } 93 | 94 | args = MagicMock() 95 | args.region = "foo" 96 | 97 | info = { 98 | 'StackName': 'FooStack', 99 | 'StackVersion': 'FooVersion' 100 | } 101 | 102 | with pytest.raises(click.exceptions.UsageError): 103 | result = component_auto_scaling_group( 104 | definition, configuration, args, info, False, MagicMock()) 105 | 106 | 107 | def test_resource_overrides_cpu_alarm(): 108 | definition = { 109 | "Resources": { 110 | "FooCPUAlarmLow": { 111 | "Properties": { 112 | "AlarmActions": [ 113 | { 114 | "Ref": "FooScaleDown" 115 | } 116 | ], 117 | "AlarmDescription": "Scale-down if CPU < 30% for 10.0 minutes (Average)", 118 | "ComparisonOperator": "LessThanThreshold", 119 | "Dimensions": [ 120 | { 121 | "Name": "AutoScalingGroupName", 122 | "Value": { 123 | "Ref": "Foo" 124 | } 125 | } 126 | ], 127 | "EvaluationPeriods": 10, 128 | "MetricName": "CPUUtilization", 129 | "Namespace": "AWS/EC2", 130 | "Period": 60, 131 | "Statistic": "Average", 132 | "Threshold": 30 133 | }, 134 | "Type": "AWS::CloudWatch::Alarm" 135 | }, 136 | } 137 | } 138 | 139 | expected = copy.copy(definition["Resources"]["FooCPUAlarmLow"]) 140 | 141 | configuration = { 142 | 'Name': 'Foo', 143 | 'InstanceType': 't2.micro', 144 | 'Image': 'foo', 145 | 'AutoScaling': { 146 | 'Minimum': 2, 147 | 'Maximum': 10, 148 | 'MetricType': 'NetworkIn', 149 | 'Period': 60, 150 | 'EvaluationPeriods': 10, 151 | 'ScaleUpThreshold': '50 TB', 152 | 'ScaleDownThreshold': '10', 153 | 'Statistic': 'Maximum' 154 | } 155 | } 156 | 157 | args = MagicMock() 158 | args.region = "foo" 159 | 160 | info = { 161 | 'StackName': 'FooStack', 162 | 'StackVersion': 'FooVersion' 163 | } 164 | 165 | result = component_auto_scaling_group( 166 | definition, configuration, args, info, False, MagicMock()) 167 | 168 | assert "FooCPUAlarmLow" in result["Resources"] 169 | assert result["Resources"]["FooCPUAlarmLow"] == expected 170 | -------------------------------------------------------------------------------- /tests/test_configuration.py: -------------------------------------------------------------------------------- 1 | from mock import MagicMock, mock_open 2 | 3 | import pytest 4 | import senza.configuration 5 | from senza.exceptions import InvalidConfigKey 6 | 7 | 8 | class MockConfig: 9 | 10 | def __init__(self): 11 | self.open = mock_open(read_data='{"section": {"key": "value"}}') 12 | 13 | @property 14 | def parent(self): 15 | return self 16 | 17 | def mkdir(self, *args, **kwargs): 18 | return True 19 | 20 | 21 | def test_dict(): 22 | config = senza.configuration.Configuration(MockConfig()) 23 | assert config.raw_dict == {'section': {'key': 'value'}} 24 | assert len(config) == 1 25 | assert next(iter(config)) == 'section' 26 | 27 | 28 | def test_dict_file_not_found(): 29 | m_config = 
MockConfig() 30 | m_config.open.side_effect = FileNotFoundError 31 | config = senza.configuration.Configuration(m_config) 32 | assert config.raw_dict == {} 33 | assert len(config) == 0 34 | 35 | 36 | def test_get(): 37 | config = senza.configuration.Configuration(MockConfig()) 38 | assert config['section.key'] == 'value' 39 | 40 | 41 | def test_get_bad_key(): 42 | config = senza.configuration.Configuration(MockConfig()) 43 | with pytest.raises(InvalidConfigKey): 44 | config['key'] 45 | 46 | 47 | def test_set(): 48 | mock = MockConfig() 49 | config = senza.configuration.Configuration(mock) 50 | config['section.new_key'] = 'other_value' 51 | mock.open.assert_called_with('w+') 52 | 53 | # new sections don't raise errors 54 | config['section2.new_key'] = 'other_value' 55 | 56 | 57 | def test_del(): 58 | mock = MockConfig() 59 | config = senza.configuration.Configuration(mock) 60 | del config['section.key'] 61 | mock.open.assert_called_with('w+') 62 | 63 | with pytest.raises(KeyError): 64 | del config['section2.new_key'] 65 | -------------------------------------------------------------------------------- /tests/test_docker.py: -------------------------------------------------------------------------------- 1 | from unittest.mock import MagicMock 2 | 3 | import requests 4 | 5 | from senza.docker import docker_image_exists 6 | 7 | 8 | def test_docker_image_exists(monkeypatch): 9 | get = MagicMock() 10 | monkeypatch.setattr('requests.get', get) 11 | 12 | get.return_value = MagicMock(name='response') 13 | get.return_value.json = lambda: {'tags': ['1.0']} 14 | assert docker_image_exists('my-registry/foo/bar:1.0') is True 15 | 16 | get.side_effect = requests.HTTPError() 17 | assert docker_image_exists('foo/bar:1.0') is False 18 | 19 | get.side_effect = requests.HTTPError() 20 | assert docker_image_exists('my-registry/foo/bar:1.0') is False 21 | -------------------------------------------------------------------------------- /tests/test_manaus/test_boto_proxy.py: -------------------------------------------------------------------------------- 1 | from unittest.mock import MagicMock 2 | 3 | import botocore.exceptions 4 | import pytest 5 | from senza.manaus.boto_proxy import BotoClientProxy 6 | 7 | 8 | @pytest.fixture(autouse=True) 9 | def mock_boto_client(monkeypatch): 10 | m = MagicMock(p=42) 11 | m.return_value = m 12 | monkeypatch.setattr('boto3.client', m) 13 | return m 14 | 15 | 16 | def test_proxy(mock_boto_client: MagicMock): 17 | proxy = BotoClientProxy('test') 18 | mock_boto_client.assert_called_once_with('test') 19 | proxy.random_test(42) 20 | mock_boto_client.random_test.assert_called_once_with(42) 21 | assert proxy.random_test is not mock_boto_client.random_test 22 | assert proxy.p is mock_boto_client.p 23 | 24 | 25 | def test_throttling(mock_boto_client: MagicMock, monkeypatch): 26 | monkeypatch.setattr('senza.manaus.boto_proxy.sleep', MagicMock()) 27 | i = 0 28 | 29 | def throttled(arg): 30 | nonlocal i 31 | if i < 3: 32 | i += 1 33 | raise botocore.exceptions.ClientError( 34 | {'Error': {'Code': 'Throttling'}}, 35 | 'testing' 36 | ) 37 | else: 38 | return arg 39 | 40 | mock_boto_client.throttled.side_effect = throttled 41 | proxy = BotoClientProxy('test') 42 | mock_boto_client.assert_called_once_with('test') 43 | assert proxy.throttled(42) == 42 44 | mock_boto_client.throttled.assert_called_with(42) 45 | assert mock_boto_client.throttled.call_count == 4 46 | 47 | 48 | def test_throttling_forever(mock_boto_client: MagicMock, monkeypatch): 49 | 
monkeypatch.setattr('senza.manaus.boto_proxy.sleep', MagicMock()) 50 | 51 | def throttled(arg): 52 | raise botocore.exceptions.ClientError( 53 | {'Error': {'Code': 'Throttling'}}, 54 | 'testing' 55 | ) 56 | 57 | mock_boto_client.throttled.side_effect = throttled 58 | proxy = BotoClientProxy('test') 59 | 60 | with pytest.raises(botocore.exceptions.ClientError): 61 | proxy.throttled(42) 62 | mock_boto_client.throttled.assert_called_with(42) 63 | assert mock_boto_client.throttled.call_count == 5 64 | 65 | 66 | def test_random_error(mock_boto_client: MagicMock, monkeypatch): 67 | monkeypatch.setattr('senza.manaus.boto_proxy.sleep', MagicMock()) 68 | 69 | def throttled(arg): 70 | raise botocore.exceptions.ClientError( 71 | {'Error': {'Code': "everyday i'm shuffling"}}, 72 | 'testing' 73 | ) 74 | 75 | mock_boto_client.throttled.side_effect = throttled 76 | proxy = BotoClientProxy('test') 77 | 78 | with pytest.raises(botocore.exceptions.ClientError): 79 | proxy.throttled(42) 80 | mock_boto_client.throttled.assert_called_with(42) 81 | assert mock_boto_client.throttled.call_count == 1 82 | -------------------------------------------------------------------------------- /tests/test_manaus/test_ec2.py: -------------------------------------------------------------------------------- 1 | from unittest.mock import MagicMock 2 | 3 | import pytest 4 | from senza.manaus.ec2 import EC2, EC2VPC 5 | from senza.manaus.exceptions import VPCError 6 | 7 | 8 | def test_from_boto_vpc(): 9 | mock_vpc = MagicMock() 10 | mock_vpc.vpc_id = 'vpc-id' 11 | mock_vpc.is_default = True 12 | mock_vpc.tags = [{'Key': 'mykey', 'Value': 'myvalue'}, 13 | {'Key': 'theanswer', 'Value': '42'}, 14 | {'Key': 'Name', 'Value': 'my-vpc'}] 15 | vpc = EC2VPC.from_boto_vpc(mock_vpc) 16 | 17 | assert vpc.vpc_id == 'vpc-id' 18 | assert vpc.is_default 19 | assert vpc.tags['mykey'] == 'myvalue' 20 | assert vpc.tags['theanswer'] == '42' 21 | assert vpc.name == 'my-vpc' 22 | 23 | 24 | def test_get_default_vpc(monkeypatch): 25 | mock_vpc1 = MagicMock() 26 | mock_vpc1.vpc_id = 'vpc-id1' 27 | mock_vpc1.is_default = True 28 | mock_vpc1.tags = [] 29 | 30 | mock_vpc2 = MagicMock() 31 | mock_vpc2.vpc_id = 'vpc-id2' 32 | mock_vpc2.is_default = False 33 | mock_vpc2.tags = [] 34 | 35 | mock_vpc3 = MagicMock() 36 | mock_vpc3.vpc_id = 'vpc-id3' 37 | mock_vpc3.is_default = False 38 | mock_vpc3.tags = [] 39 | 40 | mock_vpc4 = MagicMock() 41 | mock_vpc4.vpc_id = 'vpc-id4' 42 | mock_vpc4.is_default = True 43 | mock_vpc4.tags = None 44 | 45 | m_resource = MagicMock() 46 | m_resource.return_value = m_resource 47 | monkeypatch.setattr('boto3.resource', m_resource) 48 | 49 | ec2 = EC2('eu-test-1') 50 | 51 | # return default vpc 52 | m_resource.vpcs.all.return_value = [mock_vpc1, mock_vpc2] 53 | vpc1 = ec2.get_default_vpc() 54 | assert vpc1.vpc_id == 'vpc-id1' 55 | 56 | # ony one, non default 57 | m_resource.vpcs.all.return_value = [mock_vpc2] 58 | vpc2 = ec2.get_default_vpc() 59 | assert vpc2.vpc_id == 'vpc-id2' 60 | 61 | # no vpcs 62 | m_resource.vpcs.all.return_value = [] 63 | with pytest.raises(VPCError) as exc_info: 64 | ec2.get_default_vpc() 65 | assert str(exc_info.value) == "Can't find any VPC!" 
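    # the exc_info captured in the "multiple vpcs" block below is only asserted
    # after the "no tags" case, so the assertion order differs from the call order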
66 | 67 | # multiple vpcs 68 | m_resource.vpcs.all.return_value = [mock_vpc2, mock_vpc3] 69 | with pytest.raises(VPCError) as exc_info: 70 | ec2.get_default_vpc() 71 | 72 | # no tags in vpc return default vpc 73 | m_resource.vpcs.all.return_value = [mock_vpc4, mock_vpc2] 74 | vpc3 = ec2.get_default_vpc() 75 | assert vpc3.vpc_id == 'vpc-id4' 76 | 77 | assert str(exc_info.value) == ("Multiple VPCs are only supported if one " 78 | "VPC is the default VPC (IsDefault=true)!") 79 | 80 | 81 | def test_get_all_vpc(monkeypatch): 82 | mock_vpc1 = MagicMock() 83 | mock_vpc1.vpc_id = 'vpc-id1' 84 | mock_vpc1.is_default = True 85 | mock_vpc1.tags = [] 86 | 87 | mock_vpc2 = MagicMock() 88 | mock_vpc2.vpc_id = 'vpc-id2' 89 | mock_vpc2.is_default = False 90 | mock_vpc2.tags = [] 91 | 92 | mock_vpc3 = MagicMock() 93 | mock_vpc3.vpc_id = 'vpc-id3' 94 | mock_vpc3.is_default = False 95 | mock_vpc3.tags = [] 96 | 97 | m_resource = MagicMock() 98 | m_resource.return_value = m_resource 99 | monkeypatch.setattr('boto3.resource', m_resource) 100 | 101 | ec2 = EC2('eu-test-1') 102 | 103 | m_resource.vpcs.all.return_value = [mock_vpc1, mock_vpc2, mock_vpc3] 104 | vpcs = list(ec2.get_all_vpcs()) 105 | assert len(vpcs) == 3 106 | -------------------------------------------------------------------------------- /tests/test_manaus/test_elb.py: -------------------------------------------------------------------------------- 1 | from datetime import datetime, timezone 2 | from unittest.mock import MagicMock 3 | 4 | import pytest 5 | from senza.manaus.elb import ELB 6 | from senza.manaus.exceptions import ELBNotFound 7 | 8 | 9 | def test_get_hosted_zone(monkeypatch): 10 | m_client = MagicMock() 11 | m_client.return_value = m_client 12 | description1 = {'AvailabilityZones': ['eu-central-1a', 'eu-central-1b'], 13 | 'BackendServerDescriptions': [], 14 | 'CanonicalHostedZoneName': 'example.eu-central-1.elb.amazonaws.com', 15 | 'CanonicalHostedZoneNameID': 'Z215JYRZR1TBD5', 16 | 'CreatedTime': datetime(2016, 6, 30, 17 | 8, 56, 37, 260000, 18 | tzinfo=timezone.utc), 19 | 'DNSName': 'example.eu-central-1.elb.amazonaws.com', 20 | 'HealthCheck': {'HealthyThreshold': 2, 21 | 'Interval': 10, 22 | 'Target': 'HTTP:8080/health_check', 23 | 'Timeout': 5, 24 | 'UnhealthyThreshold': 2}, 25 | 'Instances': [{'InstanceId': 'i-0000'}], 26 | 'ListenerDescriptions': [ 27 | {'Listener': {'InstancePort': 8080, 28 | 'InstanceProtocol': 'HTTP', 29 | 'LoadBalancerPort': 443, 30 | 'Protocol': 'HTTPS', 31 | 'SSLCertificateId': 'arn:aws:iam::000:server-certificate/cert'}, 32 | 'PolicyNames': ['ELBSecurityPolicy-2015-05']}], 33 | 'LoadBalancerName': 'example-2', 34 | 'Policies': {'AppCookieStickinessPolicies': [], 35 | 'LBCookieStickinessPolicies': [], 36 | 'OtherPolicies': [ 37 | 'ELBSecurityPolicy-2015-05']}, 38 | 'Scheme': 'internet-facing', 39 | 'SecurityGroups': ['sg-a97d82c1'], 40 | 'SourceSecurityGroup': {'GroupName': 'app-example-lb', 41 | 'OwnerAlias': '000'}, 42 | 'Subnets': ['subnet-0000', 'subnet-0000'], 43 | 'VPCId': 'vpc-0000'} 44 | 45 | description2 = {'AvailabilityZones': ['eu-central-1a', 'eu-central-1b'], 46 | 'BackendServerDescriptions': [], 47 | 'CanonicalHostedZoneName': 'test.eu-central-1.elb.amazonaws.com', 48 | 'CanonicalHostedZoneNameID': 'ABCDWRONG', 49 | 'CreatedTime': datetime(2016, 6, 30, 50 | 8, 56, 37, 260000, 51 | tzinfo=timezone.utc), 52 | 'DNSName': 'test.eu-central-1.elb.amazonaws.com', 53 | 'HealthCheck': {'HealthyThreshold': 2, 54 | 'Interval': 10, 55 | 'Target': 'HTTP:8080/health_check', 56 | 'Timeout': 5, 57 | 
'UnhealthyThreshold': 2}, 58 | 'Instances': [{'InstanceId': 'i-0000'}], 59 | 'ListenerDescriptions': [ 60 | {'Listener': {'InstancePort': 8080, 61 | 'InstanceProtocol': 'HTTP', 62 | 'LoadBalancerPort': 443, 63 | 'Protocol': 'HTTPS', 64 | 'SSLCertificateId': 'arn:aws:iam::000:server-certificate/cert'}, 65 | 'PolicyNames': ['ELBSecurityPolicy-2015-05']}], 66 | 'LoadBalancerName': 'test-2', 67 | 'Policies': {'AppCookieStickinessPolicies': [], 68 | 'LBCookieStickinessPolicies': [], 69 | 'OtherPolicies': [ 70 | 'ELBSecurityPolicy-2015-05']}, 71 | 'Scheme': 'internet-facing', 72 | 'SecurityGroups': ['sg-a97d82c1'], 73 | 'SourceSecurityGroup': {'GroupName': 'app-example-lb', 74 | 'OwnerAlias': '576069677832'}, 75 | 'Subnets': ['subnet-0000', 'subnet-0000'], 76 | 'VPCId': 'vpc-0000'} 77 | 78 | m_client.describe_load_balancers.side_effect = [ 79 | {'ResponseMetadata': {'HTTPStatusCode': 200, 80 | 'RequestId': 'FakeId'}, 81 | 'LoadBalancerDescriptions': [description1], 82 | 'NextMarker': 'something'}, 83 | {'ResponseMetadata': {'HTTPStatusCode': 200, 84 | 'RequestId': 'FakeId'}, 85 | 'LoadBalancerDescriptions': [description2]}, 86 | {'ResponseMetadata': {'HTTPStatusCode': 200, 87 | 'RequestId': 'FakeId'}, 88 | 'LoadBalancerDescriptions': [description1]}, 89 | ] 90 | monkeypatch.setattr('boto3.client', m_client) 91 | 92 | elb = ELB.get_by_dns_name('example.eu-central-1.elb.amazonaws.com') 93 | assert elb.hosted_zone.id == "Z215JYRZR1TBD5" 94 | assert elb.region == 'eu-central-1' 95 | 96 | with pytest.raises(ELBNotFound): 97 | ELB.get_by_dns_name('example.eu-west-1.elb.amazonaws.com') 98 | -------------------------------------------------------------------------------- /tests/test_patch.py: -------------------------------------------------------------------------------- 1 | import codecs 2 | 3 | from unittest.mock import MagicMock 4 | import pytest 5 | import base64 6 | 7 | from senza.exceptions import InvalidUserDataType 8 | from senza.patch import patch_auto_scaling_group, patch_elastigroup 9 | from senza.spotinst.components.elastigroup_api import SpotInstAccountData 10 | 11 | 12 | def test_patch_auto_scaling_group(monkeypatch): 13 | 14 | lc = {'ImageId': 'originalimage', 'LaunchConfigurationName': 'originallc', 15 | 'UserData': codecs.encode(b'myuserdata', 'base64').decode('utf-8')} 16 | result = {'LaunchConfigurations': [lc]} 17 | 18 | asg = MagicMock() 19 | asg.describe_launch_configurations.return_value = result 20 | 21 | new_lc = {} 22 | 23 | def create_lc(**kwargs): 24 | new_lc.update(kwargs) 25 | 26 | asg.create_launch_configuration = create_lc 27 | monkeypatch.setattr('boto3.client', lambda x, region: asg) 28 | group = {'AutoScalingGroupName': 'myasg', 'LaunchConfigurationName': 'originallc'} 29 | properties = {'ImageId': 'mynewimage'} 30 | patch_auto_scaling_group(group, 'myregion', properties) 31 | 32 | assert new_lc['UserData'] == 'myuserdata' 33 | 34 | 35 | def test_patch_elastigroup(monkeypatch): 36 | spotinst_account_data = SpotInstAccountData('act-zwk', 'fake-token') 37 | elastigroup_id = 'sig-xfy' 38 | 39 | new_lc = {} 40 | 41 | def create_lc(properties_to_patch, *args): 42 | new_lc.update(properties_to_patch) 43 | 44 | monkeypatch.setattr('senza.spotinst.components.elastigroup_api.patch_elastigroup', create_lc) 45 | 46 | properties = {'ImageId': 'mynewimage', 'InstanceType': 'mynewinstancetyoe', 'UserData': {'source': 'newsource >'}} 47 | group = {'compute': { 48 | 'launchSpecification': { 49 | 'userData': base64.b64encode('#firstline\nsource: 
oldsource\n'.encode('utf-8')).decode('utf-8'), 50 | 'imageId': 'myoldimage' 51 | }, 52 | 'instanceTypes': { 53 | 'ondemand': 'myoldinstancetyoe' 54 | } 55 | } 56 | } 57 | changed = patch_elastigroup(group, properties, elastigroup_id, spotinst_account_data) 58 | 59 | assert changed 60 | assert new_lc['ImageId'] == 'mynewimage' 61 | assert new_lc['UserData'] == base64.b64encode('#firstline\nsource: newsource >\n'.encode('utf-8')).decode('utf-8') 62 | assert new_lc['InstanceType'] == 'mynewinstancetyoe' 63 | 64 | 65 | def test_patch_auto_scaling_group_taupage_config(monkeypatch): 66 | 67 | lc = {'ImageId': 'originalimage', 'LaunchConfigurationName': 'originallc', 68 | 'UserData': codecs.encode(b'#firstline\nsource: oldsource', 'base64').decode('utf-8')} 69 | result = {'LaunchConfigurations': [lc]} 70 | 71 | asg = MagicMock() 72 | asg.describe_launch_configurations.return_value = result 73 | 74 | new_lc = {} 75 | 76 | def create_lc(**kwargs): 77 | new_lc.update(kwargs) 78 | 79 | asg.create_launch_configuration = create_lc 80 | monkeypatch.setattr('boto3.client', lambda x, region: asg) 81 | group = {'AutoScalingGroupName': 'myasg', 'LaunchConfigurationName': 'originallc'} 82 | properties = {'UserData': {'source': 'newsource'}} 83 | patch_auto_scaling_group(group, 'myregion', properties) 84 | 85 | assert new_lc['UserData'] == '#firstline\nsource: newsource\n' 86 | 87 | 88 | def test_patch_user_data_wrong_type(monkeypatch): 89 | 90 | lc = {'ImageId': 'originalimage', 'LaunchConfigurationName': 'originallc', 91 | 'UserData': codecs.encode(b'#firstline\nsource: oldsource', 'base64').decode('utf-8')} 92 | result = {'LaunchConfigurations': [lc]} 93 | 94 | asg = MagicMock() 95 | asg.describe_launch_configurations.return_value = result 96 | 97 | new_lc = {} 98 | 99 | def create_lc(**kwargs): 100 | new_lc.update(kwargs) 101 | 102 | asg.create_launch_configuration = create_lc 103 | monkeypatch.setattr('boto3.client', lambda x, region: asg) 104 | group = {'AutoScalingGroupName': 'myasg', 'LaunchConfigurationName': 'originallc'} 105 | properties = {'UserData': "it's a string"} 106 | with pytest.raises(InvalidUserDataType) as exc_info: 107 | patch_auto_scaling_group(group, 'myregion', properties) 108 | 109 | assert str(exc_info.value) == ('Current user data is a map but provided ' 110 | 'user data is a string.') 111 | 112 | 113 | def test_patch_user_data_wrong_type_elastigroup(monkeypatch): 114 | spotinst_account_data = SpotInstAccountData('act-zwk', 'fake-token') 115 | elastigroup_id = 'sig-xfy' 116 | 117 | properties = {'UserData': "it's a string"} 118 | group = {'compute': { 119 | 'launchSpecification': { 120 | 'userData': codecs.encode(b'#firstline\nsource: oldsource', 'base64').decode('utf-8'), 121 | 'imageId': 'myoldimage' 122 | }, 123 | 'instanceTypes': { 124 | 'ondemand': 'myoldinstancetyoe' 125 | } 126 | } 127 | } 128 | with pytest.raises(InvalidUserDataType) as exc_info: 129 | patch_elastigroup(group, properties, elastigroup_id, spotinst_account_data) 130 | 131 | assert str(exc_info.value) == ('Current user data is a map but provided ' 132 | 'user data is a string.') -------------------------------------------------------------------------------- /tests/test_respawn.py: -------------------------------------------------------------------------------- 1 | import copy 2 | 3 | from unittest.mock import MagicMock 4 | from senza.respawn import respawn_auto_scaling_group, respawn_elastigroup, respawn_stateful_elastigroup 5 | from senza.spotinst.components.elastigroup_api import SpotInstAccountData 6 | 7 
| 8 | def test_respawn_auto_scaling_group(monkeypatch): 9 | 10 | inst = {'InstanceId': 'myinst-1', 'LaunchConfigurationName': 'lc-1', 'LifecycleState': 'InService'} 11 | group = {'LaunchConfigurationName': 'lc-2', 'Instances': [inst], 'MinSize': 1, 'MaxSize': 1, 'DesiredCapacity': 1, 12 | 'LoadBalancerNames': ['myelb']} 13 | groups = {'AutoScalingGroups': [group]} 14 | instance_states = [{'InstanceId': 'myinst-1', 'State': 'InService'}, 15 | {'InstanceId': 'myinst-2', 'State': 'InService'}] 16 | asg = MagicMock() 17 | asg.describe_auto_scaling_groups.return_value = groups 18 | 19 | def terminate_instance(InstanceId, **kwargs): 20 | for i in range(len(instance_states)): 21 | if instance_states[i]['InstanceId'] == InstanceId: 22 | del instance_states[i] 23 | break 24 | 25 | asg.terminate_instance_in_auto_scaling_group = terminate_instance 26 | elb = MagicMock() 27 | elb.describe_instance_health.return_value = {'InstanceStates': instance_states} 28 | services = {'autoscaling': asg, 'elb': elb} 29 | 30 | def client(service, region): 31 | assert region == 'myregion' 32 | return services[service] 33 | monkeypatch.setattr('boto3.client', client) 34 | monkeypatch.setattr('time.sleep', lambda s: s) 35 | respawn_auto_scaling_group('myasg', 'myregion') 36 | 37 | 38 | def test_respawn_auto_scaling_group_without_elb(monkeypatch): 39 | 40 | inst = {'InstanceId': 'myinst-1', 'LaunchConfigurationName': 'lc-1', 'LifecycleState': 'InService'} 41 | instances = [inst] 42 | group = {'AutoScalingGroupName': 'myasg', 43 | 'LaunchConfigurationName': 'lc-2', 'Instances': instances, 'MinSize': 1, 'MaxSize': 1, 'DesiredCapacity': 1, 44 | 'LoadBalancerNames': []} 45 | groups = {'AutoScalingGroups': [group]} 46 | asg = MagicMock() 47 | asg.describe_auto_scaling_groups.return_value = groups 48 | 49 | def update_group(**kwargs): 50 | instances.append({'InstanceId': 'myinst-2', 'LaunchConfigurationName': 'lc-2', 'LifecycleState': 'InService'}) 51 | 52 | def terminate_instance(InstanceId, **kwargs): 53 | for i in range(len(instances)): 54 | if instances[i]['InstanceId'] == InstanceId: 55 | del instances[i] 56 | break 57 | 58 | asg.update_auto_scaling_group = update_group 59 | asg.terminate_instance_in_auto_scaling_group = terminate_instance 60 | services = {'autoscaling': asg} 61 | def client(service, *args): 62 | return services[service] 63 | monkeypatch.setattr('boto3.client', client) 64 | monkeypatch.setattr('time.sleep', lambda s: s) 65 | respawn_auto_scaling_group('myasg', 'myregion') 66 | 67 | 68 | def test_respawn_elastigroup_with_stateful_instances(monkeypatch): 69 | elastigroup_id = 'sig-xfy' 70 | stack_name = 'my-app-stack' 71 | region = 'my-region' 72 | batch_size = None 73 | 74 | spotinst_account = SpotInstAccountData('act-zwk', 'fake-token') 75 | spotinst_account_mock = MagicMock() 76 | spotinst_account_mock.return_value = spotinst_account 77 | 78 | monkeypatch.setattr('senza.spotinst.components.elastigroup_api.get_spotinst_account_data', spotinst_account_mock) 79 | 80 | instances = [{ 81 | 'id': 'ssi-1abc9', 82 | 'instanceId': 'i-abcdef123', 83 | 'privateIp': '172.31.0.0', 84 | 'state': 'ACTIVE', 85 | '_subnetId': 'subnet-abc' 86 | }, { 87 | 'id': 'ssi-9xyz1', 88 | 'instanceId': 'i-123defabc', 89 | 'privateIp': '172.31.255.0', 90 | 'state': 'ACTIVE', 91 | '_subnetId': 'subnet-def' 92 | }, { 93 | 'id': 'ssi-2def8', 94 | 'instanceId': 'i-def321cba', 95 | 'privateIp': '172.31.128.0', 96 | 'state': 'ACTIVE', 97 | '_subnetId': 'subnet-abc' 98 | }] 99 | instances_waited_for = [] 100 | recycle_triggered_for = [] 
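    # The two fakes below stand in for the Spotinst API: recycle_stateful_instance
    # marks an instance as RECYCLING with a small tick counter, and
    # get_stateful_instances counts the ticks down before flipping it back to
    # ACTIVE, forcing the respawn code to wait for each recycle to finish.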
101 | 102 | def get_stateful_instances(*args): 103 | recycling_instances = [i for i in instances if i['state'] == 'RECYCLING'] 104 | for i in recycling_instances: 105 | i['_ticks_left'] -= 1 106 | if i['_ticks_left'] == 0: 107 | i['state'] = 'ACTIVE' 108 | instances_waited_for.append(i['id']) 109 | 110 | # return a snapshot of our internal state to avoid surprises 111 | return copy.deepcopy(instances) 112 | 113 | monkeypatch.setattr('senza.spotinst.components.elastigroup_api.get_stateful_instances', get_stateful_instances) 114 | 115 | def recycle_stateful_instance(gid, ssi, acc): 116 | assert any(i['id'] == ssi for i in instances), \ 117 | "stateful instance must be on the list for this group".format(ssi) 118 | 119 | in_subnet = None 120 | recycle_triggered_for.append(ssi) 121 | for i in instances: 122 | if i['id'] == ssi: 123 | i['state'] = 'RECYCLING' 124 | i['_ticks_left'] = 5 125 | in_subnet = i['_subnetId'] 126 | break 127 | 128 | assert all(i['_subnetId'] == in_subnet for i in instances if i['state'] == 'RECYCLING'), \ 129 | "all recycling instances should be in the same subnet" 130 | 131 | return [{'code': 200, 'message': 'OK'}] 132 | 133 | monkeypatch.setattr( 134 | 'senza.spotinst.components.elastigroup_api.recycle_stateful_instance', 135 | recycle_stateful_instance 136 | ) 137 | 138 | ec2_instances = { 139 | 'Reservations': [{ 140 | 'Instances': [{ 141 | 'InstanceId': 'i-def321cba', 142 | 'Placement': { 143 | 'AvailabilityZone': 'my-region-a' 144 | }, 145 | 'SubnetId': 'subnet-abc' 146 | }, { 147 | 'InstanceId': 'i-123defabc', 148 | 'Placement': { 149 | 'AvailabilityZone': 'my-region-b' 150 | }, 151 | 'SubnetId': 'subnet-def' 152 | }, { 153 | 'InstanceId': 'i-abcdef123', 154 | 'Placement': { 155 | 'AvailabilityZone': 'my-region-a' 156 | }, 157 | 'SubnetId': 'subnet-abc' 158 | }] 159 | }] 160 | } 161 | ec2 = MagicMock() 162 | ec2.describe_instances.return_value = ec2_instances 163 | services = {'ec2': ec2} 164 | def client(service, *args): 165 | return services[service] 166 | monkeypatch.setattr('boto3.client', client) 167 | monkeypatch.setattr('time.sleep', lambda s: s) 168 | 169 | batch_per_subnet = False 170 | respawn_elastigroup(elastigroup_id, stack_name, region, batch_size, batch_per_subnet) 171 | 172 | assert instances_waited_for == ['ssi-1abc9', 'ssi-2def8', 'ssi-9xyz1'] 173 | assert recycle_triggered_for == ['ssi-1abc9', 'ssi-2def8', 'ssi-9xyz1'] 174 | 175 | # now run the same test, but this time with batching 176 | instances_waited_for.clear() 177 | recycle_triggered_for.clear() 178 | 179 | batch_per_subnet = True 180 | respawn_elastigroup(elastigroup_id, stack_name, region, batch_size, batch_per_subnet) 181 | 182 | assert instances_waited_for == ['ssi-1abc9', 'ssi-2def8', 'ssi-9xyz1'] 183 | assert recycle_triggered_for == ['ssi-1abc9', 'ssi-2def8', 'ssi-9xyz1'] 184 | 185 | 186 | def test_respawn_elastigroup_no_stateful_instances(monkeypatch): 187 | elastigroup_id = 'sig-xfy' 188 | stack_name = 'my-app-stack' 189 | region = 'my-region' 190 | batch_size = 35 191 | batch_per_subnet = False 192 | 193 | spotinst_account = SpotInstAccountData('act-zwk', 'fake-token') 194 | spotinst_account_mock = MagicMock() 195 | spotinst_account_mock.return_value = spotinst_account 196 | 197 | monkeypatch.setattr('senza.spotinst.components.elastigroup_api.get_spotinst_account_data', spotinst_account_mock) 198 | 199 | get_stateful_instances_output = [] 200 | get_stateful_instances_output_mock = MagicMock() 201 | get_stateful_instances_output_mock.return_value = get_stateful_instances_output 
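    # An empty stateful-instance list makes respawn_elastigroup perform a regular
    # deploy instead: deploy() below returns a single deploy id and deploy_status()
    # is polled until it reports 100% progress (two runs of 50% each).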
202 | monkeypatch.setattr('senza.spotinst.components.elastigroup_api.get_stateful_instances', get_stateful_instances_output_mock) 203 | 204 | deploy_output = [{ 205 | 'id': 'deploy-1' 206 | }] 207 | deploy_output_mock = MagicMock() 208 | deploy_output_mock.return_value = deploy_output 209 | monkeypatch.setattr('senza.spotinst.components.elastigroup_api.deploy', deploy_output_mock) 210 | 211 | execution_data = { 212 | 'percentage': 0, 213 | 'runs': 0, 214 | 'status': 'starting' 215 | } 216 | 217 | def deploy_status(*args): 218 | execution_data['runs'] += 1 219 | execution_data['percentage'] += 50 220 | if execution_data['percentage'] == 100: 221 | execution_data['status'] = 'finished' 222 | else: 223 | execution_data['status'] = 'in_progress' 224 | return [{ 225 | 'id': args[0], 226 | 'status': execution_data['status'], 227 | 'progress': { 228 | 'value': execution_data['percentage'] 229 | } 230 | }] 231 | monkeypatch.setattr('senza.spotinst.components.elastigroup_api.deploy_status', deploy_status) 232 | respawn_elastigroup(elastigroup_id, stack_name, region, batch_size, batch_per_subnet) 233 | 234 | assert execution_data['runs'] == 2 235 | assert execution_data['percentage'] == 100 236 | -------------------------------------------------------------------------------- /tests/test_stups/test_piu.py: -------------------------------------------------------------------------------- 1 | from unittest.mock import MagicMock 2 | 3 | import pytest 4 | from senza.exceptions import PiuNotFound 5 | from senza.stups.piu import Piu 6 | 7 | 8 | def test_request_access(monkeypatch): 9 | m_call = MagicMock() 10 | monkeypatch.setattr('senza.stups.piu.call', m_call) 11 | 12 | Piu.request_access('127.0.0.1', 'no reason', None, True) 13 | m_call.assert_called_once_with(['piu', 'request-access', 14 | '127.0.0.1', 'no reason via senza', 15 | '--connect']) 16 | 17 | m_call.reset_mock() 18 | Piu.request_access('127.0.0.1', 'no reason', None, False) 19 | m_call.assert_called_once_with(['piu', 'request-access', 20 | '127.0.0.1', 'no reason via senza']) 21 | 22 | m_call.reset_mock() 23 | Piu.request_access('127.0.0.1', 'no reason', 'example.com', True) 24 | m_call.assert_called_once_with(['piu', 'request-access', 25 | '127.0.0.1', 'no reason via senza', 26 | '--connect', 27 | '-O', 'example.com']) 28 | 29 | 30 | def test_find_odd_host(monkeypatch): 31 | m_client = MagicMock() 32 | m_client.return_value = m_client 33 | hosted_zone1 = {'Config': {'PrivateZone': False}, 34 | 'CallerReference': '0000', 35 | 'ResourceRecordSetCount': 42, 36 | 'Id': '/hostedzone/random1', 37 | 'Name': 'example.com.'} 38 | mock_records = [{'Name': 'odd-eu-west-1.example.com.', 39 | 'ResourceRecords': [{'Value': '127.0.0.1'}], 40 | 'TTL': 600, 41 | 'Type': 'A'} 42 | ] 43 | m_client.list_hosted_zones.return_value = {'MaxItems': '100', 44 | 'ResponseMetadata': { 45 | 'HTTPStatusCode': 200, 46 | 'RequestId': 'FakeId'}, 47 | 'HostedZones': [hosted_zone1], 48 | 'IsTcallcated': False} 49 | m_client.list_resource_record_sets.return_value = { 50 | "ResourceRecordSets": mock_records} 51 | monkeypatch.setattr('boto3.client', m_client) 52 | 53 | odd_host = Piu.find_odd_host('eu-west-1') 54 | assert odd_host == 'odd-eu-west-1.example.com' 55 | 56 | no_odd_host = Piu.find_odd_host('moon-crater-1') 57 | assert no_odd_host is None 58 | 59 | 60 | def test_request_access_not_installed(monkeypatch): 61 | m_call = MagicMock() 62 | m_call.side_effect = FileNotFoundError 63 | monkeypatch.setattr('senza.stups.piu.call', m_call) 64 | 65 | with 
pytest.raises(PiuNotFound): 66 | Piu.request_access('127.0.0.1', 'no reason', None, True) 67 | m_call.assert_called_once_with(['piu', 'request-access', 68 | '127.0.0.1', 'no reason via senza', 69 | '--connect']) 70 | -------------------------------------------------------------------------------- /tests/test_subcommands/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/zalando-stups/senza/0134404b04382fcb9fc0675aacfb30480a079947/tests/test_subcommands/__init__.py -------------------------------------------------------------------------------- /tests/test_subcommands/test_config.py: -------------------------------------------------------------------------------- 1 | from os import makedirs 2 | from pathlib import Path 3 | 4 | import yaml 5 | from click.testing import CliRunner 6 | from senza.cli import cli 7 | from senza.subcommands.config import configuration 8 | 9 | from fixtures import disable_version_check # noqa: F401 10 | 11 | 12 | def create_config(app_dir): 13 | configuration.config_path = Path(app_dir) / 'config.yaml' 14 | makedirs(app_dir, exist_ok=True) 15 | with configuration.config_path.open('w+') as fd: 16 | fd.write('{"section": {"key": "value"}}') 17 | 18 | 19 | def read_config(app_dir): 20 | config_path = Path(app_dir) / 'config.yaml' 21 | with config_path.open() as fd: 22 | data = yaml.safe_load(fd) 23 | return data 24 | 25 | 26 | def test_get_config(disable_version_check): # noqa: F811 27 | runner = CliRunner() 28 | 29 | with runner.isolated_filesystem() as (test_dir): 30 | create_config(test_dir) 31 | result = runner.invoke(cli, 32 | ['config', 'section.key'], 33 | catch_exceptions=False) 34 | 35 | assert result.output == 'value\n' 36 | 37 | 38 | def test_get_config_not_found(disable_version_check): # noqa: F811 39 | runner = CliRunner() 40 | 41 | with runner.isolated_filesystem() as (test_dir): 42 | create_config(test_dir) 43 | result = runner.invoke(cli, 44 | ['config', 'section.404'], 45 | catch_exceptions=False) 46 | 47 | assert result.output == '' 48 | assert result.exit_code == 1 49 | 50 | 51 | def test_get_config_no_section(): 52 | runner = CliRunner() 53 | 54 | with runner.isolated_filesystem() as (test_dir): 55 | create_config(test_dir) 56 | result = runner.invoke(cli, 57 | ['config', '404'], 58 | catch_exceptions=False) 59 | 60 | assert "Error: key does not contain a section" in result.output 61 | 62 | 63 | def test_set_config(): 64 | runner = CliRunner() 65 | 66 | with runner.isolated_filesystem() as (test_dir): 67 | create_config(test_dir) 68 | result = runner.invoke(cli, 69 | ['config', 'section.new', 'value'], 70 | catch_exceptions=False) 71 | new_config = read_config(test_dir) 72 | 73 | assert new_config['section']['new'] == 'value' 74 | assert result.exit_code == 0 75 | 76 | 77 | def test_set_config_no_section(): 78 | runner = CliRunner() 79 | 80 | with runner.isolated_filesystem() as (test_dir): 81 | create_config(test_dir) 82 | result = runner.invoke(cli, 83 | ['config', 'new', 'value'], 84 | catch_exceptions=False) 85 | 86 | assert "Error: key does not contain a section" in result.output 87 | -------------------------------------------------------------------------------- /tests/test_subcommands/test_root.py: -------------------------------------------------------------------------------- 1 | from tempfile import TemporaryDirectory 2 | from unittest.mock import MagicMock 3 | 4 | from click.testing import CliRunner 5 | from pytest import fixture 6 | from requests.exceptions 
import HTTPError, Timeout 7 | from senza.subcommands.root import check_senza_version, cli 8 | 9 | from fixtures import disable_version_check # noqa: F401 10 | 11 | 12 | @fixture() 13 | def mock_get_app_dir(monkeypatch): 14 | mock = MagicMock() 15 | monkeypatch.setattr("senza.subcommands.root.click.get_app_dir", mock) 16 | return mock 17 | 18 | 19 | @fixture() 20 | def mock_get(monkeypatch): 21 | mock = MagicMock() 22 | mock.return_value = mock 23 | mock.json.return_value = {'releases': {'0.29': None, 24 | '0.42': None, 25 | '0.7': None}} 26 | monkeypatch.setattr("senza.subcommands.root.requests.get", mock) 27 | return mock 28 | 29 | 30 | @fixture() 31 | def mock_warning(monkeypatch): 32 | mock = MagicMock() 33 | monkeypatch.setattr("senza.subcommands.root.warning", mock) 34 | return mock 35 | 36 | 37 | @fixture() 38 | def mock_tty(monkeypatch): 39 | # check_senza_version only prints if we have a TTY 40 | monkeypatch.setattr('sys.stdout.isatty', lambda: True) 41 | 42 | 43 | def test_check_senza_version_notty(monkeypatch, mock_get_app_dir, mock_get, mock_warning): 44 | with TemporaryDirectory() as temp_dir: 45 | mock_get_app_dir.return_value = temp_dir 46 | monkeypatch.setattr("senza.subcommands.root.__file__", 47 | '/home/someuser/pymodules/root.py') 48 | check_senza_version("0.40") 49 | mock_warning.assert_not_called() 50 | 51 | 52 | def test_check_senza_version(monkeypatch, 53 | mock_get_app_dir, mock_get, mock_warning, mock_tty): 54 | 55 | with TemporaryDirectory() as temp_dir_1: 56 | mock_get_app_dir.return_value = temp_dir_1 57 | check_senza_version("0.42") 58 | mock_warning.assert_not_called() 59 | with open(temp_dir_1 + '/pypi_version') as fd: 60 | assert fd.read() == '0.42' 61 | 62 | with TemporaryDirectory() as temp_dir_2: 63 | mock_get_app_dir.return_value = temp_dir_2 64 | check_senza_version("0.43") 65 | mock_warning.assert_not_called() 66 | 67 | with TemporaryDirectory() as temp_dir_3: 68 | mock_get_app_dir.return_value = temp_dir_3 69 | monkeypatch.setattr("senza.subcommands.root.__file__", 70 | '/home/someuser/pymodules/root.py') 71 | check_senza_version("0.40") 72 | mock_warning.assert_called_once_with( 73 | "Your senza version (0.40) is outdated. " 74 | "Please install the new one using 'pip3 install --upgrade stups-senza'" 75 | ) 76 | 77 | with TemporaryDirectory() as temp_dir_4: 78 | mock_get_app_dir.return_value = temp_dir_4 79 | mock_warning.reset_mock() 80 | monkeypatch.setattr("senza.subcommands.root.__file__", 81 | '/usr/pymodules/root.py') 82 | check_senza_version("0.40") 83 | mock_warning.assert_called_once_with( 84 | "Your senza version (0.40) is outdated. 
" 85 | "Please install the new one using " 86 | "'sudo pip3 install --upgrade stups-senza'" 87 | ) 88 | 89 | 90 | def test_check_senza_version_timeout(mock_get_app_dir, mock_get, mock_warning, mock_tty): 91 | with TemporaryDirectory() as temp_dir: 92 | mock_get_app_dir.return_value = temp_dir 93 | mock_get.side_effect = Timeout 94 | check_senza_version("0.2") 95 | mock_warning.assert_not_called() 96 | 97 | 98 | def test_check_senza_version_outdated_cache(monkeypatch, # noqa: F811 99 | mock_get_app_dir, 100 | mock_get, 101 | mock_warning, 102 | mock_tty): 103 | monkeypatch.setattr("senza.subcommands.root.__file__", 104 | '/usr/pymodules/root.py') 105 | with TemporaryDirectory() as temp_dir: 106 | mock_get_app_dir.return_value = temp_dir 107 | with open(temp_dir + '/pypi_version', 'w') as fd: 108 | fd.write('0.40') 109 | check_senza_version("0.40") # This should use the disk cache 110 | mock_warning.assert_not_called() 111 | 112 | monkeypatch.setattr("senza.subcommands.root.ONE_DAY", 0) 113 | check_senza_version("0.40") # This should use the API again 114 | 115 | mock_warning.assert_called_once_with( 116 | "Your senza version (0.40) is outdated. " 117 | "Please install the new one using " 118 | "'sudo pip3 install --upgrade stups-senza'" 119 | ) 120 | 121 | 122 | def test_check_senza_version_exception(monkeypatch, 123 | mock_get_app_dir, 124 | mock_get, 125 | mock_warning, 126 | mock_tty): 127 | mock_sentry = MagicMock() 128 | monkeypatch.setattr("senza.subcommands.root.sentry", mock_sentry) 129 | with TemporaryDirectory() as temp_dir: 130 | mock_get_app_dir.return_value = temp_dir 131 | mock_get.raise_for_status.side_effect = HTTPError(404, "Not Found") 132 | check_senza_version("0.2") 133 | mock_warning.assert_not_called() 134 | mock_sentry.captureException.assert_called_once_with() 135 | 136 | monkeypatch.setattr("senza.subcommands.root.sentry", None) 137 | with TemporaryDirectory() as temp_dir: 138 | mock_get_app_dir.return_value = temp_dir 139 | mock_get.raise_for_status.side_effect = HTTPError(404, "Not Found") 140 | check_senza_version("0.2") 141 | mock_warning.assert_not_called() 142 | 143 | 144 | def test_version(disable_version_check): # noqa: F811 145 | runner = CliRunner() 146 | result = runner.invoke(cli, ['--version']) 147 | assert result.output.startswith('Senza ') 148 | -------------------------------------------------------------------------------- /tests/test_templates.py: -------------------------------------------------------------------------------- 1 | import click 2 | from unittest.mock import MagicMock 3 | from senza.templates._helper import (create_mint_read_policy_document, get_mint_bucket_name, 4 | check_value, prompt, choice) 5 | from senza.templates.postgresapp import (ebs_optimized_supported, 6 | generate_random_password, 7 | set_default_variables, 8 | generate_definition, 9 | get_latest_image) 10 | 11 | 12 | def test_template_helper_get_mint_bucket_name(monkeypatch): 13 | monkeypatch.setattr('senza.templates._helper.get_account_id', MagicMock(return_value=123)) 14 | monkeypatch.setattr('senza.templates._helper.get_account_alias', MagicMock(return_value='myorg-foobar')) 15 | s3 = MagicMock() 16 | s3.return_value.Bucket.return_value.name = 'myorg-stups-mint-123-myregion' 17 | monkeypatch.setattr('boto3.resource', s3) 18 | 19 | assert 'myorg-stups-mint-123-myregion' == get_mint_bucket_name('myregion'), 'Find Mint Bucket' 20 | 21 | s3 = MagicMock() 22 | s3.return_value.Bucket.return_value.load.side_effect = Exception() 23 | monkeypatch.setattr('boto3.resource', s3) 
24 | assert 'myorg-stups-mint-123-otherregion' == get_mint_bucket_name('otherregion'), \ 25 | 'Return Name of Bucket, if no other Bucket found' 26 | 27 | exist_bucket = MagicMock() 28 | exist_bucket.name = 'myorg-stups-mint-123-myregion' 29 | s3 = MagicMock() 30 | s3.return_value.Bucket.return_value.load.side_effect = Exception() 31 | s3.return_value.buckets.all.return_value = [exist_bucket] 32 | monkeypatch.setattr('boto3.resource', s3) 33 | assert 'myorg-stups-mint-123-myregion' == get_mint_bucket_name('otherregion'), 'Find Mint bucket in other Region' 34 | 35 | 36 | def test_template_helper_get_iam_role_policy(monkeypatch): 37 | expected_policy = { 38 | "Version": "2012-10-17", 39 | "Statement": [ 40 | { 41 | "Sid": "AllowMintRead", 42 | "Effect": "Allow", 43 | "Action": [ 44 | "s3:GetObject" 45 | ], 46 | "Resource": [ 47 | "arn:aws:s3:::bucket-name/myapp/*" 48 | ] 49 | }, 50 | ] 51 | } 52 | 53 | assert expected_policy == create_mint_read_policy_document('myapp', 'bucket-name', 'myregion') 54 | 55 | 56 | def test_template_helper_check_value(): 57 | f = check_value(5, '^[A-Z]+$') 58 | assert 'ABC' == f('ABC') 59 | try: 60 | f('abc') 61 | except click.UsageError: 62 | pass 63 | except: 64 | assert False, 'check_value raise with a unkown exception' 65 | else: 66 | assert False, 'check_value doesnot return with a raise' 67 | 68 | try: 69 | f('ABCABC') 70 | except click.UsageError: 71 | pass 72 | except: 73 | assert False, 'check_value raise with a unkown exception' 74 | else: 75 | assert False, 'check_value doesnot return with a raise' 76 | 77 | 78 | def test_choice_callable_default(monkeypatch): 79 | mock = MagicMock() 80 | monkeypatch.setattr('clickclick.choice', mock) 81 | variables = {} 82 | choice(variables, 'test', default=lambda: 'default') 83 | mock.assert_called_once_with(default='default') 84 | 85 | 86 | def test_prompt_callable_default(monkeypatch): 87 | mock = MagicMock() 88 | monkeypatch.setattr('click.prompt', mock) 89 | variables = {} 90 | prompt(variables, 'test', default=lambda: 'default') 91 | mock.assert_called_once_with(default='default') 92 | 93 | 94 | def test_choice_type(): 95 | variables = {'test': '42'} 96 | choice(variables, 'test', type=int) 97 | assert variables['test'] == 42 98 | 99 | 100 | def test_prompt_type(): 101 | variables = {'test': '42'} 102 | prompt(variables, 'test', type=int) 103 | assert variables['test'] == 42 104 | 105 | 106 | def test_ebs_optimized_supported(): 107 | assert ebs_optimized_supported('c3.xlarge') 108 | assert not ebs_optimized_supported('t2.micro') 109 | 110 | 111 | def test_generate_random_password(): 112 | assert len(generate_random_password(62)) == 62 113 | 114 | 115 | def test_generate_definition(): 116 | variables = set_default_variables(dict()) 117 | assert len(generate_definition(variables)) > 300 118 | 119 | 120 | def test_get_latest_image(monkeypatch): 121 | 122 | mock_response = MagicMock() 123 | mock_response.json.return_value = [{'created': '2016-06-09T07:12:34.413Z', 124 | 'created_by': 'someone', 125 | 'name': '0.90-p7'}, 126 | {'created': '2016-06-28T10:19:47.788Z', 127 | 'created_by': 'someone', 128 | 'name': '0.90-p8'}, 129 | {'created': '2016-07-01T06:58:32.956Z', 130 | 'created_by': 'someone', 131 | 'name': '0.90-test'}, 132 | {'created': '2016-07-12T06:58:32.956Z', 133 | 'created_by': 'someone', 134 | 'name': '0.91-SNAPSHOT'}] 135 | 136 | mock_get = MagicMock() 137 | mock_get.return_value = mock_response 138 | monkeypatch.setattr('requests.get', mock_get) 139 | 140 | assert get_latest_image() == 
'registry.opensource.zalan.do/acid/spilo-9.5:0.90-test' 141 | 142 | mock_response.ok = False 143 | assert get_latest_image() == '' 144 | -------------------------------------------------------------------------------- /tests/test_traffic.py: -------------------------------------------------------------------------------- 1 | import builtins 2 | import importlib 3 | from unittest.mock import MagicMock 4 | 5 | import botocore.exceptions 6 | import senza.traffic 7 | from senza.aws import SenzaStackSummary 8 | from senza.manaus.route53 import RecordType 9 | from senza.traffic import (StackVersion, get_stack_versions, get_weights, 10 | resolve_to_ip_addresses) 11 | 12 | 13 | def test_get_stack_versions(monkeypatch): 14 | cf = MagicMock() 15 | elb = MagicMock() 16 | 17 | def my_boto3(service, *args): 18 | if service == 'cloudformation': 19 | return cf 20 | elif service == 'elb': 21 | return elb 22 | else: 23 | return MagicMock(side_effect=SyntaxError('unknown option')) 24 | 25 | monkeypatch.setattr('senza.traffic.get_stacks', MagicMock(return_value=[])) 26 | monkeypatch.setattr('boto3.client', my_boto3) 27 | monkeypatch.setattr('boto3.resource', my_boto3) 28 | 29 | stack_version = list(get_stack_versions('my-stack', 'my-region')) 30 | 31 | assert stack_version == [] 32 | 33 | stack = MagicMock(stack_name='my-stack-1') 34 | resource = [ 35 | MagicMock(resource_type='AWS::ElasticLoadBalancing::LoadBalancer'), 36 | MagicMock(resource_type='AWS::Route53::RecordSet', 37 | physical_resource_id='myapp.example.org') 38 | ] 39 | cf.Stack.return_value = MagicMock(tags=[{'Value': '1', 'Key': 'StackVersion'}], 40 | notification_arns=['some-arn'], 41 | resource_summaries=MagicMock(all=MagicMock(return_value=resource))) 42 | elb.describe_load_balancers.return_value = {'LoadBalancerDescriptions': [{'DNSName': 'elb-dns-name'}]} 43 | monkeypatch.setattr('senza.traffic.get_stacks', MagicMock( 44 | return_value=[SenzaStackSummary(stack), SenzaStackSummary({'StackStatus': 'ROLLBACK_COMPLETE', 45 | 'StackName': 'my-stack-1'})])) 46 | stack_version = list(get_stack_versions('my-stack', 'my-region')) 47 | assert stack_version == [StackVersion('my-stack', '1', 48 | ['myapp.example.org'], 49 | ['elb-dns-name'], 50 | ['some-arn'])] 51 | 52 | elb.describe_load_balancers.side_effect = botocore.exceptions.ClientError( 53 | {'Error': {'Code': 'LoadBalancerNotFound'}}, 54 | 'foobar' 55 | ) 56 | stack_version = list(get_stack_versions('my-stack', 'my-region')) 57 | assert stack_version == [StackVersion('my-stack', '1', 58 | ['myapp.example.org'], 59 | [], 60 | ['some-arn'])] 61 | 62 | 63 | def test_get_weights(monkeypatch): 64 | mock_route53 = MagicMock() 65 | mock_record1 = MagicMock(name='app1.example.com', 66 | type=RecordType.A, 67 | weight=100, 68 | set_identifier='app-1') 69 | mock_route53.get_records.return_value = [mock_record1] 70 | monkeypatch.setattr('senza.traffic.Route53', mock_route53) 71 | all_identifiers = ['app-1', 'app-2', 'app-3'] 72 | domains = ['app1.example.com'] 73 | assert get_weights(domains, 'app-1', all_identifiers) == ({'app-1': 100, 74 | 'app-2': 0, 75 | 'app-3': 0}, 76 | 0, 77 | 0) 78 | 79 | # Without weight 80 | mock_record2 = MagicMock(name='app1.example.com', 81 | type=RecordType.A, 82 | weight=None, 83 | set_identifier='app-1') 84 | mock_route53.get_records.return_value = [mock_record2] 85 | 86 | all_identifiers = ['app-1', 'app-2', 'app-3'] 87 | domains = ['app1.example.com'] 88 | assert get_weights(domains, 'app-1', all_identifiers) == ({'app-1': 0, 89 | 'app-2': 0, 90 | 'app-3': 0}, 91 | 0, 
92 | 0) 93 | 94 | 95 | def test_resolve_to_ip_addresses(monkeypatch): 96 | query = MagicMock() 97 | monkeypatch.setattr('dns.resolver.query', query) 98 | 99 | query.side_effect = Exception() 100 | assert resolve_to_ip_addresses('example.org') == set() 101 | 102 | query.side_effect = None 103 | query.return_value = [MagicMock(address='1.2.3.4')] 104 | assert resolve_to_ip_addresses('example.org') == {'1.2.3.4'} 105 | -------------------------------------------------------------------------------- /tests/test_utils.py: -------------------------------------------------------------------------------- 1 | from senza.utils import camel_case_to_underscore, get_load_balancer_name, generate_valid_cloud_name 2 | 3 | 4 | def test_camel_case_to_underscore(): 5 | assert camel_case_to_underscore('CamelCaseToUnderscore') == 'camel_case_to_underscore' 6 | assert camel_case_to_underscore('ThisIsABook') == 'this_is_a_book' 7 | assert camel_case_to_underscore('InstanceID') == 'instance_id' 8 | 9 | 10 | def test_get_load_balancer_name(): 11 | assert get_load_balancer_name(stack_name='really-long-application-name', 12 | stack_version='cd871c54') == 'really-long-application-cd871c54' 13 | assert get_load_balancer_name(stack_name='app-name', stack_version='1') == 'app-name-1' 14 | 15 | 16 | def test_generate_valid_cloud_name(): 17 | assert generate_valid_cloud_name(name='invalid-aws--cloud-name', length=32) == 'invalid-aws-cloud-name' 18 | assert generate_valid_cloud_name(name='-invalid-aws-cloud-name', length=32) == 'invalid-aws-cloud-name' 19 | assert generate_valid_cloud_name(name='invalid-aws-cloud-name-', length=32) == 'invalid-aws-cloud-name' 20 | assert generate_valid_cloud_name(name='invalid-aws--cloud-name-', length=32) == 'invalid-aws-cloud-name' 21 | assert generate_valid_cloud_name(name='invalid-aws-cloud-name-long-replaced', length=27) == 'invalid-aws-cloud-name-long' 22 | -------------------------------------------------------------------------------- /tox.ini: -------------------------------------------------------------------------------- 1 | [flake8] 2 | max-line-length=120 3 | select = E,W,F,N,I 4 | ignore = W503 5 | 6 | [tox] 7 | envlist=py34,py35,flake8 8 | 9 | [tox:travis] 10 | 3.4=py34 11 | 3.5=py35,flake8 12 | 13 | [testenv] 14 | deps = -rrequirements.txt 15 | setuptools 16 | commands=python setup.py test 17 | 18 | [testenv:flake8] 19 | deps=flake8 20 | commands=python setup.py flake8 21 | --------------------------------------------------------------------------------