├── .gitignore ├── LICENSE.md ├── README.md ├── TERMS_OF_USE.txt ├── ansible.cfg ├── automation_host_cft-w-existing-vpc.json ├── automation_host_cft-w-new-vpc.json ├── bin ├── f5aws ├── get_bigip_image_id.py └── get_cfn_stack_state.py ├── build ├── README.md ├── files │ ├── .aws │ │ └── credentials │ └── .f5aws ├── packer │ ├── Builds │ ├── README.md │ ├── aws-ami-template.json │ ├── files │ │ ├── motd │ │ └── requirements.txt │ ├── http │ │ └── ubuntu-14.04-amd64 │ │ │ └── preseed.cfg │ ├── notes.txt │ ├── scripts │ │ ├── base_ubuntu_bootstrap.sh │ │ ├── setup_venv.sh │ │ ├── vagrant.sh │ │ └── vboxguest.sh │ └── virtualbox-template.json └── scripts │ ├── build-stock-ubuntu-ami.sh │ ├── setup_env_aws.sh │ └── setup_env_vbox.sh ├── conf └── config.ini ├── docs └── Orchestrating BIG-IP in the Public Cloud with Ansible.pdf ├── inventory ├── group_vars │ └── localhosts ├── host_vars │ └── localhost └── hosts ├── library ├── bigip_cluster.py ├── bigip_config.py └── bigip_facts.py ├── playbooks ├── cluster_bigips.yml ├── deploy_analytics.yml ├── deploy_analytics_cft.yml ├── deploy_app.yml ├── deploy_app_cft.yml ├── deploy_apps_bigip.yml ├── deploy_apps_gtm.yml ├── deploy_az_cft.yml ├── deploy_bigip.yml ├── deploy_bigip_cft.yml ├── deploy_client.yml ├── deploy_client_cft.yml ├── deploy_gtm.yml ├── deploy_gtm_cft.yml ├── deploy_vpc_cft.yml ├── init.yml ├── remove.yml ├── start_traffic.yml ├── stop_traffic.yml └── teardown_all.yml ├── requirements.txt ├── roles ├── analytics │ ├── files │ │ ├── F5AccessApp.tgz │ │ ├── SplunkforF5Networks.tgz │ │ ├── SplunkforF5Security.tgz │ │ ├── inputs.conf │ │ ├── modify_admin_pass_expect.py │ │ └── splunk-add-on-for-f5-big-ip_230.tgz │ └── tasks │ │ └── main.yml ├── app │ ├── tasks │ │ └── main.yml │ └── templates │ │ └── docker_containers.cfg.j2 ├── bigip_app1 │ ├── files │ │ ├── README.md │ │ ├── asm_policy_linux_high_base64 │ │ ├── eip.json │ │ ├── iapp_f5_http_backport_template.json │ │ ├── image_background_base_64 │ │ ├── image_sorry_base_64 │ │ ├── irule_demo_analytics.tcl │ │ └── irule_sorry_page.tcl │ ├── tasks │ │ ├── main.yml │ │ └── provision_waf_depends.yml │ └── templates │ │ ├── README.md │ │ ├── analytics_pool_payload.json.j2 │ │ ├── asm_logging_profile_payload.json.j2 │ │ ├── bigip_pool_members_from_containers.cfg.j2 │ │ └── iapp_f5_http_backport_service.json.j2 ├── bigip_app2 │ ├── files │ │ └── irule_random_snat.tcl │ ├── tasks │ │ └── main.yml │ └── templates │ │ └── bigip_pool_members_from_containers.cfg.j2 ├── bigip_base │ └── tasks │ │ ├── add_rest_user.yml │ │ ├── main.yml │ │ └── modify_admin.yml ├── bigip_cluster │ ├── tasks │ │ └── main.yml │ └── templates │ │ └── cluster_peer_info.cfg.j2 ├── bigip_network │ └── tasks │ │ └── main.yml ├── bigip_system │ ├── README.md │ └── tasks │ │ ├── main.yml │ │ └── provision_module.yml ├── bigip_system_aws │ └── tasks │ │ └── main.yml ├── client │ ├── files │ │ └── simple-load-generation.jmx │ └── tasks │ │ └── main.yml ├── docker_base │ └── tasks │ │ └── main.yml ├── gtm_app1 │ └── tasks │ │ └── main.yml ├── gtm_app2 │ └── tasks │ │ └── main.yml ├── gtm_cluster │ ├── README.md │ ├── files │ │ └── gtm_expect.py │ └── tasks │ │ └── main.yml ├── gtm_conf │ ├── tasks │ │ └── main.yml │ └── templates │ │ └── wideip_pool.cfg.j2 ├── gtm_network │ ├── README.md │ └── tasks │ │ └── main.yml ├── infra │ ├── files │ │ ├── analyticshost.json │ │ ├── apphost.json │ │ ├── az.json │ │ ├── bigip.json │ │ ├── client.json │ │ ├── eip.json │ │ ├── gtm.json │ │ └── vpc.json │ ├── library │ │ └── 
cloudformation_state.py │ ├── tasks │ │ ├── deploy_analyticshost_cft.yml │ │ ├── deploy_apphost_cft.yml │ │ ├── deploy_az.yml │ │ ├── deploy_az_cft.yml │ │ ├── deploy_bigip_cft.yml │ │ ├── deploy_client_cft.yml │ │ ├── deploy_clienthost.yml │ │ ├── deploy_eip.yml │ │ ├── deploy_eip_cft.yml │ │ ├── deploy_gtm_cft.yml │ │ ├── deploy_vpc.yml │ │ ├── deploy_vpc_cft.yml │ │ ├── teardown_analyticshost_cft.yml │ │ ├── teardown_apphost.yml │ │ ├── teardown_apphost_cft.yml │ │ ├── teardown_az.yml │ │ ├── teardown_az_cft.yml │ │ ├── teardown_bigip.yml │ │ ├── teardown_bigip_cft.yml │ │ ├── teardown_client.yml │ │ ├── teardown_client_cft.yml │ │ ├── teardown_eip.yml │ │ ├── teardown_eip_cft.yml │ │ ├── teardown_gtm.yml │ │ ├── teardown_gtm_cft.yml │ │ ├── teardown_vpc.yml │ │ └── teardown_vpc_cft.yml │ └── templates │ │ ├── az.j2 │ │ ├── host-managers.j2 │ │ ├── managers.j2 │ │ └── vpc.j2 └── inventory_manager │ ├── defaults │ └── main.yml │ ├── tasks │ ├── main.yml │ ├── provision_analytics.yml │ ├── provision_inventory.yml │ └── teardown.yml │ └── templates │ ├── group_vars_analyticshosts.j2 │ ├── group_vars_apphosts.j2 │ ├── group_vars_bigips.j2 │ ├── group_vars_clienthosts.j2 │ ├── group_vars_gtms.j2 │ ├── group_vars_localhosts.j2 │ ├── hosts-cluster-per-zone.j2 │ ├── hosts-single-cluster.j2 │ ├── hosts-single-standalone.j2 │ └── hosts-standalone-per-zone.j2 ├── src ├── MANIFEST.IN ├── README.txt ├── __init__.py ├── f5_aws │ ├── __init__.py │ ├── cli.py │ ├── config.py │ ├── environment_manager.py │ ├── exceptions.py │ ├── image_finder.py │ ├── playbook_runner.py │ ├── test │ │ ├── README.md │ │ ├── manual_bigip_config.py │ │ ├── test_cfts.py │ │ ├── test_images.py │ │ ├── test_inventory_manager.py │ │ ├── test_lifecycle_single_standalone.py │ │ └── utils.py │ └── utils.py └── setup.py └── vagrant └── Vagrantfile /.gitignore: -------------------------------------------------------------------------------- 1 | # build products... 2 | *.py[co] 3 | *.egg-info 4 | # Emacs backup files... 5 | *~ 6 | .\#* 7 | # RPM stuff... 8 | MANIFEST 9 | dist 10 | rpm-build 11 | # Eclipse/PyDev stuff... 12 | .project 13 | .pydevproject 14 | # PyCharm stuff... 15 | .idea 16 | # Mac OS X stuff... 17 | .DS_Store 18 | # manpage build stuff... 19 | docs/man/man3/* 20 | # Sublime stuff 21 | *.sublime-project 22 | *.sublime-workspace 23 | # docsite stuff... 24 | docsite/rst/modules 25 | docsite/*.html 26 | docsite/_static/*.gif 27 | docsite/_static/*.png 28 | docsite/_static/websupport.js 29 | # deb building stuff... 30 | debian/ 31 | #Vagrant machines 32 | **/.vagrant 33 | #Virtual machines 34 | **/venv 35 | #ssh directories 36 | **/.ssh 37 | #Packer Builds 38 | *.box 39 | #Packer Cache 40 | *.iso 41 | -------------------------------------------------------------------------------- /LICENSE.md: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | Copyright (c) 2015, F5 Networks, Inc. 3 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: 4 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
5 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 6 | -------------------------------------------------------------------------------- /TERMS_OF_USE.txt: -------------------------------------------------------------------------------- 1 | THIS LICENSE AGREEMENT IS ENTERED INTO BETWEEN THE SUBMITTING PARTY AND F5 NETWORKS, INC. AND THE SUBMITTING PARTY AGREES TO BE BOUND BY THE TERMS OF THIS AGREEMENT BY SUBMITTING, POSTING, DOWNLOADING, COPYING, MODIFYING, INPUTTING, INSTALLATION, UPLOAD OR OTHER USE OF F5 MATERIALS AND THE SUBMISSIONS. IF YOU DO NOT AGREE TO THE FOREGOING, DO NOT POST THE SUBMISSIONS OR USE THE F5 MATERIALS. 2 | (1) F5 does not claim ownership of the materials you provide to F5 (including feedback and suggestions) or post, upload, input or submit to any F5 GitHub repository (collectively "Submissions"). However, by posting, uploading, inputting, providing or submitting your Submission you grant F5, its affiliated companies and necessary sub-licensees a full, complete, irrevocable copyright license to use your Submission including, without limitation, the rights to: copy, distribute, transmit, publicly display, publicly perform, reproduce, edit, translate and reformat your Submission; and to publish your name in connection with your Submission. In addition, you agree that your submission will be subject to the terms of the MIT License (F5 MIT License). 3 | (2) By posting, uploading, inputting, providing or submitting your Submission you warrant and represent that you own, are approved by your employer, or otherwise control all of the rights to your Submission as described including, without limitation, all the rights necessary for you to provide, post, upload, input or submit the Submissions. 4 | (3) Infringement Indemnification. Submitting party will defend and indemnify F5 against a claim that any information, design, specification, instruction, software, data, or material furnished by the submitting party under this license infringes a trademark, copyright, or patent. F5 will notify the submitting party promptly of such claim and will give sole control of defense and all related settlement negotiations to submitting party. F5 will provide reasonable assistance, information, and authority necessary to perform these obligations. Reasonable out-of-pocket expenses incurred by F5 for providing such assistance will be reimbursed by the submitting party. 5 | (4) THE MATERIALS AND SERVICES MADE AVAILABLE AT AND THROUGH THIS SITE ARE PROVIDED BY F5 ON AN "AS IS" BASIS. F5 MAKES NO REPRESENTATIONS, WARRANTIES OR GUARANTIES OF ANY KIND, EXPRESS OR IMPLIED, AS TO THE OPERATION OF THIS SITE, ITS CONTENT, OR ANY PRODUCTS OR SERVICES DESCRIBED OR OFFERED BY THIS SITE. TO THE FULL EXTENT PERMISSIBLE BY APPLICABLE LAW, F5 DISCLAIMS ALL WARRANTIES, EXPRESS OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, IMPLIED WARRANTIES OF MERCHANTABILITY, INCLUDING MERCHANTABILITY OF COMPUTER PROGRAMS AND INFORMATIONAL CONTENT, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, TITLE, OR THAT THE SITE CONTENT IS RELIABLE, ACCURATE, OR TIMELY. 
F5 WILL NOT BE LIABLE FOR ANY DAMAGES OF ANY KIND ARISING FROM THE USE OF THIS SITE, INCLUDING, BUT NOT LIMITED TO DIRECT, INDIRECT, INCIDENTAL, PUNITIVE, SPECIAL, CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER INCLUDING, WITHOUT LIMITATION, DAMAGES FOR LOSS OF USE, DATA OR PROFITS, ARISING OUT OF OR IN ANY WAY CONNECTED WITH THE USE OR PERFORMANCE OF THE WEB SITE, WITH THE DELAY OR INABILITY TO USE THE WEB SITE OR RELATED SERVICES, THE PROVISION OF OR FAILURE TO PROVIDE SERVICES, OR FOR ANY INFORMATION, SOFTWARE, PRODUCTS, SERVICES AND RELATED GRAPHICS OBTAINED THROUGH THE WEB SITE, OR OTHERWISE ARISING OUT OF THE USE OF THE WEB SITE, WHETHER BASED ON CONTRACT, TORT, NEGLIGENCE, STRICT LIABILITY OR OTHERWISE, EVEN IF F5 OR ANY OF ITS SUPPLIERS HAS BEEN ADVISED OF THE POSSIBILITY OF DAMAGES. BECAUSE SOME STATES/JURISDICTIONS DO NOT ALLOW THE EXCLUSION OR LIMITATION OF LIABILITY FOR CONSEQUENTIAL OR INCIDENTAL DAMAGES, THE ABOVE LIMITATION MAY NOT APPLY TO YOU. WHILE THIS SITE MAY PROVIDE LINKS TO THIRD PARTY SITES, F5 DOES NOT CONTROL OR ENDORSE ANY THIRD PARTY SITE AND DISCLAIMS ANY RESPONSIBILITY FOR ITS FUNCTIONALITY OR CONTENT. THESE DISCLAIMERS AND LIMITATIONS ARE MADE IN ADDITION TO THOSE MADE IN AND APPLICABLE TO VARIOUS PAGES OR SECTIONS OF THIS SITE. 6 | 7 | 8 | -------------------------------------------------------------------------------- /ansible.cfg: -------------------------------------------------------------------------------- 1 | [defaults] 2 | host_key_checking=False 3 | executable=bash 4 | roles_path=./roles 5 | library=./library 6 | 7 | -------------------------------------------------------------------------------- /bin/f5aws: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | """ 4 | 5 | Chris Mutzel 6 | Alex Applebaum 7 | 8 | For more information on usage see README.md 9 | 10 | """ 11 | 12 | import os 13 | import sys 14 | import ansible.utils 15 | import ansible.errors 16 | from ansible.callbacks import display 17 | 18 | local_module_path = os.path.abspath( 19 | os.path.join(os.path.dirname(__file__), '..', 'src') 20 | ) 21 | sys.path = [local_module_path] + sys.path 22 | 23 | import f5_aws.cli as f5_aws_cli 24 | from f5_aws.exceptions import ValidationError, ExecutionError, LifecycleError 25 | 26 | if __name__ == "__main__": 27 | display(" ", log_only=True) 28 | display(" ".join(sys.argv), log_only=True) 29 | display(" ", log_only=True) 30 | 31 | try: 32 | cli = f5_aws_cli.CLI() 33 | handlers = { 34 | 'init': cli.init, 35 | 'list': cli.list, 36 | 'deploy': cli.deploy, 37 | 'login': cli.login, 38 | 'resources': cli.resources, 39 | 'inventory': cli.inventory, 40 | 'start_traffic': cli.start_traffic, 41 | 'stop_traffic': cli.stop_traffic, 42 | 'remove': cli.remove, 43 | 'teardown': cli.teardown, 44 | } 45 | 46 | parser = f5_aws_cli.get_parser() 47 | args = parser.parse_args() 48 | 49 | # leverage ansible debugging and logging 50 | for i in range(args.verbose): 51 | print 'Incrementing debug to level %s' % i 52 | ansible.utils.increment_debug(False, False, False, False) 53 | 54 | handlers[args.cmd](args) 55 | 56 | except ValidationError, e: 57 | display("INPUT ERROR: %s" % e, color='red', stderr=True) 58 | except ExecutionError, e: 59 | display("RUNTIME ERROR: %s" % e, color='red', stderr=True) 60 | except LifecycleError, e: 61 | display("LIFECYCLE ERROR: %s" % e, color='red', stderr=True) 62 | except ansible.errors.AnsibleError, e: 63 | display("ANSIBLE ERROR: %s" % e, color='red', stderr=True) 64 | 
sys.exit(1) 65 | except KeyboardInterrupt, e: 66 | display("ERROR: interrupted", color='red', stderr=True) 67 | sys.exit(1) 68 | except Exception, e: 69 | display("ERROR: %s" % e, color='red', stderr=True) 70 | sys.exit(1) 71 | 72 | 73 | -------------------------------------------------------------------------------- /bin/get_bigip_image_id.py: -------------------------------------------------------------------------------- 1 | #get_bigip_image_id.py 2 | import os 3 | import sys 4 | import argparse 5 | from f5_aws import image_finder 6 | 7 | """ 8 | Script to look up the AMI ID of BIG-IP in the Amazon EC2 marketplace. 9 | 10 | Usage examples 11 | 12 | Look up the 11.6, Best, Hourly AMI. If multiple AMIs are returned with a version matching 11.6, 13 | the latest matching version will be used. 14 | python get_bigip_image_id.py --region us-west-1 --license hourly --package best --throughput 1gbps --version 11.6 15 | 16 | python get_bigip_image_id.py --region us-west-1 --license hourly --package best --throughput 1gbps --version 11.6 --oldest 17 | 18 | BYOL images do not have defined throughput: 19 | python get_bigip_image_id.py --region us-west-1 --license byol --package best --version 11.6 20 | """ 21 | 22 | REGIONS = [ 23 | 'ap-northeast-1', 24 | 'ap-southeast-1', 25 | 'ap-southeast-2', 26 | 'eu-central-1', 27 | 'sa-east-1', 28 | 'us-east-1', 29 | 'us-west-1', 30 | 'us-west-2', 31 | ] 32 | 33 | 34 | parser = argparse.ArgumentParser(description='Get AMI IDs in AWS for F5 Networks.') 35 | parser.add_argument('-r', '--region', metavar='R', required=False, 36 | help='region name', default='us-east-1') 37 | parser.add_argument('-p', '--package', metavar='P', required=False, 38 | help='good, better, best', default=None) 39 | parser.add_argument('-l', '--license', metavar='L', required=False, 40 | help='byol, hourly', default=None) 41 | parser.add_argument('-v', '--version', metavar='V', required=False, 42 | help='11, 11.6, 11.6.2, etc. - the latest matching version is used unless --oldest is given', default=None) 43 | parser.add_argument('-t', '--throughput', metavar='T', required=False,#choices=['25mbps', '200mbps', '1gbps'], 44 | help='Must be one of 25mbps, 200mbps, 1gbps. Ignored if license is provided as BYOL', default=None) 45 | parser.add_argument('-o', '--oldest', dest='take_newest', action='store_false', default=True, 46 | help='Take the oldest image when multiple images match the same version') 47 | parser.add_argument('-1', '--matchone', action='store_true', default=False, 48 | help='Print only the id of the first matching image') 49 | args = vars(parser.parse_args()) 50 | 51 | try: 52 | if args['matchone'] is True: 53 | print image_finder.BigIpImageFinder().find(**args)[0]['id'] 54 | else: 55 | print 'Found:' 56 | for i in image_finder.BigIpImageFinder().find(**args): 57 | print i 58 | except IndexError: 59 | # set the exit code so that ansible knows we have failed 60 | sys.exit('No images found') 61 | 62 | 63 | 64 | 65 | -------------------------------------------------------------------------------- /bin/get_cfn_stack_state.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import boto 3 | import boto.cloudformation 4 | 5 | from boto.exception import BotoServerError 6 | 7 | try: 8 | if len(sys.argv) != 3: 9 | sys.exit('Wrong number of input args!') 10 | cf_conn = boto.cloudformation.connect_to_region(sys.argv[1]) 11 | if not cf_conn: 12 | sys.exit('Could not establish boto connection!') 13 | print cf_conn.describe_stacks(stack_name_or_id=sys.argv[2])[0].stack_status.upper() 14 | sys.exit() 15 | except BotoServerError as e: 16 | # assume that this caught exception is because the stack does not exist 17 | # Example exception: 18 | # BotoServerError: 400 Bad Request 19 | # <ErrorResponse> 20 | # <Error> 21 | # <Type>Sender</Type> 22 | # <Code>ValidationError</Code> 23 | # <Message>Stack with id mystack does not exist</Message> 24 | # </Error> 25 | # <RequestId>4b1e324d-1ea7-11e5-b8a7-c3a427ceaa91</RequestId> 26 | # </ErrorResponse> 27 | print 'ABSENT' 28 | sys.exit(0) 29 | except Exception as e: 30 | sys.exit('Uncaught exception: %s' % e) 31 | 32 | 33 | 34 | -------------------------------------------------------------------------------- /build/README.md: -------------------------------------------------------------------------------- 1 | This directory contains code used to build AWS AMIs and VirtualBox images for easier use of the aws-deployments project. These build materials may not be relevant to the casual user, but are included in this repository for completeness. 2 | 3 | We use Packer, a tool from HashiCorp, to create the AMIs and VirtualBox images. 4 | -------------------------------------------------------------------------------- /build/files/.aws/credentials: -------------------------------------------------------------------------------- 1 | #ex. 2 | #[default] 3 | #aws_access_key_id = 123XXXXXXXXXX 4 | #aws_secret_access_key= 123XXXXXXXXXXXXXXXXXXXXXXXXXX 5 | 6 | [default] 7 | aws_access_key_id = 8 | aws_secret_access_key = 9 | 10 | 11 | -------------------------------------------------------------------------------- /build/files/.f5aws: -------------------------------------------------------------------------------- 1 | #Absolute path to aws-deployments directory 2 | # ex. 3 | # install_path = '/home/vagrant/aws-deployments' 4 | install_path = '/home/ubuntu/aws-deployments' 5 | 6 | ### Credentials ### 7 | # This SSH key is used to login to all the deployed hosts in EC2 8 | # It must exist in the AWS region you are using for testing. 
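# For example, one way to create such a key pair from the command line (a sketch,
# assuming the AWS CLI is installed and configured; "demo-key" is a hypothetical name):
#   aws ec2 create-key-pair --key-name demo-key --query 'KeyMaterial' --output text > ~/.ssh/demo-key.pem
#   chmod 400 ~/.ssh/demo-key.pem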
9 | # Go to AWS Console - EC2 -> Network & Security -> Key Pairs (Create or Import) 10 | ssh_key = ~/.ssh/.pem 11 | 12 | # A separate admin user account for the REST API will be created on BIG-IP 13 | # during the provisioning process with these credentials 14 | # NOTE: For simplicity, we also use this password for the default admin user 15 | bigip_rest_user = 'your_username_here' 16 | bigip_rest_password = 'your_password_here' 17 | 18 | # These are the AWS keys that will be placed on the BIGIP if there's a cluster. 19 | # (ex. These are used to reassign secondary IPs associated with VIPs, change route tables, etc) 20 | # Best practice is to use separate keys for a BIGIP IAM user with permissions limited to modifying those network elements 21 | # 22 | # http://docs.aws.amazon.com/AmazonVPC/latest/UserGuide/VPC_IAM.html 23 | # Otherwise, if using a test account, you can set the same keys you have configured for your boto environment 24 | # If you are NOT testing a cluster, you can just leave them as empty quotes 25 | f5_aws_access_key = 'your_bigip_user_access_key_here' 26 | f5_aws_secret_key = 'your_bigip_user_secret_key_here' 27 | 28 | # WARNING: If populating the f5_aws_XXXX_keys above, please ensure you use 29 | # disposable keys and use a STRONG password for bigip_rest_password, 30 | # as these systems will be publicly available and you don't want to risk compromising them 31 | -------------------------------------------------------------------------------- /build/packer/Builds: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/f5devcentral/aws-deployments/3a55af06fca11508230263207a2ed402fca3cb29/build/packer/Builds -------------------------------------------------------------------------------- /build/packer/README.md: -------------------------------------------------------------------------------- 1 | This directory contains scripts and files used to build the AWS AMI and Vagrant images for execution of this demonstration code. It may not be relevant for the casual user, but we include it for completeness. 
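2 | 
3 | As a rough sketch of how these templates are consumed (assuming Packer is installed; the -var names match the "variables" blocks declared in each template), the AWS AMI could be built with:
4 | 
5 |     packer build -var 'aws_access_key=YOUR_KEY' -var 'aws_secret_key=YOUR_SECRET' aws-ami-template.json
6 | 
7 | and the VirtualBox image (the atlas post-processor additionally expects a token) with:
8 | 
9 |     packer build -var 'atlas_token=YOUR_TOKEN' virtualbox-template.json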
-------------------------------------------------------------------------------- /build/packer/aws-ami-template.json: -------------------------------------------------------------------------------- 1 | { 2 | "variables": { 3 | "aws_access_key": "", 4 | "aws_secret_key": "" 5 | }, 6 | "builders": [{ 7 | "type": "amazon-ebs", 8 | "access_key": "{{user `aws_access_key`}}", 9 | "secret_key": "{{user `aws_secret_key`}}", 10 | "source_ami": "ami-a83ad99b", 11 | "instance_type": "t1.micro", 12 | "ssh_username": "ubuntu", 13 | "ami_name": "F5 AWS Deployments Environment", 14 | "associate_public_ip_address": "true", 15 | "region": "us-west-2", 16 | "ami_regions": [ "us-west-1", "us-east-1", "eu-west-1", "ap-southeast-2", "ap-northeast-1" ], 17 | "ami_groups": "all" 18 | }], 19 | "provisioners": [ 20 | { 21 | "type": "shell", 22 | "inline": [ "sleep 10" ] 23 | }, 24 | { 25 | "type": "file", 26 | "source": "files/motd", 27 | "destination": "~/motd" 28 | }, 29 | { 30 | "type": "file", 31 | "source": "files/requirements.txt", 32 | "destination": "~/requirements.txt" 33 | }, 34 | { 35 | "type": "shell", 36 | "scripts": [ 37 | "scripts/base_ubuntu_bootstrap.sh", 38 | "scripts/setup_venv.sh" 39 | ] 40 | }, 41 | { 42 | "type": "shell", 43 | "inline": [ 44 | "sudo cp motd /etc/motd", 45 | "rm -f ~/motd", 46 | "rm -f ~/requirements.txt", 47 | "sudo sed -i.bak 's/UsePAM yes/UsePAM yes\\nUseDNS no\\n/' /etc/ssh/sshd_config", 48 | "sudo service ssh restart" 49 | ] 50 | } 51 | ], 52 | "post-processors": [{ 53 | "type": "vagrant", 54 | "output": "build/aws/f5demo_{{.Provider}}.box" 55 | }] 56 | } 57 | -------------------------------------------------------------------------------- /build/packer/files/motd: -------------------------------------------------------------------------------- 1 | ############################################################### 2 | ### ### 3 | ### Welcome to F5 AWS Deployments ### 4 | ### Demo Environment ### 5 | ### For More Information, visit: ### 6 | ### devcentral.f5.com ### 7 | ### github/f5networks ### 8 | ### ### 9 | ############################################################### 10 | -------------------------------------------------------------------------------- /build/packer/files/requirements.txt: -------------------------------------------------------------------------------- 1 | ansible==1.8.2 2 | bigsuds==1.0.2 3 | boto==2.38.0 4 | Chameleon==2.22 5 | click==5.1 6 | colander==1.0 7 | configobj==5.0.6 8 | deform==0.9.9 9 | ecdsa==0.13 10 | html==1.16 11 | iso8601==0.1.10 12 | Jinja2==2.8 13 | json2html==0.3 14 | Mako==1.0.2 15 | MarkupSafe==0.23 16 | ordereddict==1.1 17 | paramiko==1.15.2 18 | PasteDeploy==1.5.2 19 | peppercorn==0.5 20 | pexpect==3.3 21 | py==1.4.30 22 | pycrypto==2.6.1 23 | Pygments==2.0.2 24 | pyramid==1.5.7 25 | pyramid-debugtoolbar==2.4.1 26 | pyramid-jinja2==2.5 27 | pyramid-mako==1.0.2 28 | pytest==2.7.2 29 | PyYAML==3.11 30 | redis==2.10.3 31 | repoze.lru==0.6 32 | requests==2.7.0 33 | rq==0.5.5 34 | six==1.9.0 35 | suds==0.4 36 | translationstring==1.3 37 | venusian==1.0 38 | waitress==0.8.10 39 | WebOb==1.4.1 40 | wheel==0.24.0 41 | zope.deprecation==4.1.2 42 | zope.interface==4.1.2 43 | -------------------------------------------------------------------------------- /build/packer/http/ubuntu-14.04-amd64/preseed.cfg: -------------------------------------------------------------------------------- 1 | choose-mirror-bin mirror/http/proxy string 2 | 3 | d-i base-installer/kernel/override-image string linux-server 4 | 5 | d-i clock-setup/utc 
boolean true 6 | d-i clock-setup/utc-auto boolean true 7 | d-i time/zone string US/Pacific 8 | 9 | d-i partman-lvm/confirm boolean true 10 | d-i partman-lvm/confirm boolean true 11 | d-i partman-lvm/confirm_nooverwrite boolean true 12 | d-i partman-lvm/device_remove_lvm boolean true 13 | d-i partman-auto-lvm/guided_size string max 14 | d-i partman-auto/choose_recipe select atomic 15 | d-i partman-auto/method string lvm 16 | d-i partman/choose_partition select finish 17 | d-i partman/confirm boolean true 18 | d-i partman/confirm_nooverwrite boolean true 19 | d-i partman/confirm_write_new_label boolean true 20 | 21 | d-i passwd/user-fullname string vagrant 22 | d-i passwd/user-uid string 900 23 | d-i passwd/user-password password vagrant 24 | d-i passwd/user-password-again password vagrant 25 | d-i passwd/username string vagrant 26 | 27 | d-i pkgsel/include string openssh-server build-essential libssl-dev linux-source dkms 28 | d-i pkgsel/install-language-support boolean false 29 | 30 | d-i pkgsel/update-policy select none 31 | d-i pkgsel/upgrade select safe-upgrade 32 | d-i user-setup/encrypt-home boolean false 33 | d-i user-setup/allow-password-weak boolean true 34 | 35 | d-i grub-installer/only_debian boolean true 36 | d-i grub-installer/with_other_os boolean true 37 | d-i finish-install/reboot_in_progress note 38 | 39 | tasksel tasksel/first multiselect standard, ubuntu-server 40 | -------------------------------------------------------------------------------- /build/packer/scripts/base_ubuntu_bootstrap.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -x 2 | 3 | #Add Redis repo: 4 | sudo add-apt-repository ppa:chris-lea/redis-server 5 | 6 | sudo apt-get update 7 | sudo apt-get install -y ntp 8 | sudo apt-get install -y aptitude 9 | sudo apt-get install -y git 10 | sudo apt-get install -y wget 11 | sudo apt-get install -y tar 12 | sudo apt-get install -y ca-certificates 13 | sudo apt-get install -y software-properties-common 14 | sudo apt-get install -y build-essential 15 | sudo apt-get install -y python-setuptools 16 | sudo apt-get install -y python-dev 17 | sudo apt-get install -y zlib1g-dev 18 | sudo apt-get install -y libssl-dev 19 | #Below not necessarily needed for python compilation for this particular demo but for completeness 20 | #https://renoirboulanger.com/blog/2015/04/upgrade-python-2-7-9-ubuntu-14-04-lts-making-deb-package/ 21 | sudo apt-get install -y gcc-multilib g++-multilib libffi-dev libffi6 libffi6-dbg python-crypto python-mox3 python-pil python-ply libbz2-dev libexpat1-dev libgdbm-dev dpkg-dev quilt autotools-dev libreadline-dev libtinfo-dev libncursesw5-dev tk-dev blt-dev libbz2-dev libexpat1-dev libsqlite3-dev libgpm2 mime-support netbase net-tools bzip2 22 | 23 | #Python 2.7.9 specific 24 | sudo wget https://www.python.org/ftp/python/2.7.9/Python-2.7.9.tgz 25 | sudo tar xfz Python-2.7.9.tgz 26 | rm -f Python-2.7.9.tgz 27 | cd Python-2.7.9/ 28 | sudo ./configure --prefix /usr/local/lib/python2.7.9 --enable-ipv6 29 | sudo make 30 | sudo make install 31 | sudo sh -c "wget https://bootstrap.pypa.io/ez_setup.py -O - | /usr/local/lib/python2.7.9/bin/python" 32 | sudo /usr/local/lib/python2.7.9/bin/easy_install pip 33 | sudo /usr/local/lib/python2.7.9/bin/pip install virtualenv 34 | cd ../ 35 | sudo rm -rf Python-2.7.9/ 36 | 37 | #Install Docker 38 | sudo apt-get -y install docker.io 39 | sudo ln -sf /usr/bin/docker.io /usr/local/bin/docker 40 | sudo sed -i '$acomplete -F _docker docker' /etc/bash_completion.d/docker.io 
41 | 42 | # For service catalog app - not needed now 43 | # #Install Redis 44 | sudo apt-get -y install redis-server 45 | -------------------------------------------------------------------------------- /build/packer/scripts/setup_venv.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -x 2 | 3 | # install the necessary python dependencies 4 | #check that venv is installed and activate 5 | /usr/local/lib/python2.7.9/bin/virtualenv -p /usr/local/lib/python2.7.9/bin/python2.7 venv 6 | 7 | #Activate venv 8 | source venv/bin/activate 9 | 10 | #setup base requirements 11 | pip install -r requirements.txt 12 | pip install awscli 13 | -------------------------------------------------------------------------------- /build/packer/scripts/vagrant.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | mkdir /home/vagrant/.ssh 4 | wget --no-check-certificate -O authorized_keys 'https://github.com/mitchellh/vagrant/raw/master/keys/vagrant.pub' 5 | mv authorized_keys /home/vagrant/.ssh 6 | chown -R vagrant /home/vagrant/.ssh 7 | chmod -R go-rwsx /home/vagrant/.ssh 8 | 9 | echo 'vagrant ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers 10 | -------------------------------------------------------------------------------- /build/packer/scripts/vboxguest.sh: -------------------------------------------------------------------------------- 1 | mkdir /tmp/virtualbox 2 | VERSION=$(cat /home/vagrant/.vbox_version) 3 | mount -o loop /home/vagrant/VBoxGuestAdditions_$VERSION.iso /tmp/virtualbox 4 | sh /tmp/virtualbox/VBoxLinuxAdditions.run 5 | umount /tmp/virtualbox 6 | rmdir /tmp/virtualbox 7 | rm /home/vagrant/*.iso 8 | -------------------------------------------------------------------------------- /build/packer/virtualbox-template.json: -------------------------------------------------------------------------------- 1 | { 2 | "variables": { 3 | "atlas_token": "" 4 | }, 5 | "builders": [{ 6 | "type": "virtualbox-iso", 7 | "virtualbox_version_file": ".vbox_version", 8 | "headless": false, 9 | 10 | "guest_os_type": "Ubuntu_64", 11 | "disk_size": 40960, 12 | 13 | "iso_url": "http://releases.ubuntu.com/14.04/ubuntu-14.04.3-server-amd64.iso", 14 | "iso_checksum": "0501c446929f713eb162ae2088d8dc8b6426224a", 15 | "iso_checksum_type": "sha1", 16 | 17 | "boot_command": [ 18 | "", 19 | "/install/vmlinuz noapic preseed/url=http://{{ .HTTPIP }}:{{ .HTTPPort }}/ubuntu-14.04-amd64/preseed.cfg ", 20 | "debian-installer=en_US auto locale=en_US kbd-chooser/method=us ", 21 | "hostname=f5demo ", 22 | "fb=false debconf/frontend=noninteractive ", 23 | "keyboard-configuration/modelcode=SKIP keyboard-configuration/layout=USA keyboard-configuration/variant=USA console-setup/ask_detect=false ", 24 | "initrd=/install/initrd.gz -- " 25 | ], 26 | "boot_wait": "12s", 27 | 28 | "http_directory": "http", 29 | "guest_additions_path": "VBoxGuestAdditions_{{.Version}}.iso", 30 | 31 | "ssh_username": "vagrant", 32 | "ssh_password": "vagrant", 33 | "ssh_port": 22, 34 | "ssh_wait_timeout": "10000s", 35 | 36 | "vboxmanage": [ 37 | ["modifyvm", "{{.Name}}", "--memory", "512"], 38 | ["modifyvm", "{{.Name}}", "--cpus", "1"] 39 | ], 40 | 41 | "shutdown_command": "echo 'vagrant'|sudo -S /sbin/halt -h -p" 42 | }], 43 | "provisioners": [{ 44 | "type": "file", 45 | "source": "files/motd", 46 | "destination": "~/motd" 47 | }, 48 | { 49 | "type": "file", 50 | "source": "files/requirements.txt", 51 | "destination": "~/requirements.txt" 52 | }, 53 | { 54 | 
"type": "shell", 55 | "execute_command": "echo 'vagrant' | {{.Vars}} sudo -S -E bash '{{.Path}}'", 56 | "scripts": [ 57 | "scripts/vagrant.sh", 58 | "scripts/vboxguest.sh", 59 | "scripts/base_ubuntu_bootstrap.sh", 60 | "scripts/setup_venv.sh" 61 | ] 62 | }, 63 | { 64 | "type": "shell", 65 | "execute_command": "echo 'vagrant' | {{.Vars}} sudo -S -E bash '{{.Path}}'", 66 | "inline": [ 67 | "cp motd /etc/motd", 68 | "rm -f ~/motd", 69 | "rm -f ~/requirements.txt", 70 | "sed -i.bak 's/UsePAM yes/UsePAM yes\\nUseDNS no\\n/' /etc/ssh/sshd_config", 71 | "service ssh restart" 72 | ] 73 | } 74 | ], 75 | "push": { 76 | "name": "f5networks/f5-aws-deployments", 77 | "vcs": false, 78 | "include": [ 79 | "scripts/*", 80 | "http/ubuntu-14.04-amd64/*", 81 | "files/*" 82 | ] 83 | }, 84 | "post-processors": [ 85 | [{ 86 | "type": "vagrant", 87 | "keep_input_artifact": false 88 | }, 89 | { 90 | "type": "atlas", 91 | "token": "{{user `atlas_token`}}", 92 | "artifact": "f5networks/f5-aws-deployments", 93 | "artifact_type": "vagrant.box", 94 | "metadata": { 95 | "provider": "virtualbox" 96 | } 97 | }] 98 | ] 99 | } 100 | -------------------------------------------------------------------------------- /build/scripts/setup_env_aws.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -x 2 | 3 | # This file is not actually run as the commands are instead 4 | # sent via Cloud-init metadata from a Cloud Formation Template 5 | # However, these are the essentially the final steps to prepare the environment from the base build 6 | # and update to the latest files (that change frequrently) from the git repo 7 | # Some builders run commands at various user levels so have to clean up file permissions 8 | 9 | # make sure the virtual environment is active 10 | source venv/bin/activate 11 | 12 | # clone the latest code 13 | git clone https://github.com/F5Networks/aws-deployments.git 14 | 15 | # change into the directory 16 | cd aws-deployments 17 | 18 | # install the latest python module requirements 19 | pip install -r requirements.txt 20 | 21 | # copy over the basic setup files 22 | cp ./build/files/.f5aws ~/ 23 | cp -r ./build/files/.aws ~/ 24 | 25 | # setup the bash file for first login 26 | if ! egrep activate ~/.bash_profile ; then echo 'source venv/bin/activate' >> ~/.bash_profile; fi 27 | if ! 
egrep aws-deployments ~/.bash_profile ; then echo 'cd aws-deployments' >> ~/.bash_profile; fi 28 | 29 | # attempt to set working directory in .f5aws to the logged-in user 30 | sed -i.bak "s/home\/ubuntu/home\/`whoami`/" ~/.f5aws 31 | 32 | # Clean up permissions 33 | sudo chown -R ubuntu.ubuntu ~/.* 34 | sudo chown -R ubuntu.ubuntu ~/* 35 | -------------------------------------------------------------------------------- /build/scripts/setup_env_vbox.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -x 2 | 3 | # This script is run from a Vagrantfile during startup 4 | # It contains the final steps to prepare the environment from the base build 5 | # and update to the latest files (that change frequently) from the git repo 6 | # Some builders run commands at various user levels so we have to clean up file permissions 7 | 8 | # make sure the virtual environment is active 9 | source venv/bin/activate 10 | 11 | # set project home 12 | PROJ_HOME=/home/vagrant/aws-deployments 13 | 14 | # install the python module requirements 15 | cd $PROJ_HOME 16 | pip install -r requirements.txt 17 | 18 | # install our specific modules 19 | cd $PROJ_HOME/src 20 | python setup.py install 21 | 22 | # copy over the basic credential files 23 | cp $PROJ_HOME/build/files/.f5aws ~/ 24 | cp -r $PROJ_HOME/build/files/.aws ~/ 25 | 26 | # setup the bash file for first login 27 | if ! egrep activate ~/.bash_profile ; then echo 'source venv/bin/activate' >> $HOME/.bash_profile; fi 28 | if ! egrep aws-deployments ~/.bash_profile ; then echo 'cd aws-deployments' >> $HOME/.bash_profile; fi 29 | 30 | # attempt to set working directory in .f5aws to the logged-in user 31 | sed -i.bak "s/home\/ubuntu/home\/`whoami`/" ~/.f5aws 32 | 33 | # clean up permissions 34 | sudo chown -R vagrant.vagrant ~/.* 35 | sudo chown -R vagrant.vagrant ~/* 36 | 37 | 38 | -------------------------------------------------------------------------------- /conf/config.ini: -------------------------------------------------------------------------------- 1 | # no user defined parameters should be placed here 2 | 3 | prog = 'f5aws' 4 | 5 | global_vars = '~/.f5aws' 6 | required_vars = 'install_path', 'ssh_key', 'bigip_rest_user', 'bigip_rest_password', 'f5_aws_access_key', 'f5_aws_secret_key' 7 | 8 | # not all regions work because of image availability 9 | regions = us-east-1, us-west-1, us-west-2, eu-west-1, ap-southeast-2, ap-northeast-1 10 | 11 | deployment_models = 'single-standalone', 'single-cluster', 'standalone-per-zone', 'cluster-per-zone' 12 | deployment_types = 'lb_only', 'lb_and_waf' 13 | available_apps = 'mutzel/all-in-one-hackazon', 'mutzel/django-traffic-test-server' 14 | -------------------------------------------------------------------------------- /docs/Orchestrating BIG-IP in the Public Cloud with Ansible.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/f5devcentral/aws-deployments/3a55af06fca11508230263207a2ed402fca3cb29/docs/Orchestrating BIG-IP in the Public Cloud with Ansible.pdf -------------------------------------------------------------------------------- /inventory/group_vars/localhosts: -------------------------------------------------------------------------------- 1 | --- 2 | ansible_connection: local 3 | ansible_python_interpreter: /usr/bin/env python 4 | -------------------------------------------------------------------------------- /inventory/host_vars/localhost: 
-------------------------------------------------------------------------------- 1 | --- 2 | ansible_connection: local 3 | ansible_python_interpreter: /usr/bin/env python 4 | -------------------------------------------------------------------------------- /inventory/hosts: -------------------------------------------------------------------------------- 1 | [localhosts] 2 | localhost -------------------------------------------------------------------------------- /playbooks/cluster_bigips.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | # Add CFT output variables to host from persisted results from previous playbooks 4 | - hosts: bigips 5 | gather_facts: no 6 | connection: local 7 | vars_files: 8 | - [ "~/vars/f5aws/env/{{ env_name }}/{{ inventory_hostname }}.yml" ] 9 | tasks: 10 | - name: Add CFT output variables to host from persisted results from previous playbooks 11 | set_fact: 12 | ansible_ssh_host={{ hostvars[inventory_hostname].stack_outputs.ManagementInterfacePublicIp }} 13 | ManagementInterfacePublicIp={{ hostvars[inventory_hostname].stack_outputs.ManagementInterfacePublicIp }} 14 | ManagementInterfacePrivateIp={{ hostvars[inventory_hostname].stack_outputs.ManagementInterfacePrivateIp }} 15 | ExternalInterfacePublicIp={{ hostvars[inventory_hostname].stack_outputs.ExternalInterfacePublicIp }} 16 | ExternalInterfacePrivateIp={{ hostvars[inventory_hostname].stack_outputs.ExternalInterfacePrivateIp }} 17 | InternalInterfacePrivateIp={{ hostvars[inventory_hostname].stack_outputs.InternalInterfacePrivateIp }} 18 | DeviceName='ip-{{hostvars[inventory_hostname].stack_outputs.ManagementInterfacePrivateIp|replace(".","-")}}.{{region}}.ec2.internal' 19 | 20 | - hosts: bigip-clusters 21 | gather_facts: no 22 | connection: local 23 | tasks: 24 | - name: create a dynamic group of seed devices using first member of each group 25 | add_host: 26 | name: "{{ item.value.0 }}" 27 | group: bigip-cluster-seeds 28 | cluster_name: "{{ item.key }}" 29 | members: "{{ item.value }}" 30 | with_dict: groups 31 | when: item.key in groups['bigip-clusters'] 32 | 33 | - hosts: bigip-cluster-seeds 34 | gather_facts: no 35 | connection: local 36 | tasks: 37 | # It is difficult to selectively loop through variables and construct strings together 38 | # in the play framework itself. The easiest way is through a template, as sketched below. 
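# For illustration only, the vars file rendered by cluster_peer_info.cfg.j2 might
# look something like this (hypothetical names and addresses; the real keys are
# whatever the template and the bigip_cluster role agree on):
#   cluster_name: zone1-cluster
#   peers: ['ip-10-0-1-11.us-east-1.ec2.internal', 'ip-10-0-2-11.us-east-1.ec2.internal']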
39 | - name: Store peers information 40 | template: src=../roles/bigip_cluster/templates/cluster_peer_info.cfg.j2 dest=~/vars/f5aws/env/{{ env_name }}/{{inventory_hostname}}_peer_info.yml 41 | delegate_to: localhost 42 | 43 | # Add peer variables to host from template output above 44 | - hosts: bigip-cluster-seeds 45 | gather_facts: no 46 | vars_files: 47 | - [ "~/vars/f5aws/env/{{ env_name }}/{{ inventory_hostname }}.yml" ] 48 | - [ "~/vars/f5aws/env/{{ env_name }}/{{ inventory_hostname }}_peer_info.yml" ] 49 | roles: 50 | # This role configures the device object on each bigip and clusters them 51 | - bigip_cluster 52 | 53 | -------------------------------------------------------------------------------- /playbooks/deploy_analytics.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: analyticshosts 3 | gather_facts: False 4 | vars_files: 5 | - [ "~/vars/f5aws/env/{{ env_name }}/{{ inventory_hostname }}.yml" ] 6 | tasks: 7 | - name: Add CFT output variables to host from persisted results from previous playbooks 8 | set_fact: 9 | ansible_ssh_host={{ hostvars[inventory_hostname].stack_outputs.AnalyticsServerInstancePublicIp }} 10 | AnalyticsServerInstancePublicIp={{ hostvars[inventory_hostname].stack_outputs.AnalyticsServerInstancePublicIp }} 11 | AnalyticsServerInstancePrivateIp={{ hostvars[inventory_hostname].stack_outputs.AnalyticsServerInstancePrivateIp }} 12 | AnalyticsServerInstanceId={{ hostvars[inventory_hostname].stack_outputs.AnalyticsServerInstance }} 13 | 14 | # Wait for hosts to become ready 15 | - hosts: analyticshosts 16 | gather_facts: False 17 | sudo: false 18 | tasks: 19 | 20 | - name: Wait for ssh port to open 21 | local_action: wait_for host={{ ansible_ssh_host }} search_regex=OpenSSH delay=10 port=22 22 | 23 | # Apply analytics role 24 | - hosts: analyticshosts 25 | gather_facts: False 26 | roles: 27 | - analytics 28 | 29 | 30 | -------------------------------------------------------------------------------- /playbooks/deploy_analytics_cft.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Deploy the docker host AMIs via CFT 3 | - hosts: analyticshosts 4 | gather_facts: no 5 | vars: 6 | ansible_connection: local 7 | ansible_python_interpreter: "/usr/bin/env python" 8 | tasks: 9 | - name: deploy analytics cft 10 | include: "{{ install_path }}/roles/infra/tasks/deploy_analyticshost_cft.yml" 11 | delegate_to: localhost 12 | -------------------------------------------------------------------------------- /playbooks/deploy_app.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: apphosts 3 | gather_facts: False 4 | vars_files: 5 | - [ "~/vars/f5aws/env/{{ env_name }}/{{ inventory_hostname }}.yml" ] 6 | tasks: 7 | - name: Add CFT output variables to host from persisted results from previous playbooks 8 | set_fact: 9 | ansible_ssh_host={{ hostvars[inventory_hostname].stack_outputs.WebServerInstancePublicIp }} 10 | WebServerInstancePublicIp={{ hostvars[inventory_hostname].stack_outputs.WebServerInstancePublicIp }} 11 | WebServerInstancePrivateIp={{ hostvars[inventory_hostname].stack_outputs.WebServerInstancePrivateIp }} 12 | 13 | # Wait for hosts to become ready 14 | - hosts: apphosts 15 | gather_facts: False 16 | sudo: false 17 | tasks: 18 | - name: Wait for ssh port to open 19 | local_action: wait_for host={{ ansible_ssh_host }} search_regex=OpenSSH delay=10 port=22 20 | 21 | # Create docker containers on the application 
hosts 22 | - hosts: apphosts 23 | gather_facts: False 24 | vars: 25 | ansible_sudo: True 26 | roles: 27 | - docker_base 28 | - app 29 | 30 | # New set of tasks without sudo 31 | - hosts: apphosts 32 | gather_facts: False 33 | tasks: 34 | - name: Store docker containers from jinja template 35 | local_action: template src=../roles/app/templates/docker_containers.cfg.j2 dest=~/vars/f5aws/env/{{ env_name }}/{{ inventory_hostname }}_docker_containers.yml 36 | 37 | 38 | -------------------------------------------------------------------------------- /playbooks/deploy_app_cft.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Deploy the docker host AMIs via CFT 3 | - hosts: apphosts 4 | gather_facts: no 5 | vars: 6 | ansible_connection: local 7 | ansible_python_interpreter: "/usr/bin/env python" 8 | tasks: 9 | - name: deploy app host cft 10 | include: "{{ install_path }}/roles/infra/tasks/deploy_apphost_cft.yml" 11 | delegate_to: localhost 12 | -------------------------------------------------------------------------------- /playbooks/deploy_apps_gtm.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Add CFT output variables to host from persisted results from previous playbooks 3 | - hosts: gtms 4 | gather_facts: no 5 | vars_files: 6 | - [ "~/vars/f5aws/env/{{ env_name }}/{{ inventory_hostname }}.yml" ] 7 | tasks: 8 | - name: Add CFT output variables to host from persisted results from previous playbooks 9 | set_fact: 10 | ansible_ssh_host={{ hostvars[inventory_hostname].stack_outputs.ManagementInterfacePublicIp }} 11 | ManagementInterfacePublicIp={{ hostvars[inventory_hostname].stack_outputs.ManagementInterfacePublicIp }} 12 | ManagementInterfacePrivateIp={{ hostvars[inventory_hostname].stack_outputs.ManagementInterfacePrivateIp }} 13 | ExternalInterfacePublicIp={{ hostvars[inventory_hostname].stack_outputs.ExternalInterfacePublicIp }} 14 | ExternalInterfacePrivateIp={{ hostvars[inventory_hostname].stack_outputs.ExternalInterfacePrivateIp }} 15 | VipAddress={{ hostvars[inventory_hostname].stack_outputs.Vip1 }} 16 | DeviceName='ip-{{hostvars[inventory_hostname].stack_outputs.ManagementInterfacePrivateIp|replace(".","-")}}.{{region}}.ec2.internal' 17 | region="{{region}}" 18 | 19 | - hosts: bigip-clusters 20 | gather_facts: no 21 | connection: local 22 | tasks: 23 | - name: re-create a dynamic group of seed devices using first member of each group 24 | add_host: 25 | name: "{{ item.value.0 }}" 26 | group: bigip-cluster-seeds 27 | cluster_name: "{{ item.key }}" 28 | members: "{{ item.value }}" 29 | with_dict: groups 30 | when: item.key in groups['bigip-clusters'] 31 | 32 | - hosts: bigip-cluster-seeds 33 | gather_facts: no 34 | vars_files: 35 | - [ "~/vars/f5aws/env/{{ env_name }}/{{ inventory_hostname }}.yml" ] 36 | tasks: 37 | - name: Add CFT output variables to host from persisted results from previous playbooks 38 | set_fact: 39 | ansible_ssh_host={{ hostvars[inventory_hostname].stack_outputs.ManagementInterfacePublicIp }} 40 | ManagementInterfacePublicIp={{ hostvars[inventory_hostname].stack_outputs.ManagementInterfacePublicIp }} 41 | ManagementInterfacePrivateIp={{ hostvars[inventory_hostname].stack_outputs.ManagementInterfacePrivateIp }} 42 | ExternalInterfacePublicIp={{ hostvars[inventory_hostname].stack_outputs.ExternalInterfacePublicIp }} 43 | ExternalInterfacePrivateIp={{ hostvars[inventory_hostname].stack_outputs.ExternalInterfacePrivateIp }} 44 | InternalInterfacePrivateIp={{ 
hostvars[inventory_hostname].stack_outputs.InternalInterfacePrivateIp }} 45 | AvailabilityZone={{ hostvars[inventory_hostname].stack_outputs.AvailabilityZone }} 46 | DeviceName='ip-{{hostvars[inventory_hostname].stack_outputs.ManagementInterfacePrivateIp|replace(".","-")}}.{{region}}.ec2.internal' 47 | 48 | - hosts: bigip-cluster-seeds 49 | gather_facts: no 50 | vars_files: 51 | - [ "~/vars/f5aws/env/{{ env_name }}/{{ inventory_hostname }}-vip-Vip1.yml" ] 52 | tasks: 53 | - name: Gather EIPs for VIP1 54 | set_fact: 55 | VipPrivateIp={{ hostvars[inventory_hostname].stack_outputs.privateIpAddress }} 56 | VipEip={{ hostvars[inventory_hostname].stack_outputs.eipAddress }} 57 | 58 | - hosts: gtms 59 | gather_facts: no 60 | vars: 61 | vip_id: "Vip1" 62 | roles: 63 | - gtm_app1 64 | 65 | # NOTE: To conserve EIPs for demos, we're not adding an EIP for VIP2 anymore. 66 | # Simply grab local VIP from original BIGIP CFT output vs. EIP CFT output 67 | - hosts: bigip-cluster-seeds 68 | gather_facts: no 69 | vars_files: 70 | - [ "~/vars/f5aws/env/{{ env_name }}/{{ inventory_hostname }}.yml" ] 71 | tasks: 72 | - name: Gather EIPs for VIP2 73 | set_fact: 74 | VipPrivateIp={{ hostvars[inventory_hostname].stack_outputs.Vip2 }} 75 | 76 | - hosts: gtms 77 | gather_facts: no 78 | vars: 79 | vip_id: "Vip2" 80 | roles: 81 | - gtm_app2 82 | 83 | ##### Done with application deployment, retrieve some information from the device ####### 84 | #################################################################################### 85 | 86 | - hosts: gtms 87 | gather_facts: no 88 | vars_files: 89 | - [ "~/vars/f5aws/env/{{ env_name }}/{{ inventory_hostname }}.yml" ] 90 | tasks: 91 | - name: Add CFT output variables to host from persisted results from previous playbooks 92 | set_fact: ansible_ssh_host={{ hostvars[inventory_hostname].stack_outputs.ManagementInterfacePublicIp }} 93 | - name: Get wideip information 94 | delegate_to: localhost 95 | bigip_config: 96 | state=inspect 97 | host={{ ansible_ssh_host }} 98 | user={{ bigip_rest_user }} 99 | password={{ bigip_rest_password }} 100 | collection_path='mgmt/tm/gtm/wideip' 101 | register: result 102 | 103 | - name: Persist gtm listener data 104 | delegate_to: localhost 105 | copy: 106 | content: "{{ result['out'] | to_json }}" 107 | dest: "~/vars/f5aws/env/{{ env_name }}/facts_{{ inventory_hostname }}.json" 108 | -------------------------------------------------------------------------------- /playbooks/deploy_az_cft.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: azs 3 | gather_facts: no 4 | vars: 5 | ansible_connection: local 6 | ansible_python_interpreter: /usr/bin/env python 7 | tasks: 8 | 9 | - name: deploy availability zone cft 10 | include: "{{ install_path }}/roles/infra/tasks/deploy_az_cft.yml" 11 | delegate_to: localhost 12 | -------------------------------------------------------------------------------- /playbooks/deploy_bigip.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Add CFT output variables to host from persisted results from previous playbooks 3 | - hosts: bigips 4 | gather_facts: no 5 | vars_files: 6 | - [ "~/vars/f5aws/env/{{ env_name }}/{{ inventory_hostname }}.yml" ] 7 | tasks: 8 | - name: Add CFT output variables to host from persisted results from previous playbooks 9 | set_fact: 10 | ansible_ssh_host={{ hostvars[inventory_hostname].stack_outputs.ManagementInterfacePublicIp }} 11 | ManagementInterfacePublicIp={{ 
hostvars[inventory_hostname].stack_outputs.ManagementInterfacePublicIp }} 12 | ManagementInterfacePrivateIp={{ hostvars[inventory_hostname].stack_outputs.ManagementInterfacePrivateIp }} 13 | ExternalInterfacePublicIp={{ hostvars[inventory_hostname].stack_outputs.ExternalInterfacePublicIp }} 14 | ExternalInterfacePrivateIp={{ hostvars[inventory_hostname].stack_outputs.ExternalInterfacePrivateIp }} 15 | InternalInterfacePrivateIp={{ hostvars[inventory_hostname].stack_outputs.InternalInterfacePrivateIp }} 16 | 17 | # Pick up Analytics Server Address so can add to syslog_pool 18 | - hosts: analyticshosts 19 | gather_facts: False 20 | vars_files: 21 | - [ "~/vars/f5aws/env/{{ env_name }}/{{ inventory_hostname }}.yml" ] 22 | tasks: 23 | - name: Add CFT output variables to host from persisted results from previous playbooks 24 | set_fact: 25 | ansible_ssh_host={{ hostvars[inventory_hostname].stack_outputs.AnalyticsServerInstancePublicIp }} 26 | AnalyticsServerInstancePublicIp={{ hostvars[inventory_hostname].stack_outputs.AnalyticsServerInstancePublicIp }} 27 | AnalyticsServerInstancePrivateIp={{ hostvars[inventory_hostname].stack_outputs.AnalyticsServerInstancePrivateIp }} 28 | AnalyticsServerInstanceId={{ hostvars[inventory_hostname].stack_outputs.AnalyticsServerInstance }} 29 | 30 | # Wait for hosts to become ready 31 | - hosts: bigips 32 | gather_facts: no 33 | sudo: false 34 | tasks: 35 | - name: Wait for ssh port to open 36 | local_action: wait_for host={{ ansible_ssh_host }} search_regex=OpenSSH delay=10 port=22 37 | - name: Wait until BIG-IP is ready for provisioning 38 | raw: "run /util bash -c 'cat /var/prompt/ps1'" 39 | until: (result.stdout.find("Active") != -1 or result.stdout.find("Standby") != -1) 40 | register: result 41 | retries: 60 42 | 43 | # Basic device setup using tmsh to enable further provisioning 44 | - hosts: bigips 45 | gather_facts: no 46 | roles: 47 | # adds users via tmsh 48 | - bigip_base 49 | # provisions system globals like ntp, dns, snmp, syslog, db keys 50 | - bigip_system 51 | # sets AWS keys and disables DHCP 52 | - bigip_system_aws 53 | # sets vlans, self-ips, routes 54 | - bigip_network 55 | 56 | -------------------------------------------------------------------------------- /playbooks/deploy_bigip_cft.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: bigips 3 | gather_facts: no 4 | vars: 5 | ansible_connection: local 6 | ansible_python_interpreter: "/usr/bin/env python" 7 | tasks: 8 | 9 | - name: deploy bigip cft 10 | include: "{{ install_path }}/roles/infra/tasks/deploy_bigip_cft.yml" 11 | delegate_to: localhost 12 | -------------------------------------------------------------------------------- /playbooks/deploy_client.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: clienthosts 3 | gather_facts: no 4 | vars_files: 5 | - [ "~/vars/f5aws/env/{{ env_name }}/{{ inventory_hostname }}.yml" ] 6 | tasks: 7 | - name: Add CFT output variables to host from persisted results from previous playbooks 8 | set_fact: 9 | ansible_ssh_host={{ hostvars[inventory_hostname].stack_outputs.ClientInstancePublicIp }} 10 | ClientInstancePublicIp={{ hostvars[inventory_hostname].stack_outputs.ClientInstancePublicIp }} 11 | ClientInstancePrivateIp={{ hostvars[inventory_hostname].stack_outputs.ClientInstancePrivateIp }} 12 | 13 | # Need to wait for port 22 to open up 14 | - hosts: clienthosts 15 | gather_facts: no 16 | tasks: 17 | - local_action: wait_for host={{ 
ansible_ssh_host }} search_regex=OpenSSH delay=10 port=22 18 | sudo: false 19 | 20 | # Now launch containers inside them 21 | # The Amazon AMIs we're using already have Docker installed, so there's no need for the install_docker role 22 | - hosts: clienthosts 23 | gather_facts: no 24 | vars: 25 | ansible_sudo: True 26 | roles: 27 | # - jumpbox 28 | - client 29 | 30 | # Modify Jmeter Scripts to hit vips or wideips 31 | # This means replacing hostnames with BIG-IP VIPs in the jmeter files themselves (no DNS requirement) 32 | 33 | #ex. 34 | # - replace: dest=~/simple-load-generation.jmx regexp='demo.example.com' replace='{{VIP}}' 35 | # OR 36 | # modify /etc/resolv.conf to resolve example.com at GTM Listener 37 | # so we first need to load the GTM listeners again (Vip1) 38 | 39 | 40 | # Add CFT output variables to host from persisted results from previous playbooks 41 | - hosts: gtms 42 | gather_facts: no 43 | vars_files: 44 | - [ "~/vars/f5aws/env/{{ env_name }}/{{ inventory_hostname }}.yml" ] 45 | tasks: 46 | - name: Add CFT output variables to host from persisted results from previous playbooks 47 | set_fact: 48 | ansible_ssh_host={{ hostvars[inventory_hostname].stack_outputs.ManagementInterfacePublicIp }} 49 | ManagementInterfacePublicIp={{ hostvars[inventory_hostname].stack_outputs.ManagementInterfacePublicIp }} 50 | ManagementInterfacePrivateIp={{ hostvars[inventory_hostname].stack_outputs.ManagementInterfacePrivateIp }} 51 | ExternalInterfacePublicIp={{ hostvars[inventory_hostname].stack_outputs.ExternalInterfacePublicIp }} 52 | ExternalInterfacePrivateIp={{ hostvars[inventory_hostname].stack_outputs.ExternalInterfacePrivateIp }} 53 | VipAddress={{ hostvars[inventory_hostname].stack_outputs.Vip1 }} 54 | region="{{region}}" 55 | 56 | ### Example Default /etc/resolv.conf in AWS #### 57 | 58 | # ubuntu@ip-172-16-13-4:~$ cat /etc/resolv.conf 59 | # # Dynamic resolv.conf(5) file for glibc resolver(3) generated by resolvconf(8) 60 | # # DO NOT EDIT THIS FILE BY HAND -- YOUR CHANGES WILL BE OVERWRITTEN 61 | # nameserver 172.16.0.2 62 | # search ec2.internal 63 | # 64 | 65 | # Modify Nameservers 66 | - hosts: clienthosts 67 | gather_facts: no 68 | vars: 69 | ansible_sudo: True 70 | tasks: 71 | - name: Grab number of existing name servers 72 | shell: "egrep -c nameserver /etc/resolv.conf" 73 | ignore_errors: true 74 | register: nameserver_count 75 | 76 | # Need to place GTMs first. 
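# For reference, after the task below runs, /etc/resolvconf/resolv.conf.d/head would
# contain one 'nameserver' line per GTM listener EIP (address hypothetical), which
# 'resolvconf -u' then prepends to the generated /etc/resolv.conf:
#   nameserver 52.0.113.10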
77 |     - name: Add example.com nameservers to /etc/resolv.conf if only the default nameserver is present
78 |       shell: "echo 'nameserver {{hostvars[item]['VipAddress']}}' >> /etc/resolvconf/resolv.conf.d/head"
79 |       #shell: "echo $(echo 'nameserver {{hostvars[item]['VipAddress']}}\n' | cat - /etc/resolv.conf ) >> /etc/resolv.conf"
80 |       with_items: groups['gtms']
81 |       when: nameserver_count.stdout|int < 2
82 |
83 |     # Restart resolvconf to pick up changes
84 |     - name: restart resolvconf
85 |       shell: resolvconf -u
86 |
87 | # Install JMeter
88 |
89 | # Upload Jmeter scripts
90 | - hosts: clienthosts
91 |   gather_facts: no
92 |   tasks:
93 |     - name: Copy over simple load generation jmeter script
94 |       copy: src=../roles/client/files/simple-load-generation.jmx dest=~/
--------------------------------------------------------------------------------
/playbooks/deploy_client_cft.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Deploy the client ami via CFT
3 | - hosts: clienthosts
4 |   gather_facts: no
5 |   vars:
6 |     ansible_connection: local
7 |     ansible_python_interpreter: "/usr/bin/env python"
8 |   tasks:
9 |     - name: deploy client cft
10 |       include: "{{ install_path }}/roles/infra/tasks/deploy_client_cft.yml"
11 |       delegate_to: localhost
--------------------------------------------------------------------------------
/playbooks/deploy_gtm.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Attach an EIP to the GTM Listener.
3 | # This is not quite a normal VIP like the per-app EIPs we provision in LTM.
4 | # The GTM CFT should probably just create 2 EIPs automatically:
5 | # 1 for the Self-IP (currently done in CFT)
6 | # 1 for the GTM Listener VIP
7 |
8 | - hosts: gtms
9 |   gather_facts: no
10 |   vars:
11 |     ansible_connection: local
12 |     ansible_python_interpreter: "/usr/bin/env python"
13 |     vip_id: "Vip1"
14 |     owner: "gtm"
15 |   vars_files:
16 |    - "~/vars/f5aws/env/{{ env_name }}/{{ inventory_hostname }}.yml"
17 |   tasks:
18 |     - name: deploy eips
19 |       include: "{{ install_path }}/roles/infra/tasks/deploy_eip_cft.yml"
20 |       delegate_to: localhost
21 |
22 | # Add CFT output variables to host from persisted results from previous playbooks
23 | - hosts: gtms
24 |   gather_facts: no
25 |   vars_files:
26 |    - [ "~/vars/f5aws/env/{{ env_name }}/{{ inventory_hostname }}.yml" ]
27 |   tasks:
28 |     - name: Add CFT output variables to host from persisted results from previous playbooks
29 |       set_fact:
30 |         ansible_ssh_host={{ hostvars[inventory_hostname].stack_outputs.ManagementInterfacePublicIp }}
31 |         ManagementInterfacePublicIp={{ hostvars[inventory_hostname].stack_outputs.ManagementInterfacePublicIp }}
32 |         ManagementInterfacePrivateIp={{ hostvars[inventory_hostname].stack_outputs.ManagementInterfacePrivateIp }}
33 |         ExternalInterfacePublicIp={{ hostvars[inventory_hostname].stack_outputs.ExternalInterfacePublicIp }}
34 |         ExternalInterfacePrivateIp={{ hostvars[inventory_hostname].stack_outputs.ExternalInterfacePrivateIp }}
35 |         VipAddress={{ hostvars[inventory_hostname].stack_outputs.Vip1 }}
36 |         DeviceName='ip-{{hostvars[inventory_hostname].stack_outputs.ManagementInterfacePrivateIp|replace(".","-")}}.{{region}}.ec2.internal'
37 |         region="{{region}}"
38 |
39 | - hosts: bigips
40 |   gather_facts: no
41 |   vars_files:
42 |    - [ "~/vars/f5aws/env/{{ env_name }}/{{ inventory_hostname }}.yml" ]
43 |   tasks:
44 |     - name: Add CFT output variables to host from persisted results from previous playbooks
45 |       set_fact:
46 |         ansible_ssh_host={{ hostvars[inventory_hostname].stack_outputs.ManagementInterfacePublicIp }}
47 |         ManagementInterfacePublicIp={{ hostvars[inventory_hostname].stack_outputs.ManagementInterfacePublicIp }}
48 |         ManagementInterfacePrivateIp={{ hostvars[inventory_hostname].stack_outputs.ManagementInterfacePrivateIp }}
49 |         ExternalInterfacePublicIp={{ hostvars[inventory_hostname].stack_outputs.ExternalInterfacePublicIp }}
50 |         ExternalInterfacePrivateIp={{ hostvars[inventory_hostname].stack_outputs.ExternalInterfacePrivateIp }}
51 |         InternalInterfacePrivateIp={{ hostvars[inventory_hostname].stack_outputs.InternalInterfacePrivateIp }}
52 |         DeviceName='ip-{{hostvars[inventory_hostname].stack_outputs.ManagementInterfacePrivateIp|replace(".","-")}}.{{region}}.ec2.internal'
53 |         region="{{region}}"
54 |
55 |
56 | # Re-construct bigip_clusters info for adding BIGIP cluster objects to GTM
57 | # Credit to Tim Rupp
58 | - hosts: bigip-clusters
59 |   gather_facts: no
60 |   connection: local
61 |   tasks:
62 |     - name: create a dynamic group of seed devices using first member of each group
63 |       add_host:
64 |         name: "{{ item.value.0 }}"
65 |         group: bigip-cluster-seeds
66 |         cluster_name: "{{ item.key }}"
67 |         members: "{{ item.value }}"
68 |       with_dict: groups
69 |       when: item.key in groups['bigip-clusters']
70 |
71 | # Add peer variables to seeds from the variables stored when the bigip clusters were originally created
72 | - hosts: bigip-cluster-seeds
73 |   gather_facts: no
74 |   vars_files:
75 |    - [ "~/vars/f5aws/env/{{ env_name }}/{{ inventory_hostname }}.yml" ]
76 |    - [ "~/vars/f5aws/env/{{ env_name }}/{{ inventory_hostname }}_peer_info.yml" ]
77 |   tasks:
78 |     - name: loading variables for cluster seeds
79 |       debug: msg="loading variables for cluster seed {{inventory_hostname}}"
80 |
81 | # Wait for hosts to become ready
82 | - hosts: gtms
83 |   gather_facts: no
84 |   sudo: false
85 |   tasks:
86 |     - name: Wait for ssh port to open
87 |       local_action: wait_for host={{ ansible_ssh_host }} search_regex=OpenSSH delay=10 port=22
88 |     - name: Wait until BIG-IP is ready for provisioning
89 |       raw: "run /util bash -c 'cat /var/prompt/ps1'"
90 |       until: (result.stdout.find("Active") != -1 or result.stdout.find("Standby") != -1)
91 |       register: result
92 |       retries: 40
93 |
94 | # Basic device setup using tmsh to enable further provisioning
95 | - hosts: gtms
96 |   gather_facts: no
97 |   roles:
98 |     # adds users via tmsh
99 |     - bigip_base
100 |     # provisions system globals like ntp, dns, snmp, syslog, db keys
101 |     - bigip_system
102 |     # sets AWS keys and disables DHCP
103 |     - bigip_system_aws
104 |     # sets vlans, self-ips, routes
105 |     - gtm_network
106 |     # sets up gtm configuration for this network topology
107 |     - gtm_conf
108 |     # sets up gtms in a cluster; they share route information
109 |     - gtm_cluster
--------------------------------------------------------------------------------
/playbooks/deploy_gtm_cft.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: gtms
3 |   gather_facts: no
4 |   vars:
5 |     ansible_connection: local
6 |     ansible_python_interpreter: "/usr/bin/env python"
7 |   tasks:
8 |     - name: deploy gtm cft
9 |       include: "{{ install_path }}/roles/infra/tasks/deploy_gtm_cft.yml"
10 |       delegate_to: localhost
--------------------------------------------------------------------------------
/playbooks/deploy_vpc_cft.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: vpc-manager
3 |   gather_facts: no
4 |   vars:
5 |     ansible_connection: local
6 |     ansible_python_interpreter: /usr/bin/env python
7 |   tasks:
8 |     - name: deploy vpc cft
9 |       include: "{{ install_path }}/roles/infra/tasks/deploy_vpc_cft.yml"
10 |       delegate_to: localhost
--------------------------------------------------------------------------------
/playbooks/init.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # This playbook creates an inventory for a new deployment, and creates the necessary template and variable files.
3 |
4 | - hosts: localhosts
5 |   gather_facts: False
6 |   roles:
7 |     - inventory_manager
--------------------------------------------------------------------------------
/playbooks/remove.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhosts
3 |   gather_facts: False
4 |   tasks:
5 |     - name: teardown env
6 |       include: "{{ install_path }}/roles/inventory_manager/tasks/teardown.yml"
--------------------------------------------------------------------------------
/playbooks/start_traffic.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: clienthosts
3 |   gather_facts: no
4 |   vars_files:
5 |    - [ "~/vars/f5aws/env/{{ env_name }}/{{ inventory_hostname }}.yml" ]
6 |   tasks:
7 |     - name: Add CFT output variables to host from persisted results from previous playbooks
8 |       set_fact:
9 |         ansible_ssh_host={{ hostvars[inventory_hostname].stack_outputs.ClientInstancePublicIp }}
10 |         ClientInstancePublicIp={{ hostvars[inventory_hostname].stack_outputs.ClientInstancePublicIp }}
11 |         ClientInstancePrivateIp={{ hostvars[inventory_hostname].stack_outputs.ClientInstancePrivateIp }}
12 |
13 | # Launch Jmeter test client in background
14 | - hosts: clienthosts
15 |   gather_facts: no
16 |   tasks:
17 |     - name: Grab number of jmeter processes launched to make sure we are not launching more than 1
18 |       shell: "ps -ef | egrep -v egrep | egrep -c jmeter"
19 |       ignore_errors: true
20 |       register: jmeter_count
21 |     #TODO: why red?
22 |     - name: Launch jmeter script in background
23 |       shell: nohup jmeter -n -t ~/simple-load-generation.jmx &
24 |       when: jmeter_count.stdout|int < 1
--------------------------------------------------------------------------------
/playbooks/stop_traffic.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: clienthosts
3 |   gather_facts: no
4 |   vars_files:
5 |    - [ "~/vars/f5aws/env/{{ env_name }}/{{ inventory_hostname }}.yml" ]
6 |   tasks:
7 |     - name: Add CFT output variables to host from persisted results from previous playbooks
8 |       set_fact:
9 |         ansible_ssh_host={{ hostvars[inventory_hostname].stack_outputs.ClientInstancePublicIp }}
10 |         ClientInstancePublicIp={{ hostvars[inventory_hostname].stack_outputs.ClientInstancePublicIp }}
11 |         ClientInstancePrivateIp={{ hostvars[inventory_hostname].stack_outputs.ClientInstancePrivateIp }}
12 |
13 | # Kill Jmeter test clients
14 | - hosts: clienthosts
15 |   gather_facts: no
16 |   tasks:
17 |
18 |     - name: Kill Jmeter
19 |       #shell: kill `ps -ef | egrep jmeter | egrep -v egrep | awk '{print $2}'`
20 |       shell: killall java
--------------------------------------------------------------------------------
/playbooks/teardown_all.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # This playbook dismantles all deployed EC2 resources.
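# Teardown runs roughly in reverse order of deployment: EIP associations are
# removed before the GTMs and BIG-IPs that hold them, instances before the
# availability-zone stacks, and the VPC last, so no CloudFormation stack is
# deleted while another still references its resources.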
3 | - hosts: analyticshosts 4 | gather_facts: no 5 | vars: 6 | ansible_connection: local 7 | ansible_python_interpreter: /usr/bin/env python 8 | tasks: 9 | - name: Teardown analyticshosts 10 | include: "{{ install_path }}/roles/infra/tasks/teardown_analyticshost_cft.yml" 11 | 12 | - hosts: clienthosts 13 | gather_facts: no 14 | vars: 15 | ansible_connection: local 16 | ansible_python_interpreter: /usr/bin/env python 17 | tasks: 18 | - name: Teardown client 19 | include: "{{ install_path }}/roles/infra/tasks/teardown_client_cft.yml" 20 | 21 | - hosts: gtms 22 | vars: 23 | vip_id: Vip1 24 | ansible_connection: local 25 | ansible_python_interpreter: /usr/bin/env python 26 | gather_facts: no 27 | tasks: 28 | - name: Teardown eip associated with Vip1 29 | include: "{{ install_path }}/roles/infra/tasks/teardown_eip_cft.yml" 30 | 31 | - hosts: gtms 32 | gather_facts: no 33 | vars: 34 | ansible_connection: local 35 | ansible_python_interpreter: /usr/bin/env python 36 | tasks: 37 | - name: Teardown gtms 38 | include: "{{ install_path }}/roles/infra/tasks/teardown_gtm_cft.yml" 39 | 40 | - hosts: bigip-cluster-seeds 41 | vars: 42 | ansible_connection: local 43 | ansible_python_interpreter: /usr/bin/env python 44 | vip_id: Vip1 45 | gather_facts: no 46 | tasks: 47 | - name: Teardown eip associated with Vip1 48 | include: "{{ install_path }}/roles/infra/tasks/teardown_eip_cft.yml" 49 | 50 | - hosts: bigips 51 | gather_facts: no 52 | vars: 53 | ansible_connection: local 54 | ansible_python_interpreter: /usr/bin/env python 55 | tasks: 56 | - name: Teardown bigips 57 | include: "{{ install_path }}/roles/infra/tasks/teardown_bigip_cft.yml" 58 | 59 | - hosts: apphosts 60 | gather_facts: no 61 | vars: 62 | ansible_connection: local 63 | ansible_python_interpreter: /usr/bin/env python 64 | tasks: 65 | - name: Teardown apphosts 66 | include: "{{ install_path }}/roles/infra/tasks/teardown_apphost_cft.yml" 67 | 68 | - hosts: azs 69 | gather_facts: no 70 | vars: 71 | ansible_connection: local 72 | ansible_python_interpreter: /usr/bin/env python 73 | tasks: 74 | - name: Teardown az 75 | include: "{{ install_path }}/roles/infra/tasks/teardown_az_cft.yml" 76 | 77 | - hosts: vpc-manager 78 | gather_facts: no 79 | vars: 80 | ansible_connection: local 81 | ansible_python_interpreter: /usr/bin/env python 82 | tasks: 83 | - name: Teardown vpc 84 | include: "{{ install_path }}/roles/infra/tasks/teardown_vpc_cft.yml" 85 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | ansible==1.8.2 2 | bigsuds==1.0.2 3 | boto==2.38.0 4 | configobj==5.0.6 5 | ecdsa==0.13 6 | iso8601==0.1.10 7 | Jinja2==2.8 8 | Mako==1.0.2 9 | ordereddict==1.1 10 | paramiko==1.15.2 11 | pexpect==3.3 12 | py==1.4.30 13 | pycrypto==2.6.1 14 | Pygments==2.0.2 15 | pytest==2.7.2 16 | PyYAML==3.11 17 | requests==2.7.0 18 | six==1.9.0 19 | suds==0.4 20 | wheel==0.24.0 21 | 22 | -------------------------------------------------------------------------------- /roles/analytics/files/F5AccessApp.tgz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/f5devcentral/aws-deployments/3a55af06fca11508230263207a2ed402fca3cb29/roles/analytics/files/F5AccessApp.tgz -------------------------------------------------------------------------------- /roles/analytics/files/SplunkforF5Networks.tgz: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/f5devcentral/aws-deployments/3a55af06fca11508230263207a2ed402fca3cb29/roles/analytics/files/SplunkforF5Networks.tgz -------------------------------------------------------------------------------- /roles/analytics/files/SplunkforF5Security.tgz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/f5devcentral/aws-deployments/3a55af06fca11508230263207a2ed402fca3cb29/roles/analytics/files/SplunkforF5Security.tgz -------------------------------------------------------------------------------- /roles/analytics/files/inputs.conf: -------------------------------------------------------------------------------- 1 | [default] 2 | host = analytics-1.example.com 3 | 4 | [udp://514] 5 | connection_host = dns 6 | sourcetype = syslog 7 | source = udp:514 8 | 9 | [tcp://514] 10 | connection_host = dns 11 | sourcetype = syslog 12 | source = tcp:514 13 | 14 | [udp://515] 15 | connection_host = dns 16 | sourcetype = asm_log 17 | source = udp:515 18 | 19 | [tcp://515] 20 | connection_host = dns 21 | sourcetype = asm_log 22 | source = tcp:515 23 | -------------------------------------------------------------------------------- /roles/analytics/files/modify_admin_pass_expect.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | ''' 4 | Used to Get Around Password Prompt: 5 | [ec2-user@ip-172-16-14-166 ~]$ sudo /opt/splunk/bin/splunk edit user admin -password NEWPASSWORD -auth admin:i-00cce3c5 6 | The administrator requires you to change your password. 7 | Please enter a new password: 8 | Please confirm new password: 9 | User admin edited. 10 | ''' 11 | 12 | import sys 13 | import time 14 | import pexpect 15 | 16 | MY_TIMEOUT=5 17 | SSH_NEWKEY = 'Are you sure you want to continue connecting' 18 | 19 | ssh_key = sys.argv[1] 20 | user = sys.argv[2] 21 | host = sys.argv[3] 22 | old_password = sys.argv[4] 23 | new_password = sys.argv[5] 24 | 25 | # print "ssh_key: " + ssh_key 26 | # print "user: " + user 27 | # print "host: " + host 28 | # print "old_password: " + old_password 29 | # print "new_password: " + new_password 30 | 31 | print "Launching SSH session with command: ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -i " + ssh_key + " " + user + "@" + host 32 | conn = pexpect.spawn("ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -i " + ssh_key + " " + user + "@" + host) 33 | 34 | match_value = conn.expect([SSH_NEWKEY, '$', pexpect.EOF, pexpect.TIMEOUT], timeout=MY_TIMEOUT); 35 | time.sleep(1) 36 | 37 | #print "match_value = " + str(match_value) 38 | if match_value == 0: 39 | print "Matched new key warning" 40 | conn.sendline ( "yes" ) 41 | if match_value == 1: 42 | print "Matched CLI prompt. Sending Password Command" 43 | #print "sudo /opt/splunk/bin/splunk edit user admin -password '" + new_password + "' -role admin -auth admin:'" + old_password + "'" 44 | conn.sendline ( "sudo /opt/splunk/bin/splunk edit user admin -password '" + new_password + "' -role admin -auth admin:'" + old_password + "'" ) 45 | 46 | 47 | match_value = conn.expect(['Please enter a new password:','Login failed', pexpect.EOF, pexpect.TIMEOUT], timeout=MY_TIMEOUT); 48 | print "match_value = " + str(match_value) 49 | time.sleep(1) 50 | 51 | if match_value == 0: 52 | print "Matched Password prompt. 
Sending Password" 53 | conn.sendline ( new_password ) 54 | match_value = conn.expect(['Please confirm new password:', pexpect.EOF, pexpect.TIMEOUT], timeout=MY_TIMEOUT); 55 | if match_value == 0: 56 | print "Matched Password Confirm. Resending Password" 57 | conn.sendline ( new_password ) 58 | match_value = conn.expect(['User admin edited.', pexpect.EOF, pexpect.TIMEOUT], timeout=MY_TIMEOUT); 59 | if match_value == 0: 60 | print "User password successfully changed. Exiting" 61 | conn.sendline ( 'exit' ) 62 | elif match_value == 1: 63 | print "User login failed. Probably already set from previous run. Try confirming new password" 64 | #print "Sending: sudo /opt/splunk/bin/splunk edit user admin -password '" + new_password + "' -role admin -auth admin:'" + new_password + "'" 65 | conn.sendline ( "sudo /opt/splunk/bin/splunk edit user admin -password '" + new_password + "' -role admin -auth admin:'" + new_password + "'" ) 66 | match_value = conn.expect(['User admin edited.', pexpect.EOF, pexpect.TIMEOUT], timeout=MY_TIMEOUT); 67 | if match_value == 0: 68 | print "Confirmed admin password successfully changed to new password. Exiting" 69 | conn.sendline ( 'exit' ) 70 | else: 71 | print "Something is wrong. New Password is not set." 72 | sys.exit(1) 73 | else: 74 | print "Something is wrong. New Password is not set." 75 | sys.exit(1) 76 | 77 | 78 | -------------------------------------------------------------------------------- /roles/analytics/files/splunk-add-on-for-f5-big-ip_230.tgz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/f5devcentral/aws-deployments/3a55af06fca11508230263207a2ed402fca3cb29/roles/analytics/files/splunk-add-on-for-f5-big-ip_230.tgz -------------------------------------------------------------------------------- /roles/analytics/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | # Tip from Splunk to change admin password and avoid password change error: 4 | #[ec2-user@ip-172-16-14-236 ~]$ curl -sk -u admin:i-3c97bf10 -H Content-type: application/json -X POST -d '{"username":"admin","password”:”PASSWORD"}' https://localhost:8089/services/storage/passwords/admin 5 | # 6 | # 7 | # 8 | # For security reasons, the admin on this account has requested that you change your password 9 | # 10 | # 11 | # - name: Touch file so change_password not required (SPLUNK_HOME/etc/.ui_login) 12 | # file: path=/opt/splunk/etc/.ui_login state=touch owner=splunk group=splunk 13 | 14 | # Unfortunately above didn't work and can't get past initial Password Change 15 | # request in REST UI so had to resort to yet another expect script. 
16 |
17 | - name: Change admin password
18 |   script: ../roles/analytics/files/modify_admin_pass_expect.py {{ ansible_ssh_private_key_file }} ec2-user {{ ansible_ssh_host }} {{ AnalyticsServerInstanceId }} \'{{ bigip_rest_password }}\'
19 |   delegate_to: localhost
20 |
21 | # Set Fact ansible_sudo: True
22 | - name: Set sudo to true for the rest of the tasks
23 |   set_fact: ansible_sudo=True
24 |
25 | # Upload new inputs.conf
26 | - name: Copy over inputs.conf
27 |   copy: src=inputs.conf dest=/opt/splunk/etc/system/local/inputs.conf
28 |
29 | # UPLOAD F5 APPS
30 | - name: Copy over Splunk for F5 Networks App
31 |   copy: src=SplunkforF5Networks.tgz dest=/opt/splunk/etc/apps/
32 |
33 | - name: Copy over Splunk for F5 Security App
34 |   copy: src=SplunkforF5Security.tgz dest=/opt/splunk/etc/apps/
35 |
36 | - name: Copy over Splunk for F5 Access App
37 |   copy: src=F5AccessApp.tgz dest=/opt/splunk/etc/apps/
38 |
39 | - name: Copy over Splunk Add-On. NOTE-> Add-On not implemented yet
40 |   copy: src=splunk-add-on-for-f5-big-ip_230.tgz dest=/opt/splunk/etc/apps/
41 |
42 | - name: unzip F5 Networks app
43 |   unarchive: src=/opt/splunk/etc/apps/SplunkforF5Networks.tgz dest=/opt/splunk/etc/apps/ copy=no
44 |
45 | - name: unzip F5 Networks Security app
46 |   unarchive: src=/opt/splunk/etc/apps/SplunkforF5Security.tgz dest=/opt/splunk/etc/apps/ copy=no
47 |
48 | - name: unzip F5 Networks Access app
49 |   unarchive: src=/opt/splunk/etc/apps/F5AccessApp.tgz dest=/opt/splunk/etc/apps/ copy=no
50 |
51 | - name: unzip Splunk Add-On for F5 Networks
52 |   unarchive: src=/opt/splunk/etc/apps/splunk-add-on-for-f5-big-ip_230.tgz dest=/opt/splunk/etc/apps/ copy=no
53 |
54 | - name: change ownership of SplunkforF5Networks to splunk
55 |   file: path=/opt/splunk/etc/apps/SplunkforF5Networks owner=splunk group=splunk
56 |
57 | - name: change ownership of SplunkforF5Security to splunk
58 |   file: path=/opt/splunk/etc/apps/SplunkforF5Security owner=splunk group=splunk
59 |
60 | - name: change ownership of F5AccessApp to splunk
61 |   file: path=/opt/splunk/etc/apps/F5AccessApp owner=splunk group=splunk
62 |
63 | - name: change ownership of Splunk_TA_f5-bigip to splunk
64 |   file: path=/opt/splunk/etc/apps/Splunk_TA_f5-bigip owner=splunk group=splunk
65 |
66 | # RESTART SPLUNK
67 | - name: restart Splunk
68 |   command: /opt/splunk/bin/splunk restart
69 |
--------------------------------------------------------------------------------
/roles/app/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # tasks file for app role
3 | # ensures the set of containers is running
4 | - docker:
5 |     name: "hackazon"
6 |     image: "{{ image_id }}"
7 |     count: 1
8 |     ports: 80:80
9 |     command: supervisord -n
10 |     state: running
11 |   tags:
12 |     - launch_containers
13 |   register: launched_containers
14 |   until: launched_containers["failed"] == false
15 |   retries: 5
16 |   delay: 10
17 |
--------------------------------------------------------------------------------
/roles/app/templates/docker_containers.cfg.j2:
--------------------------------------------------------------------------------
1 | ---
2 | "docker_containers":
3 | {{ launched_containers.ansible_facts.docker_containers | to_nice_yaml }}
4 |
--------------------------------------------------------------------------------
/roles/bigip_app1/files/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 | This directory contains the JSON REST payload we will post to /mgmt/tm/sys/application/template to deploy a new iApp template.
See iapp_f5_http_backport_template.json
4 |
5 | Here are the steps we followed for this particular iApp template:
6 |
7 | 1) Download the template from DevCentral codeshare or otherwise.
8 | 2) Make sure the iApp doesn't contain any TCL packages in the .tmpl file; if it does, resolve those issues first... :(
9 | 3) Import the payload via the Configuration Utility (GUI)
10 | 3.a) Export to JSON via iControlREST - https://52.22.206.179/mgmt/tm/sys/application/template/f5.http.backport.1.1.2?expandSubcollections=true
11 | 3.b) Remove extraneous fields like "selfLink", "generation", etc.
12 | 3.c) Rename field actionsReference -> actions
13 | 4) Repost as necessary
--------------------------------------------------------------------------------
/roles/bigip_app1/files/eip.json:
--------------------------------------------------------------------------------
1 | {
2 |   "AWSTemplateFormatVersion": "2010-09-09",
3 |   "Description": "Allocates an EIP and assigns it to a secondary private IP",
4 |   "Parameters": {
5 |     "vpc": {
6 |       "Type": "AWS::EC2::VPC::Id",
7 |       "ConstraintDescription": "Must be an existing VPC within the supplied region."
8 |     },
9 |     "NetworkInterfaceId": {
10 |       "Description": "Network Interface ID to which this secondary IP will be added",
11 |       "Type": "String",
12 |       "ConstraintDescription": "Must be an existing network interface ID"
13 |     },
14 |     "PrivateIpAddress": {
15 |       "Description": "Private IP (typically a secondary used for VIPs) that the EIP will be attached to",
16 |       "Type": "String",
17 |       "ConstraintDescription": "Must be a private IP"
18 |     }
19 |   },
20 |   "Resources": {
21 |     "EipAddress": {
22 |       "Type": "AWS::EC2::EIP",
23 |       "Properties": {
24 |         "Domain": "vpc"
25 |       }
26 |     },
27 |     "EipAssociation": {
28 |       "Type": "AWS::EC2::EIPAssociation",
29 |       "Properties": {
30 |         "NetworkInterfaceId": {
31 |           "Ref": "NetworkInterfaceId"
32 |         },
33 |         "PrivateIpAddress": {
34 |           "Ref": "PrivateIpAddress"
35 |         },
36 |         "AllocationId": {
37 |           "Fn::GetAtt": [
38 |             "EipAddress",
39 |             "AllocationId"
40 |           ]
41 |         }
42 |       }
43 |     }
44 |   },
45 |   "Outputs": {
46 |     "privateIpAddress": {
47 |       "Description": "Private IP Address",
48 |       "Value": {
49 |         "Ref": "PrivateIpAddress"
50 |       }
51 |     },
52 |     "eipAddress": {
53 |       "Description": "EIP NAT Address",
54 |       "Value": {
55 |         "Ref": "EipAddress"
56 |       }
57 |     },
58 |     "eipAllocationId": {
59 |       "Description": "EIP Allocation ID",
60 |       "Value": {
61 |         "Fn::GetAtt": [
62 |           "EipAddress",
63 |           "AllocationId"
64 |         ]
65 |       }
66 |     }
67 |   }
68 | }
--------------------------------------------------------------------------------
/roles/bigip_app1/files/irule_demo_analytics.tcl:
--------------------------------------------------------------------------------
1 | when CLIENT_ACCEPTED {
2 |   set client [IP::client_addr]
3 | }
4 |
5 | when HTTP_REQUEST {
6 |   set vhost [HTTP::host]:[TCP::local_port]
7 |   set url [HTTP::uri]
8 |   set method [HTTP::method]
9 |   set http_version [HTTP::version]
10 |   set user_agent [HTTP::header "User-Agent"]
11 |   set tcp_start_time [clock clicks -milliseconds]
12 |   set req_start_time [clock format [clock seconds] -format "%Y/%m/%d %H:%M:%S"]
13 |   set req_elapsed_time 0
14 |   set virtual_server [LB::server]
15 |
16 |   if { [HTTP::header Content-Length] > 0 } then {
17 |     set req_length [HTTP::header "Content-Length"]
18 |     if {$req_length > 4000000} then {
19 |       set req_length 4000000
20 |     }
21 |     HTTP::collect $req_length
22 |   } else {
23 |     set req_length 0
24 |   }
25 |
26 |   if { [HTTP::header "Referer"] ne "" } then {
27 |     set referer [HTTP::header "Referer"]
28 |   } else {
29 |     set referer -
30 |   }
31 | }
32 |
33 | when HTTP_REQUEST_DATA {
34 |   set req_elapsed_time [expr {[clock clicks -milliseconds] - $tcp_start_time}]
35 |   HTTP::release
36 | }
37 |
38 | when HTTP_RESPONSE {
39 |   set hsl [HSL::open -proto TCP -pool syslog_pool]
40 |   set resp_start_time [clock format [clock seconds] -format "%Y/%m/%d %H:%M:%S"]
41 |   set node [IP::server_addr]:[TCP::server_port]
42 |   set status [HTTP::status]
43 |   set req_elapsed_time [expr {[clock clicks -milliseconds] - $tcp_start_time}]
44 |
45 |   if { [HTTP::header Content-Length] > 0 } then {
46 |     set response_length [HTTP::header "Content-Length"]
47 |   } else {
48 |     set response_length 0
49 |   }
50 |
51 |   HSL::send $hsl "<190>|$vhost|device_product=Splunk Web Access iRule|$client|$method|\"$url\"|HTTP/$http_version|$user_agent|\"$referer\"|$req_start_time|$req_length|$req_elapsed_time|$node|$status|$resp_start_time|$response_length|$virtual_server\r\n"
52 | }
--------------------------------------------------------------------------------
/roles/bigip_app1/files/irule_sorry_page.tcl:
--------------------------------------------------------------------------------
1 | when HTTP_REQUEST {
2 |   set VSPool [LB::server pool]
3 |   if { [active_members $VSPool] < 1 } {
4 |     log local0. "Client [IP::client_addr] requested [HTTP::uri] no active nodes available..."
5 |     if { [HTTP::uri] ends_with "sorry.png" } {
6 |       HTTP::respond 200 content [b64decode [class element -name 0 sorry_images]] "Content-Type" "image/png"
7 |     } else {
8 |       if { [HTTP::uri] ends_with "background.png" } {
9 |         HTTP::respond 200 content [b64decode [class element -name 0 background_images]] "Content-Type" "image/png"
10 |       } else {
11 |         HTTP::respond 200 content "
12 |           <html>
13 |             <head>
14 |               <title>Oouchhh!</title>
15 |             </head>
16 |             <body background='background.png'>
17 |               <h1>Ouchhhhh! Snap! Something went terribly wrong!!!</h1>
18 |               <img src='sorry.png'/>
19 |               <p>In the meantime, go here to entertain yourself while we figure out what just happened :-)</p>
20 |               <p>Wish us luck!</p>
21 |             </body>
22 |           </html>
53 | " 54 | } 55 | } 56 | } 57 | } 58 | -------------------------------------------------------------------------------- /roles/bigip_app1/tasks/provision_waf_depends.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # We need to retry this tasks until it succeeds, ASM takes longer to boot 3 | # than some of the other LTM functionality we have been provisioning against 4 | # so far. 5 | - name: Create the ASM policy 6 | delegate_to: localhost 7 | bigip_config: 8 | name="Create the ASM policy" 9 | state=present 10 | host={{ ansible_ssh_host }} 11 | user={{ bigip_rest_user }} 12 | password={{ bigip_rest_password }} 13 | collection_path='mgmt/tm/asm/policies' 14 | resource_key="name" 15 | payload='{"name":"{{ asm_policy_name }}","applicationLanguage":"utf-8","caseInsensitive":true}' 16 | register: result 17 | until: result|changed 18 | retries: 40 19 | delay: 5 20 | 21 | - name: Save the policy self link 22 | set_fact: policySelfLink="{{ result['out']['selfLink'] }}" 23 | 24 | - name: Save the policy ID 25 | set_fact: policyID="{{ result['out']['id'] }}" 26 | 27 | - name: Import our policy over the one existing above 28 | delegate_to: localhost 29 | bigip_config: 30 | name="Import our policy over the one existing above " 31 | state=present 32 | host={{ ansible_ssh_host }} 33 | user={{ bigip_rest_user }} 34 | password={{ bigip_rest_password }} 35 | collection_path='mgmt/tm/asm/tasks/import-policy' 36 | payload='{"file":"{{ asm_policy_linux_high_base64[0:-1] }}","isBase64":true,"policyReference":{"link":"{{ policySelfLink }}" } }' 37 | register: result 38 | 39 | - name: Save the async task ID 40 | set_fact: taskId="{{ result['out']['id'] }}" 41 | 42 | - name: Determine whether the asm policy import task is complete 43 | delegate_to: localhost 44 | bigip_config: 45 | name="Determine whether the asm policy import task is complete" 46 | state=inspect 47 | host={{ ansible_ssh_host }} 48 | user={{ bigip_rest_user }} 49 | password={{ bigip_rest_password }} 50 | collection_path='mgmt/tm/asm/tasks/import-policy/{{ taskId }}' 51 | register: importResult 52 | until: importResult.out.status.find("COMPLETED") != -1 53 | retries: 40 54 | delay: 2 55 | 56 | - name: Apply the ASM policy 57 | delegate_to: localhost 58 | bigip_config: 59 | name="Apply the ASM policy" 60 | state=present 61 | host={{ ansible_ssh_host }} 62 | user={{ bigip_rest_user }} 63 | password={{ bigip_rest_password }} 64 | collection_path='mgmt/tm/asm/tasks/apply-policy' 65 | payload='{"policyReference":{"link":"{{ policySelfLink }}"} }' 66 | register: result 67 | - name: Save the async task ID 68 | set_fact: taskId="{{ result['out']['id'] }}" 69 | 70 | - name: Determine whether the asm policy apply task is complete 71 | delegate_to: localhost 72 | bigip_config: 73 | name="Determine whether the asm policy apply task is complete" 74 | state=inspect 75 | host={{ ansible_ssh_host }} 76 | user={{ bigip_rest_user }} 77 | password={{ bigip_rest_password }} 78 | collection_path='mgmt/tm/asm/tasks/apply-policy/{{ taskId }}' 79 | register: applyResult 80 | until: applyResult.out.status.find("COMPLETED") != -1 81 | retries: 40 82 | delay: 2 83 | 84 | # Added Step to Publish to Drafts first for v12 85 | - name: Create an LTM policy for use with by iApp which associates the ASM policy 86 | delegate_to: localhost 87 | bigip_config: 88 | name="Create an LTM policy for use with by iApp which associates the ASM policy" 89 | state=present 90 | host={{ ansible_ssh_host }} 91 | user={{ bigip_rest_user }} 92 | 
92 |     password={{ bigip_rest_password }}
93 |     collection_path='mgmt/tm/ltm/policy'
94 |     payload='{"name":"{{ ltm_policy_name }}","partition":"Common","subPath":"Drafts","controls":["asm"],"requires":["http"],"strategy":"/Common/first-match","rules":[{"name":"rule-1","ordinal":1,"actions":[{"name":"0","asm":true,"code":0,"enable":true,"policy":"/Common/{{ asm_policy_name }}","port":0,"request":true,"status":0,"vlanId":0}],"conditions":[]}]}'
95 |     resource_key="name"
96 |
97 | - name: Publish the LTM policy
98 |   delegate_to: localhost
99 |   bigip_config:
100 |     name="Publish the LTM policy"
101 |     state=present
102 |     host={{ ansible_ssh_host }}
103 |     user={{ bigip_rest_user }}
104 |     password={{ bigip_rest_password }}
105 |     collection_path='mgmt/tm/ltm/policy'
106 |     payload='{"name":"{{ ltm_policy_name }}","command":"publish"}'
107 |     resource_key="name"
108 |
--------------------------------------------------------------------------------
/roles/bigip_app1/templates/README.md:
--------------------------------------------------------------------------------
1 |
2 |
3 | This directory contains the JSON REST payload we will post to /mgmt/tm/sys/application/service to deploy a new iApp service instance from an iApp template. See iapp_f5_http_backport_service.json.j2
4 |
5 | Here are the steps we followed to create this payload:
6 |
7 | 1) Deploy a new iApp through the UI
8 | 2) Query the iApp via iControlREST
9 | 3) Clean up the JSON payload such that it can be reposted:
10 | 3.a) Delete selfLinks, and any other redundant or extraneous fields
11 | 3.b) Change all none/None values -> ""
12 | 4) Repost as necessary
13 |
14 | In our case we use jinja2 templating macros to customize the JSON payload for each deployment.
--------------------------------------------------------------------------------
/roles/bigip_app1/templates/analytics_pool_payload.json.j2:
--------------------------------------------------------------------------------
1 | {
2 |   "name":"syslog_pool",
3 |   "members":
4 |   [
5 |   {% if deploy_analytics == "true" %}
6 |   {% for host in groups["analyticshosts"] %}
7 |     {
8 |       "name": "{{ hostvars[host]['AnalyticsServerInstancePrivateIp'] }}:514",
9 |       "address": "{{ hostvars[host]['AnalyticsServerInstancePrivateIp'] }}"
10 |     }{% if not loop.last %},{% endif %}
11 |   {% endfor %}
12 |   {% endif %}
13 |   ],
14 |   "monitor":"tcp"
15 | }
--------------------------------------------------------------------------------
/roles/bigip_app1/templates/asm_logging_profile_payload.json.j2:
--------------------------------------------------------------------------------
1 | {
2 |   "name": "asm_log_to_splunk",
3 |   "application": [
4 |     {
5 |       "name": "asm_log_to_splunk",
6 |       "facility": "local0",
7 |       "filter": [
8 |         {
9 |           "name": "protocol",
10 |           "values": [
11 |             "all"
12 |           ]
13 |         },
14 |         {
15 |           "name": "request-type",
16 |           "values": [
17 |             "all"
18 |           ]
19 |         },
20 |         {
21 |           "name": "search-all"
22 |         }
23 |       ],
24 |       "format": {
25 |         "fieldDelimiter": ",",
26 |         "type": "predefined"
27 |       },
28 |       "guaranteeLogging": "disabled",
29 |       "guaranteeResponseLogging": "disabled",
30 |       "localStorage": "disabled",
31 |       "logicOperation": "or",
32 |       "maximumEntryLength": "2k",
33 |       "maximumHeaderSize": "any",
34 |       "maximumQuerySize": "any",
35 |       "maximumRequestSize": "any",
36 |       "partition": "Common",
37 |       "protocol": "tcp",
38 |       "remoteStorage": "splunk",
39 |       "reportAnomalies": "disabled",
40 |       "responseLogging": "none",
41 |       "servers": [
42 |       {% if deploy_analytics == "true" %}
43 |       {% for host in groups["analyticshosts"] %}
44 |       {
45 | "name": "{{ hostvars[host]['AnalyticsServerInstancePrivateIp'] }}:515" 46 | }{% if not loop.last %},{% endif %} 47 | {% endfor %} 48 | {% endif %} 49 | ] 50 | } 51 | ] 52 | } 53 | -------------------------------------------------------------------------------- /roles/bigip_app1/templates/bigip_pool_members_from_containers.cfg.j2: -------------------------------------------------------------------------------- 1 | {"name":"{{vip_id}}_pool","monitor":"http","members":[{% for host in groups['apphosts'] %}{% for container in hostvars[host]['docker_containers'] %}{% if container.NetworkSettings is defined %}{"name":"{{hostvars[host]['WebServerInstancePrivateIp']}}:{{container['NetworkSettings']['Ports']['80/tcp'][0]['HostPort']}}","address":"{{hostvars[host]['WebServerInstancePrivateIp']}}","description":"Name={{container['Name']}},ContainerHostname={{container['Config']['Hostname']}},Image={{ container['Config']['Image']}}"},{% endif %}{% endfor %}{% endfor %}]} 2 | -------------------------------------------------------------------------------- /roles/bigip_app1/templates/iapp_f5_http_backport_service.json.j2: -------------------------------------------------------------------------------- 1 | { 2 | "name": "{{ iapp_service_name }}", 3 | "partition": "Common", 4 | "inheritedDevicegroup": "true", 5 | "inheritedTrafficGroup": "true", 6 | "strictUpdates": "enabled", 7 | "template": "/Common/f5.http.backport.1.1.2", 8 | "templateModified": "no", 9 | "lists": [ 10 | { 11 | "name": "irules__irules", 12 | "encrypted": "no", 13 | "value": [ 14 | "/Common/irule_demo_analytics", 15 | "/Common/irule_sorry_page" 16 | ] 17 | } 18 | ], 19 | "tables": [ 20 | { 21 | "name": "basic__snatpool_members" 22 | }, 23 | { 24 | "name": "net__snatpool_members" 25 | }, 26 | { 27 | "name": "optimizations__hosts" 28 | }, 29 | { 30 | "name": "pool__hosts", 31 | "columnNames": [ 32 | "name" 33 | ], 34 | "rows": [ 35 | { 36 | "row": [ 37 | "demo.example.com" 38 | ] 39 | } 40 | ] 41 | }, 42 | { 43 | "name": "pool__members" 44 | }, 45 | { 46 | "name": "server_pools__servers" 47 | } 48 | ], 49 | "variables": [ 50 | {% if deploy_analytics == "true" and deployment_type == "lb_and_waf" %} 51 | { 52 | "name": "asm__security_logging", 53 | "encrypted": "no", 54 | "value": "asm_log_to_splunk" 55 | }, 56 | {% else %} 57 | { 58 | "name": "asm__security_logging", 59 | "encrypted": "no", 60 | "value": "Log all requests" 61 | }, 62 | {% endif %} 63 | {% if deployment_type == "lb_and_waf" %} 64 | { 65 | "name": "asm__use_asm", 66 | "encrypted": "no", 67 | "value": "/Common/{{ ltm_policy_name }}" 68 | }, 69 | {% else %} 70 | { 71 | "name": "asm__use_asm", 72 | "encrypted": "no", 73 | "value": "/#do_not_use#" 74 | }, 75 | {% endif %} 76 | { 77 | "name": "client__http_compression", 78 | "encrypted": "no", 79 | "value": "/#do_not_use#" 80 | }, 81 | { 82 | "name": "client__standard_caching_without_wa", 83 | "encrypted": "no", 84 | "value": "/#do_not_use#" 85 | }, 86 | { 87 | "name": "client__tcp_wan_opt", 88 | "encrypted": "no", 89 | "value": "/Common/tcp-ssl-wan-optimized" 90 | }, 91 | { 92 | "name": "net__client_mode", 93 | "encrypted": "no", 94 | "value": "wan" 95 | }, 96 | { 97 | "name": "net__route_to_bigip", 98 | "encrypted": "no", 99 | "value": "no" 100 | }, 101 | { 102 | "name": "net__same_subnet", 103 | "encrypted": "no", 104 | "value": "no" 105 | }, 106 | { 107 | "name": "net__server_mode", 108 | "encrypted": "no", 109 | "value": "lan" 110 | }, 111 | { 112 | "name": "net__snat_type", 113 | "encrypted": "no", 114 | "value": 
"automap" 115 | }, 116 | { 117 | "name": "net__vlan_mode", 118 | "encrypted": "no", 119 | "value": "all" 120 | }, 121 | { 122 | "name": "pool__addr", 123 | "encrypted": "no", 124 | "value": "{{VipAddress1}}" 125 | }, 126 | { 127 | "name": "pool__http", 128 | "encrypted": "no", 129 | "value": "/#create_new#" 130 | }, 131 | { 132 | "name": "pool__mask", 133 | "encrypted": "no", 134 | "value": "" 135 | }, 136 | { 137 | "name": "pool__persist", 138 | "encrypted": "no", 139 | "value": "/#cookie#" 140 | }, 141 | { 142 | "name": "pool__pool_to_use", 143 | "encrypted": "no", 144 | "value": "/Common/{{ iapp_vs_pool_name }}" 145 | }, 146 | { 147 | "name": "pool__port_secure", 148 | "encrypted": "no", 149 | "value": "443" 150 | }, 151 | { 152 | "name": "pool__redirect_port", 153 | "encrypted": "no", 154 | "value": "80" 155 | }, 156 | { 157 | "name": "pool__redirect_to_https", 158 | "encrypted": "no", 159 | "value": "yes" 160 | }, 161 | { 162 | "name": "pool__xff", 163 | "encrypted": "no", 164 | "value": "yes" 165 | }, 166 | { 167 | "name": "server__oneconnect", 168 | "encrypted": "no", 169 | "value": "/#do_not_use#" 170 | }, 171 | { 172 | "name": "server__tcp_lan_opt", 173 | "encrypted": "no", 174 | "value": "/Common/tcp-wan-optimized" 175 | }, 176 | { 177 | "name": "server__tcp_req_queueing", 178 | "encrypted": "no", 179 | "value": "no" 180 | }, 181 | { 182 | "name": "ssl__cert", 183 | "encrypted": "no", 184 | "value": "/Common/default.crt" 185 | }, 186 | { 187 | "name": "ssl__client_ssl_profile", 188 | "encrypted": "no", 189 | "value": "/#create_new#" 190 | }, 191 | { 192 | "name": "ssl__key", 193 | "encrypted": "no", 194 | "value": "/Common/default.key" 195 | }, 196 | { 197 | "name": "ssl__mode", 198 | "encrypted": "no", 199 | "value": "client_ssl" 200 | }, 201 | { 202 | "name": "ssl__use_chain_cert", 203 | "encrypted": "no", 204 | "value": "/#do_not_use#" 205 | }, 206 | { 207 | "name": "ssl_encryption_questions__advanced", 208 | "encrypted": "no", 209 | "value": "yes" 210 | }, 211 | { 212 | "name": "ssl_encryption_questions__help", 213 | "encrypted": "no", 214 | "value": "hide" 215 | }, 216 | { 217 | "name": "stats__analytics", 218 | "encrypted": "no", 219 | "value": "/Common/{{ analytics_profile_name }}" 220 | }, 221 | { 222 | "name": "stats__request_logging", 223 | "encrypted": "no", 224 | "value": "/#do_not_use#" 225 | } 226 | ] 227 | } 228 | -------------------------------------------------------------------------------- /roles/bigip_app2/files/irule_random_snat.tcl: -------------------------------------------------------------------------------- 1 | when RULE_INIT { 2 | expr srand("[clock clicks]") 3 | set static::TARGET_VIP "/Common/Vip1_iApp.app/Vip1_iApp_vs" 4 | } 5 | when CLIENT_ACCEPTED { 6 | 7 | set a [expr int(223*rand())] 8 | set b [expr int(255*rand())] 9 | set c [expr int(255*rand())] 10 | set d [expr int(255*rand())] 11 | 12 | while { $a == 192 || $a == 172 || $a == 10 } { 13 | #log local0. "changing first octet from $a" 14 | set a [expr int(223*rand())] 15 | } 16 | #log local0. 
$a.$b.$c.$d 17 | snat $a.$b.$c.$d 18 | 19 | virtual $static::TARGET_VIP 20 | } 21 | -------------------------------------------------------------------------------- /roles/bigip_app2/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Deploying/updating webserver pool 3 | delegate_to: localhost 4 | bigip_config: 5 | name="Deploying/updating webserver pool" 6 | state=present 7 | host={{ ansible_ssh_host }} 8 | user={{ bigip_rest_user }} 9 | password={{ bigip_rest_password }} 10 | payload='{{lookup('file', '~/vars/f5aws/env/' + env_name + '/' + vip_id + '_pool_from_containers.json')}}' 11 | collection_path='mgmt/tm/ltm/pool' 12 | resource_key="name" 13 | 14 | - name: Uploading iRules ... irule_random_snat 15 | delegate_to: localhost 16 | bigip_config: 17 | name="Uploading iRules ... irule_random_snat" 18 | state=present 19 | host={{ ansible_ssh_host }} 20 | user={{ bigip_rest_user }} 21 | password={{ bigip_rest_password }} 22 | collection_path='mgmt/tm/ltm/rule' 23 | resource_key="name" 24 | payload='{"name":"irule_random_snat","apiAnonymous":"{{irule_random_snat|replace("\\","\\\\")|replace("\"","\\\"")|replace("\n","\\n")}}"}' 25 | 26 | - name: Setup the HTTP virtual server 27 | delegate_to: localhost 28 | bigip_config: 29 | name="Setup the HTTP virtual server" 30 | state=present 31 | host={{ ansible_ssh_host }} 32 | user={{ bigip_rest_user }} 33 | password={{ bigip_rest_password }} 34 | collection_path='mgmt/tm/ltm/virtual' 35 | resource_key="name" 36 | payload='{"name":"{{vip_id}}_http","destination":"/Common/{{VipAddress2}}:80","mask":"255.255.255.255","ipProtocol":"tcp","pool":"/Common/{{vip_id}}_pool","translateAddress":"enabled","translatePort":"enabled","sourceAddressTranslation":{"type":"automap"},"rules":["/Common/irule_random_snat"], "profiles":[{"name":"http"},{"name":"tcp-wan-optimized","context":"clientside"},{"name":"tcp-lan-optimized","context":"serverside"}]}' 37 | 38 | - name: Setup the HTTPS virtual server 39 | delegate_to: localhost 40 | bigip_config: 41 | name="Setup the HTTPS virtual server" 42 | state=present 43 | host={{ ansible_ssh_host }} 44 | user={{ bigip_rest_user }} 45 | password={{ bigip_rest_password }} 46 | collection_path='mgmt/tm/ltm/virtual' 47 | resource_key="name" 48 | payload='{"name":"{{vip_id}}_https","destination":"/Common/{{VipAddress2}}:443","mask":"255.255.255.255","ipProtocol":"tcp","pool":"/Common/{{vip_id}}_pool","translateAddress":"enabled","translatePort":"enabled","sourceAddressTranslation":{"type":"automap"},"rules":["/Common/irule_random_snat"], "profiles":[{"name":"tcp-ssl-wan-optimized","context":"clientside"},{"name":"tcp-ssl-lan-optimized","context":"serverside"}]}' 49 | -------------------------------------------------------------------------------- /roles/bigip_app2/templates/bigip_pool_members_from_containers.cfg.j2: -------------------------------------------------------------------------------- 1 | {"name":"{{vip_id}}_pool","monitor":"http","members":[{% for host in groups['apphosts'] %}{% for container in hostvars[host]['docker_containers'] %}{% if container.NetworkSettings is defined %}{"name":"{{hostvars[host]['WebServerInstancePrivateIp']}}:{{container['NetworkSettings']['Ports']['80/tcp'][0]['HostPort']}}","address":"{{hostvars[host]['WebServerInstancePrivateIp']}}","description":"Name={{container['Name']}},ContainerHostname={{container['Config']['Hostname']}},Image={{ container['Config']['Image']}}"},{% endif %}{% endfor %}{% endfor %}]} 2 | 
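{# Illustrative rendered output of the pool template above (hypothetical
   values: one apphost at 172.16.3.50 running two hackazon containers
   published on host ports 32768 and 32769):
   {"name":"Vip2_pool","monitor":"http","members":[{"name":"172.16.3.50:32768","address":"172.16.3.50","description":"Name=/hackazon,ContainerHostname=0a1b2c3d4e5f,Image=hackazon"},{"name":"172.16.3.50:32769","address":"172.16.3.50","description":"Name=/hackazon,ContainerHostname=5f4e3d2c1b0a,Image=hackazon"}]}
#}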
--------------------------------------------------------------------------------
/roles/bigip_base/tasks/add_rest_user.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Checking if REST user already exists
3 |   raw: list auth user {{ bigip_rest_user }}
4 |   register: list_user
5 |   ignore_errors: True
6 |
7 | # Conditionally create a user if not found
8 | - name: Creating user for REST
9 |   raw: create auth user {{ bigip_rest_user }} password '{{ bigip_rest_password }}' partition-access add { all-partitions { role admin } } shell tmsh; save sys config
10 |   when: list_user.stdout | search("was not found")
11 |
12 | # One more time in case of partial runs/saves.
13 | - name: Ensuring user has correct permissions
14 |   raw: modify auth user {{ bigip_rest_user }} partition-access modify { all-partitions { role admin } } shell tmsh; save sys config
15 |
16 | - name: Saving tmsh config
17 |   raw: save sys config
--------------------------------------------------------------------------------
/roles/bigip_base/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | ## Some initial setup for BIG-IP once we can log in. We need a user for REST calls. We may also want the default shell
3 | ## to be bash, rather than tmsh. The latter is difficult to use with Ansible...
4 |
5 | # We specifically use the bigip_facts library here because it does some checking
6 | # to see if the BIG-IP is available, rather than failing outright.
7 |
8 | - include: add_rest_user.yml
9 | - include: modify_admin.yml
10 | - name: Wait for BIG-IP to be available
11 |   local_action: bigip_facts include=system_info
12 |     server={{ ansible_ssh_host }}
13 |     user={{ bigip_rest_user }}
14 |     password={{ bigip_rest_password }}
15 |     validate_certs=no
16 |   delegate_to: localhost
17 |   register: result
18 |   until: result.ansible_facts.system_info.product_information.product_version is defined
--------------------------------------------------------------------------------
/roles/bigip_base/tasks/modify_admin.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: Updating the admin password
4 |   raw: modify auth user admin password '{{ bigip_rest_password }}'
5 |   ignore_errors: True
6 |
7 | - name: Saving tmsh config
8 |   raw: save sys config
--------------------------------------------------------------------------------
/roles/bigip_cluster/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Configuring DSC Device objects and clustering the bigips if more than one.
3 |     This can take a couple of minutes...
4 | delegate_to: localhost 5 | bigip_cluster: 6 | state: present 7 | username: "{{ bigip_rest_user }}" 8 | password: "{{ bigip_rest_password }}" 9 | action: cluster 10 | ha_type: pair 11 | bigip_cluster_name: my_sync_failover_group 12 | device_names: "{{ cluster_device_names }}" 13 | api_addrs: "{{ cluster_api_addrs }}" 14 | mgmt_addrs: "{{ cluster_mgmt_addrs }}" 15 | ha_addrs: "{{ cluster_ha_addrs }}" 16 | mirror_addrs: "{{ cluster_mirror_addrs }}" 17 | 18 | -------------------------------------------------------------------------------- /roles/bigip_cluster/templates/cluster_peer_info.cfg.j2: -------------------------------------------------------------------------------- 1 | --- 2 | cluster_device_names: [ {% for member in hostvars[inventory_hostname]['members'] %}{% if member is defined %}{{ hostvars[member]['DeviceName'] }}{% endif %}{% if not loop.last %}, {% endif %}{% endfor %}] 3 | cluster_api_addrs: [ {% for member in hostvars[inventory_hostname]['members'] %}{% if member is defined %}{{ hostvars[member]['ManagementInterfacePublicIp'] }} {% endif %}{% if not loop.last %}, {% endif %}{% endfor %}] 4 | cluster_mgmt_addrs: [ {% for member in hostvars[inventory_hostname]['members'] %}{% if member is defined %}{{ hostvars[member]['ManagementInterfacePrivateIp'] }} {% endif %}{% if not loop.last %}, {% endif %}{% endfor %}] 5 | cluster_ha_addrs: [ {% for member in hostvars[inventory_hostname]['members'] %}{% if member is defined %}{{ hostvars[member]['ExternalInterfacePrivateIp'] }} {% endif %}{% if not loop.last %}, {% endif %}{% endfor %}] 6 | cluster_mirror_addrs: [ {% for member in hostvars[inventory_hostname]['members'] %}{% if member is defined %}{{ hostvars[member]['ExternalInterfacePrivateIp'] }} {% endif %}{% if not loop.last %}, {% endif %}{% endfor %}] 7 | cluster_public_self_addrs: [ {% for member in hostvars[inventory_hostname]['members'] %}{% if member is defined %}{{ hostvars[member]['ExternalInterfacePrivateIp'] }} {% endif %}{% if not loop.last %}, {% endif %}{% endfor %}] 8 | cluster_public_nat_addrs: [ {% for member in hostvars[inventory_hostname]['members'] %}{% if member is defined %}{{ hostvars[member]['ExternalInterfacePublicIp'] }} {% endif %}{% if not loop.last %}, {% endif %}{% endfor %}] 9 | -------------------------------------------------------------------------------- /roles/bigip_network/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # --------------------------------- 3 | # Configure network, LTM, and ASM settings which are specific to each device. 
4 | # ---------------------------------
5 |
6 | - name: Adding/updating internal vlan
7 |   delegate_to: localhost
8 |   bigip_config:
9 |     name="Adding/updating internal vlan"
10 |     state=present
11 |     host={{ ansible_ssh_host }}
12 |     user={{ bigip_rest_user }}
13 |     password={{ bigip_rest_password }}
14 |     payload='{"name":"private", "interfaces":"1.2"}'
15 |     collection_path='mgmt/tm/net/vlan'
16 |     resource_key="name"
17 |
18 | - name: Adding/updating external vlan
19 |   delegate_to: localhost
20 |   bigip_config:
21 |     name="Adding/updating external vlan"
22 |     state=present
23 |     host={{ ansible_ssh_host }}
24 |     user={{ bigip_rest_user }}
25 |     password={{ bigip_rest_password }}
26 |     payload='{"name":"public", "interfaces":"1.1"}'
27 |     collection_path='mgmt/tm/net/vlan'
28 |     resource_key="name"
29 |
30 | # The address we use for the local-only internal self-ip is provided by Amazon (InternalInterfacePrivateIp)
31 | - name: Adding/updating internal selfip
32 |   delegate_to: localhost
33 |   bigip_config:
34 |     name="Adding/updating internal selfip"
35 |     state=present
36 |     host={{ ansible_ssh_host }}
37 |     user={{ bigip_rest_user }}
38 |     password={{ bigip_rest_password }}
39 |     payload='{"name":"private", "address":"{{ InternalInterfacePrivateIp }}/24", "vlan":"private", "trafficGroup":"traffic-group-local-only", "allowService":"default"}'
40 |     collection_path='mgmt/tm/net/self'
41 |     resource_key="name"
42 |
43 | # The address we use for the external, local-only self-ip is provided by Amazon (ExternalInterfacePrivateIp)
44 | - name: Adding/updating external selfip
45 |   delegate_to: localhost
46 |   bigip_config:
47 |     name="Adding/updating external selfip"
48 |     state=present
49 |     host={{ ansible_ssh_host }}
50 |     user={{ bigip_rest_user }}
51 |     password={{ bigip_rest_password }}
52 |     payload='{"name":"public", "address":"{{ ExternalInterfacePrivateIp }}/24", "vlan":"public", "trafficGroup":"traffic-group-local-only", "allowService":["tcp:4353"]}'
53 |     collection_path='mgmt/tm/net/self'
54 |     resource_key="name"
55 |
56 | # Gateway pools are only needed in a cluster spanned across AZs and will be shared on all devices.
57 | # Commented out for now; we would just need to add separate, more specific roles or conditions inside the roles.
58 | # - name: Adding/updating default gateway pool
59 | #   delegate_to: localhost
60 | #   bigip_config:
61 | #     state=present
62 | #     host={{ ansible_ssh_host }}
63 | #     user={{ bigip_rest_user }}
64 | #     password={{ bigip_rest_password }}
65 | #     payload='{"name":"default_gateway_pool", "members":[{"name":"172.16.2.1:0","address":"172.16.2.1"}, {"name":"172.16.12.1:0","address":"172.16.12.1"}], "monitor":"gateway_icmp"}'
66 | #     collection_path='mgmt/tm/ltm/pool'
67 | #     resource_key="name"
68 |
69 | # - name: Adding/updating pool members for default gateway pool
70 | #   delegate_to: localhost
71 | #   bigip_config:
72 | #     state=present
73 | #     host={{ ansible_ssh_host }}
74 | #     user={{ bigip_rest_user }}
75 | #     password={{ bigip_rest_password }}
76 | #     payload='{ "name":"pool_member_gateway_pool", "members":[{"name":"172.16.3.1:0","address":"172.16.3.1"}, {"name":"172.16.13.1:0","address":"172.16.13.1"}], "monitor":"gateway_icmp" }'
77 | #     collection_path='mgmt/tm/ltm/pool'
78 | #     resource_key="name"
79 |
80 |
81 | # For each bigip the routes may be different.
82 | # In Amazon, the default gateway is the first IP on the subnet network,
83 | # i.e. 172.16.12.1 and 172.16.22.1.
84 | # So we'll hardcode it: take the dotted subnet prefix of ExternalInterfacePrivateIp and set the last octet to '1'.
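# Worked example (illustrative): if ExternalInterfacePrivateIp is 172.16.12.45,
# ExternalInterfacePrivateIp[0:9] yields the nine-character prefix "172.16.12",
# and joining "1" with '.' produces gw=172.16.12.1. Note the fixed [0:9] slice
# only works while the dotted subnet prefix is exactly nine characters long,
# as it is for the 172.16.12.0/24 and 172.16.22.0/24 subnets used here.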
85 | # This might not be true in other environments, in which case we'll want a more robust solution. 86 | 87 | # BZID 510170 - Must include partition 88 | - name: Setting default route using default_gateway or gateway_pool 89 | delegate_to: localhost 90 | bigip_config: 91 | name="Setting default route using default_gateway or gateway_pool" 92 | state=present 93 | host={{ ansible_ssh_host }} 94 | user={{ bigip_rest_user }} 95 | password={{ bigip_rest_password }} 96 | payload='{"name":"default_route", "network":"default", "partition":"Common", "gw":"{{ [ExternalInterfacePrivateIp[0:9],1]|join('.') }}" }' 97 | collection_path='/mgmt/tm/net/route' 98 | resource_key="name" 99 | -------------------------------------------------------------------------------- /roles/bigip_system/README.md: -------------------------------------------------------------------------------- 1 | bigip_system 2 | ============= 3 | 4 | This role will configure general system settings such as: 5 | 6 | - NTP 7 | - DNS 8 | - Syslog 9 | - HTTP 10 | - SSH 11 | - SNMP 12 | - Traffic Profiles (tcp, fastL4, etc.) that can be shared by all 13 | - DB keys 14 | - Module Resource Provisioning (for generic modules like MGMT/AVR/etc.) 15 | Note: Other advanced modules like ASM, APM, GTM, should be probably be handled in a separate role 16 | -------------------------------------------------------------------------------- /roles/bigip_system/tasks/provision_module.yml: -------------------------------------------------------------------------------- 1 | - name: Provisioning module 2 | delegate_to: localhost 3 | bigip_config: 4 | state=present 5 | host={{ ansible_ssh_host }} 6 | user={{ bigip_rest_user }} 7 | password={{ bigip_rest_password }} 8 | payload='{"level":"nominal"}' 9 | collection_path='mgmt/tm/sys/provision/{{module}}' 10 | 11 | - name: Wait for module completion to avoid any race conditions 12 | pause: seconds=90 -------------------------------------------------------------------------------- /roles/bigip_system_aws/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # --------------------------------- 3 | # AWS-specific system variables 4 | # --------------------------------- 5 | 6 | - name: Adding/updating AWS access and secret keys 7 | delegate_to: localhost 8 | bigip_config: 9 | name="Adding/updating AWS access and secret keys" 10 | state=present 11 | host={{ ansible_ssh_host }} 12 | user={{ bigip_rest_user }} 13 | password={{ bigip_rest_password }} 14 | payload='{"awsAccessKey":"{{ f5_aws_access_key }}", "awsSecretKey":"{{ f5_aws_secret_key }}"}' 15 | collection_path='mgmt/tm/sys/global-settings' 16 | 17 | - name: Disabling dhcp 18 | delegate_to: localhost 19 | bigip_config: 20 | name="Disabling dhcp" 21 | state=present 22 | host={{ ansible_ssh_host }} 23 | user={{ bigip_rest_user }} 24 | password={{ bigip_rest_password }} 25 | payload='{ "value":"disable" }' 26 | collection_path='mgmt/tm/sys/db/dhclient.mgmt' 27 | 28 | 29 | -------------------------------------------------------------------------------- /roles/client/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Ensure package manager is up-to-date 3 | apt: update_cache=yes 4 | ignore_errors: True 5 | 6 | - name: Make sure apt-get resources are up-to-date 7 | apt: update_cache=yes 8 | 9 | - name: Install list of packages required for jmeter 10 | apt: pkg={{item}} state=installed 11 | with_items: 12 | - python-setuptools 13 | - aptitude 14 | - 
software-properties-common 15 | - jmeter 16 | 17 | -------------------------------------------------------------------------------- /roles/docker_base/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Since we are installing docker ourselves on our 3 | # home-rolled Ubuntu host, let's be careful to lock down all 4 | # aspects so we don't need to keep testing this stuff... 5 | 6 | - name: Ensuring 'pip' is available for python package upgrades 7 | easy_install: name=pip 8 | 9 | - name: Ensuring 'pip' is up to date 10 | pip: name=pip state=present extra_args='--upgrade' executable=/usr/local/bin/pip 11 | 12 | # The docker ansible module is only compatible with docker-py version 1.1.0 13 | # See https://github.com/ansible/ansible/issues/10879 14 | - name: Ensuring correct version of python 'docker' module 15 | pip: name=docker-py state=present version=1.1.0 executable=/usr/local/bin/pip 16 | 17 | - name: Make sure apt-get resources are up-to-date 18 | apt: update_cache=yes 19 | 20 | - name: Install docker 21 | apt: name=docker=1.5-1 state=present force=yes 22 | 23 | - name: Install docker.io 24 | apt: name=docker.io=1.6.2~dfsg1-1ubuntu4~14.04.1 state=present force=yes -------------------------------------------------------------------------------- /roles/gtm_app1/tasks/main.yml: -------------------------------------------------------------------------------- 1 | # - name: Gather EIPs for VIP1 2 | # set_fact: 3 | # VipPrivateIp={{ hostvars[inventory_hostname].stack_outputs.privateIpAddress }} 4 | # VipEip={{ hostvars[inventory_hostname].stack_outputs.eipAddress }} 5 | 6 | - name: Setup the App1 Virtual Server on each BIG-IP Object 7 | delegate_to: localhost 8 | bigip_config: 9 | name="Setup the App1 Virtual Server on each BIG-IP Object" 10 | state=present 11 | host={{ ansible_ssh_host }} 12 | user={{ bigip_rest_user }} 13 | password={{ bigip_rest_password }} 14 | payload='{"name":"{{vip_id}}","destination":"{{hostvars[item]["VipEip"]}}:80","translationAddress":"{{hostvars[item]["VipPrivateIp"]}}"}' 15 | collection_path='mgmt/tm/gtm/server/~Common~{{hostvars[item]["cluster_name"]}}/virtual-servers' 16 | resource_key="name" 17 | with_items: groups['bigip-cluster-seeds'] 18 | 19 | - name: Setup the AZ pools 20 | delegate_to: localhost 21 | bigip_config: 22 | name="Setup the AZ pools" 23 | state=present 24 | host={{ ansible_ssh_host }} 25 | user={{ bigip_rest_user }} 26 | password={{ bigip_rest_password }} 27 | payload='{"name":"{{vip_id}}_pool_{{hostvars[item]["AvailabilityZone"]}}","loadBalancingMode":"round-robin","members":[{"name":"{{hostvars[item]["cluster_name"]}}:{{vip_id}}"}]}' 28 | collection_path='mgmt/tm/gtm/pool/a' 29 | resource_key="name" 30 | with_items: groups['bigip-cluster-seeds'] 31 | 32 | # Difficult to selectively grab variables from various hosts and string them together in the play framework. The easiest way is through a template; a rendered example follows below.
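# As a concrete example (assuming vip_id 'vip1' and cluster seeds in the
# us-east-1a and us-east-1b availability zones; illustrative values only),
# wideip_pool.cfg.j2 renders a comma-separated member list like:
#   {"name":"vip1_pool_us-east-1a"},{"name":"vip1_pool_us-east-1b"}
# which the tasks below splice into the wideip payload as its "pools" array.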
33 | - name: Render the wideip pool string from a Jinja2 template 34 | template: src=../roles/gtm_conf/templates/wideip_pool.cfg.j2 dest=~/vars/f5aws/env/{{ env_name }}/{{vip_id}}_wideip_pool_string.json 35 | delegate_to: localhost 36 | 37 | - shell: "cat ~/vars/f5aws/env/{{ env_name }}/{{vip_id}}_wideip_pool_string.json" 38 | register: wideip_pool_string 39 | delegate_to: localhost 40 | 41 | - name: Setup the Wideip for Vip1 42 | delegate_to: localhost 43 | bigip_config: 44 | name="Setup the Wideip for Vip1" 45 | state=present 46 | host={{ ansible_ssh_host }} 47 | user={{ bigip_rest_user }} 48 | password={{ bigip_rest_password }} 49 | payload='{"name":"{{vip_id}}.example.com","poolLbMode":"topology","ipv6NoErrorResponse":"enabled","pools":[{{wideip_pool_string.stdout}}]}' 50 | collection_path='mgmt/tm/gtm/wideip/a' 51 | resource_key="name" 52 | 53 | -------------------------------------------------------------------------------- /roles/gtm_app2/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Setup the App2 Virtual Server on each BIG-IP object 3 | delegate_to: localhost 4 | bigip_config: 5 | name="Setup the App2 Virtual Server on each BIG-IP object" 6 | state=present 7 | host={{ ansible_ssh_host }} 8 | user={{ bigip_rest_user }} 9 | password={{ bigip_rest_password }} 10 | payload='{"name":"{{vip_id}}","destination":"{{hostvars[item]["VipPrivateIp"]}}:80"}' 11 | collection_path='mgmt/tm/gtm/server/~Common~{{hostvars[item]["cluster_name"]}}/virtual-servers' 12 | resource_key="name" 13 | with_items: groups['bigip-cluster-seeds'] 14 | 15 | - name: Setup the AZ pools 16 | delegate_to: localhost 17 | bigip_config: 18 | name="Setup the AZ pools" 19 | state=present 20 | host={{ ansible_ssh_host }} 21 | user={{ bigip_rest_user }} 22 | password={{ bigip_rest_password }} 23 | payload='{"name":"{{vip_id}}_pool_{{hostvars[item]["AvailabilityZone"]}}","loadBalancingMode":"round-robin","members":[{"name":"{{hostvars[item]["cluster_name"]}}:{{vip_id}}"}]}' 24 | collection_path='mgmt/tm/gtm/pool/a' 25 | resource_key="name" 26 | with_items: groups['bigip-cluster-seeds'] 27 | 28 | - name: Render the wideip pool string from a Jinja2 template 29 | template: src=../roles/gtm_conf/templates/wideip_pool.cfg.j2 dest=~/vars/f5aws/env/{{ env_name }}/{{vip_id}}_wideip_pool_string.json 30 | delegate_to: localhost 31 | 32 | - shell: "cat ~/vars/f5aws/env/{{ env_name }}/{{vip_id}}_wideip_pool_string.json" 33 | register: wideip_pool_string 34 | delegate_to: localhost 35 | 36 | - name: Setup the Wideip for Vip2 37 | delegate_to: localhost 38 | bigip_config: 39 | name="Setup the Wideip for Vip2" 40 | state=present 41 | host={{ ansible_ssh_host }} 42 | user={{ bigip_rest_user }} 43 | password={{ bigip_rest_password }} 44 | payload='{"name":"{{vip_id}}.example.com","poolLbMode":"topology","ipv6NoErrorResponse":"enabled","pools":[{{wideip_pool_string.stdout}}]}' 45 | collection_path='mgmt/tm/gtm/wideip/a' 46 | resource_key="name" 47 | 48 | - name: Setup the Wideip for demo.example.com 49 | delegate_to: localhost 50 | bigip_config: 51 | name="Setup the Wideip for demo.example.com" 52 | state=present 53 | host={{ ansible_ssh_host }} 54 | user={{ bigip_rest_user }} 55 | password={{ bigip_rest_password }} 56 | payload='{"name":"demo.example.com","poolLbMode":"topology","ipv6NoErrorResponse":"enabled","pools":[{{wideip_pool_string.stdout}}]}' 57 | collection_path='mgmt/tm/gtm/wideip/a' 58 | resource_key="name" 59 |
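A quick way to sanity-check the wideips created by the two roles above is to read back the same iControl REST collection the tasks write to. A minimal sketch (the management address and admin credentials are placeholders, not values from this project):

#!/usr/bin/env python
# Read-only sanity check: list GTM A-record wideips over iControl REST.
import requests

GTM_HOST = '<gtm-management-ip>'      # placeholder
AUTH = ('admin', '<admin-password>')  # placeholder

resp = requests.get('https://%s/mgmt/tm/gtm/wideip/a' % GTM_HOST,
                    auth=AUTH, verify=False)
resp.raise_for_status()
for item in resp.json().get('items', []):
    print item['name'], item.get('poolLbMode')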
-------------------------------------------------------------------------------- /roles/gtm_cluster/README.md: -------------------------------------------------------------------------------- 1 | gtm_cluster 2 | ============= 3 | 4 | This role will: 5 | - add the GTMs to a sync group using gtm_add 6 | - add the BIG-IPs to the sync_group using bigip_add 7 | -------------------------------------------------------------------------------- /roles/gtm_cluster/files/gtm_expect.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | """ 4 | Quick and dirty approach to clustering BIG-IP 5 | BIG-IP offers no API based approach to clustering that 6 | can be performed over REST/SOAP. Instead we use expect scripts. 7 | a.applebaum@f5.com 8 | """ 9 | 10 | def debug_conn ( conn ): 11 | print "Before Match:" 12 | print conn.before 13 | print "After Match:" 14 | print conn.after 15 | print "" 16 | 17 | 18 | import sys 19 | import time 20 | import pexpect 21 | #import pexpect.pxssh as pxssh 22 | 23 | #TODO: use optparse instead 24 | user = sys.argv[1] 25 | host = sys.argv[2] 26 | command = sys.argv[3] 27 | peer_user = sys.argv[4] 28 | peer_host = sys.argv[5] 29 | password = sys.argv[6] 30 | print_debug = 0 31 | 32 | if print_debug == 1: 33 | print "user: " + user 34 | print "host: " + host 35 | print "command: " + command 36 | print "peer_user: " + peer_user 37 | print "peer_host: " + peer_host 38 | print "password: " + password 39 | 40 | 41 | if host == peer_host: 42 | print "Exiting. Not running as target and destination are the same" 43 | sys.exit() 44 | 45 | 46 | MY_TIMEOUT = 30 47 | SSH_NEWKEY = 'Are you sure you want to continue connecting' 48 | 49 | print "SSH'ing to : " + user + "@" + host 50 | conn = pexpect.spawn("ssh -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null " + user + "@" + host) 51 | 52 | match_value = conn.expect([SSH_NEWKEY, '[Pp]assword:', pexpect.EOF, pexpect.TIMEOUT], timeout=MY_TIMEOUT); 53 | #print "match_value = " + str(match_value) 54 | if print_debug == 1: 55 | debug_conn(conn) 56 | 57 | time.sleep(1) 58 | if match_value == 0: 59 | print "Matched new key warning" 60 | conn.sendline ( "yes" ) 61 | elif match_value == 1: 62 | print "Matched Password prompt. Sending Password" 63 | conn.sendline ( password ) 64 | time.sleep(1) 65 | 66 | #Hopefully eventually get here 67 | match_value = conn.expect('\(tmos\)#', timeout=MY_TIMEOUT) 68 | 69 | if print_debug == 1: 70 | debug_conn(conn) 71 | 72 | if match_value == 0: 73 | #bash prompt 74 | #conn.expect('~ #', timeout=MY_TIMEOUT) 75 | #SOL14495: The bigip_add and gtm_add scripts now accept a user name 76 | print "Matched tmsh prompt! 
Now adding bigip peer with command \"run gtm " + command + " -a " + peer_user + "@" + peer_host + "\""; 77 | conn.sendline("run gtm " + command + " -a " + peer_user + "@" + peer_host) 78 | 79 | if command == "gtm_add": 80 | conn.expect ('Are you absolutely sure you want to do this?') 81 | print "Confirming will wipe away this config and use peer GTM's config instead" 82 | conn.sendline ('y') 83 | time.sleep(3); 84 | 85 | #Otherwise will get an insecure key warning on the first attempt for either command 86 | match_value = conn.expect([SSH_NEWKEY, pexpect.EOF, pexpect.TIMEOUT], timeout = MY_TIMEOUT) 87 | 88 | if print_debug == 1: 89 | debug_conn(conn) 90 | 91 | if match_value == 0: 92 | print "Matched new key warning" 93 | conn.sendline ( "yes" ) 94 | 95 | #Subsequent attempts will just get a password prompt 96 | match_value = conn.expect([ '[Pp]assword:', pexpect.EOF, pexpect.TIMEOUT], timeout = MY_TIMEOUT) 97 | 98 | if print_debug == 1: 99 | debug_conn(conn) 100 | 101 | if match_value == 0: 102 | print "Matched Password prompt. Sending Password" 103 | conn.sendline ( password ) 104 | 105 | # Expect "==> Done <==" as sign of success 106 | match_value = conn.expect(['==> Done <==', '\(tmos\)#', pexpect.EOF, pexpect.TIMEOUT], timeout=MY_TIMEOUT); 107 | 108 | if print_debug == 1: 109 | debug_conn(conn) 110 | 111 | if match_value == 0: 112 | print "Received \"==> Done <==\" : " + "command " + command + " successful" 113 | print "exiting cleanly" 114 | sys.exit(0) 115 | elif match_value == 1: 116 | print "Received tmsh prompt? Really need to check results" 117 | sys.exit(1) 118 | else: 119 | #anything else, fail 120 | sys.exit(1) -------------------------------------------------------------------------------- /roles/gtm_cluster/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # ---------------------------------------------------------------------- 3 | # Add GTMs and BIGIPs to the gtm sync group 4 | # ---------------------------------------------------------------------- 5 | 6 | # In the following two tasks, the deployed gtms are clustered. 7 | # As these are interactive CLI tools on BIG-IP, we'll need to leverage expect scripts, 8 | # ex. http://marvelley.com/blog/2014/04/23/handling-interactive-ansible-tasks/ 9 | # NOTE: expect modules don't exist anywhere on BIG-IP, so we'll do this as a local action 10 | 11 | #./gtm_expect.py admin 1.1.1.1 bigip_add admin 2.2.2.2 'mypassword' 12 | # Note, in production you really want to use the MGMT ports for the initial gtm_add/bigip_add functions 13 | # as they require ssh, and you only want to expose 4353 (big3d communication) on any public self-ips 14 | - name: Run bigip_add on all bigips with expect script 15 | script: ../roles/gtm_cluster/files/gtm_expect.py admin {{ ansible_ssh_host }} bigip_add admin {{hostvars[item]["ansible_ssh_host"]}} \'{{ bigip_rest_password }}\' 16 | with_items: groups['bigips'] 17 | delegate_to: localhost 18 | register: expect_output 19 | 20 | 21 | # Only needs to run when > 1 GTM is deployed 22 | - name: Run gtm_add to all GTMs with expect script from one seed gtm 23 | script: ../roles/gtm_cluster/files/gtm_expect.py admin {{hostvars[groups['gtms'][0]]["ansible_ssh_host"]}} gtm_add admin {{hostvars[item.1]["ansible_ssh_host"]}} \'{{ bigip_rest_password }}\' 24 | delegate_to: localhost 25 | with_indexed_items: groups['gtms'] 26 | when: item.0 > 0 27 | run_once: true 28 | ignore_errors: true 29 | 30 | # Script does not always work on the first run.
Attempt to run again but need to fix soon 31 | - name: Run bigip_add on all bigips with expect script 32 | script: ../roles/gtm_cluster/files/gtm_expect.py admin {{ ansible_ssh_host }} bigip_add admin {{hostvars[item]["ansible_ssh_host"]}} \'{{ bigip_rest_password }}\' 33 | with_items: groups['bigips'] 34 | delegate_to: localhost 35 | 36 | 37 | -------------------------------------------------------------------------------- /roles/gtm_conf/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Configure GTM UDP Listener 3 | delegate_to: localhost 4 | bigip_config: 5 | name="Configure GTM UDP Listener" 6 | state=present 7 | host={{ ansible_ssh_host }} 8 | user={{ bigip_rest_user }} 9 | password={{ bigip_rest_password }} 10 | payload='{"name":"gtm_listener_udp","address":"{{VipAddress}}","mask":"255.255.255.255","ipProtocol":"udp","translateAddress":"disabled","translatePort":"disabled","profiles":[{"name":"dns"},{"name":"udp_gtm_dns"}]}' 11 | collection_path='mgmt/tm/gtm/listener' 12 | resource_key="name" 13 | 14 | - name: Configure GTM TCP Listener 15 | delegate_to: localhost 16 | bigip_config: 17 | name="Configure GTM TCP Listener" 18 | state=present 19 | host={{ ansible_ssh_host }} 20 | user={{ bigip_rest_user }} 21 | password={{ bigip_rest_password }} 22 | payload='{"name":"gtm_listener_tcp","address":"{{VipAddress}}","mask":"255.255.255.255","ipProtocol":"tcp","translateAddress":"disabled","translatePort":"disabled","profiles":[{"name":"dns"},{"name":"tcp"}]}' 23 | collection_path='mgmt/tm/gtm/listener' 24 | resource_key="name" 25 | 26 | - name: Setup the Datacenter 27 | delegate_to: localhost 28 | bigip_config: 29 | name="Setup the Datacenter" 30 | state=present 31 | host={{ ansible_ssh_host }} 32 | user={{ bigip_rest_user }} 33 | password={{ bigip_rest_password }} 34 | payload='{"name":"{{region}}"}' 35 | collection_path='mgmt/tm/gtm/datacenter' 36 | resource_key="name" 37 | 38 | # NOTE: Useful Product types = single-bigip, redundant-bigip, generic-host 39 | - name: Setup the BIGIP-Object for GTMs (as Standalone BIGIP) 40 | delegate_to: localhost 41 | bigip_config: 42 | name="Setup the BIGIP-Object for GTMs (as Standalone BIGIP)" 43 | state=present 44 | host={{ ansible_ssh_host }} 45 | user={{ bigip_rest_user }} 46 | password={{ bigip_rest_password }} 47 | payload='{"product":"single-bigip","name":"{{item}}","addresses":[{"deviceName":"{{hostvars[item]["DeviceName"]}}","name":"{{hostvars[item]["ExternalInterfacePublicIp"]}}","translation":"{{hostvars[item]["ExternalInterfacePrivateIp"]}}"}],"datacenter":"{{region}}","monitor":"/Common/bigip","virtualServerDiscovery":"disabled"}' 48 | collection_path='mgmt/tm/gtm/server' 49 | resource_key="name" 50 | with_items: groups['gtms'] 51 | 52 | # NOTE: Useful Product types = single-bigip, redundant-bigip, generic-host 53 | # Note, each standalone bigip object will be called by its cluster name as a 54 | # cluster can be a cluster of one. 55 | # NOTE: the BIG-IP object config will eventually need to be modified to pull from a jinja template 56 | # when GTM better supports Scale-N clusters. 
57 | # Right now it's easier to construct the object manually since it's just one or two BIG-IPs 58 | 59 | - name: Setup the BIGIP-Object for BIGIPs (Standalone BIGIP) 60 | delegate_to: localhost 61 | bigip_config: 62 | name="Setup the BIGIP-Object for BIGIPs (Standalone BIGIP)" 63 | state=present 64 | host={{ ansible_ssh_host }} 65 | user={{ bigip_rest_user }} 66 | password={{ bigip_rest_password }} 67 | payload='{"product":"single-bigip","name":"{{hostvars[item]["cluster_name"]}}","addresses":[{"deviceName":"{{hostvars[item]["DeviceName"]}}","name":"{{hostvars[item]["ExternalInterfacePublicIp"]}}","translation":"{{hostvars[item]["ExternalInterfacePrivateIp"]}}"}],"datacenter":"{{region}}","monitor":"/Common/bigip","virtualServerDiscovery":"disabled"}' 68 | collection_path='mgmt/tm/gtm/server' 69 | resource_key="name" 70 | with_items: groups['bigips'] 71 | when: hostvars[inventory_hostname].deployment_model != "cluster-per-zone" 72 | 73 | 74 | - name: Setup the BIGIP-Object for BIGIPs (Clustered BIGIPs) 75 | delegate_to: localhost 76 | bigip_config: 77 | name="Setup the BIGIP-Object for BIGIPs (Clustered BIGIPs)" 78 | state=present 79 | host={{ ansible_ssh_host }} 80 | user={{ bigip_rest_user }} 81 | password={{ bigip_rest_password }} 82 | payload='{"product":"redundant-bigip","name":"{{hostvars[item]["cluster_name"]}}","addresses":[{"deviceName":"{{hostvars[item]["cluster_device_names"][0]}}","name":"{{hostvars[item]["cluster_public_nat_addrs"][0]}}","translation":"{{hostvars[item]["cluster_public_self_addrs"][0]}}"},{"deviceName":"{{hostvars[item]["cluster_device_names"][1]}}","name":"{{hostvars[item]["cluster_public_nat_addrs"][1]}}","translation":"{{hostvars[item]["cluster_public_self_addrs"][1]}}"} ],"datacenter":"{{region}}","monitor":"/Common/bigip","virtualServerDiscovery":"disabled"}' 83 | collection_path='mgmt/tm/gtm/server' 84 | resource_key="name" 85 | with_items: groups['bigip-cluster-seeds'] 86 | when: hostvars[inventory_hostname].deployment_model == "cluster-per-zone" 87 | 88 | 89 | -------------------------------------------------------------------------------- /roles/gtm_conf/templates/wideip_pool.cfg.j2: -------------------------------------------------------------------------------- 1 | {% for host in groups['bigip-cluster-seeds'] %}{% if hostvars[host]['AvailabilityZone'] is defined %}{"name":"{{vip_id}}_pool_{{hostvars[host]['AvailabilityZone']}}"}{% endif %}{% if not loop.last %},{% endif %}{% endfor %} 2 | -------------------------------------------------------------------------------- /roles/gtm_network/README.md: -------------------------------------------------------------------------------- 1 | gtm_network 2 | ============= 3 | 4 | This role will configure base networking items such as vlans, self-ips and routes. 5 | -------------------------------------------------------------------------------- /roles/gtm_network/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # --------------------------------- 3 | # Configure settings which are specific to each device.
4 | # --------------------------------- 5 | - name: Add external vlan 6 | delegate_to: localhost 7 | bigip_config: 8 | name="Add external vlan" 9 | state=present 10 | host={{ ansible_ssh_host }} 11 | user={{ bigip_rest_user }} 12 | password={{ bigip_rest_password }} 13 | payload='{"name":"public", "interfaces":"1.1"}' 14 | collection_path='mgmt/tm/net/vlan' 15 | resource_key="name" 16 | 17 | # The address we give for the external, local-only self-ip is provided by Amazon (ExternalInterfacePrivateIp) 18 | - name: Add unique external selfip 19 | delegate_to: localhost 20 | bigip_config: 21 | name="Add unique external selfip" 22 | state=present 23 | host={{ ansible_ssh_host }} 24 | user={{ bigip_rest_user }} 25 | password={{ bigip_rest_password }} 26 | payload='{"name":"public", "address":"{{ ExternalInterfacePrivateIp }}/24", "vlan":"public", "trafficGroup":"traffic-group-local-only", "allowService":["tcp:4353"]}' 27 | 28 | collection_path='mgmt/tm/net/self' 29 | resource_key="name" 30 | 31 | # for each bigip the routes may be different 32 | # In Amazon, the Default Gateway is the first IP on the subnet network, 33 | # i.e. 172.16.12.1 and 172.16.22.1 34 | # So we hardcode it: take the first nine characters of ExternalInterfacePrivateIp (e.g. '172.16.12.100' -> '172.16.12') and append '.1'. 35 | # This might not be true in other environments, in which case we'll want a more robust solution. 36 | - name: Set default route using the default_gateway or gateway_pool 37 | delegate_to: localhost 38 | bigip_config: 39 | name="Set default route using the default_gateway or gateway_pool" 40 | state=present 41 | host={{ ansible_ssh_host }} 42 | user={{ bigip_rest_user }} 43 | password={{ bigip_rest_password }} 44 | payload='{"name":"default_route", "network":"default", "partition":"Common", "gw":"{{ [ExternalInterfacePrivateIp[0:9],1]|join('.') }}" }' 45 | collection_path='/mgmt/tm/net/route' 46 | resource_key="name" 47 | 48 | 49 | -------------------------------------------------------------------------------- /roles/infra/files/analyticshost.json: -------------------------------------------------------------------------------- 1 | { 2 | "AWSTemplateFormatVersion": "2010-09-09", 3 | "Description": "AWS CloudFormation Template for the Analytics (Splunk) Host Instance. **WARNING** This template creates an Amazon EC2 instance. You will be billed for the AWS resources used if you create a stack from this template.", 4 | "Parameters": { 5 | "envName": { 6 | "Type": "String", 7 | "Description": "Used to tag all resources created within this CFT" 8 | }, 9 | "vpc": { 10 | "Type": "AWS::EC2::VPC::Id", 11 | "ConstraintDescription": "Must be an existing VPC within the working region." 12 | }, 13 | "applicationSubnet": { 14 | "Description": "Private subnet for application servers", 15 | "Type": "AWS::EC2::Subnet::Id", 16 | "ConstraintDescription": "Must be an existing subnet" 17 | }, 18 | "instanceType": { 19 | "Description": "AnalyticsServer EC2 instance type", 20 | "Type": "String", 21 | "Default": "c3.large", 22 | "AllowedValues": [ 23 | "t2.micro", 24 | "t2.small", 25 | "t2.medium", 26 | "m3.large", 27 | "m3.xlarge", 28 | "m2.xlarge", 29 | "m3.2xlarge", 30 | "c3.large", 31 | "c3.xlarge" 32 | ], 33 | "ConstraintDescription": "must be a valid EC2 instance type."
34 | }, 35 | "keyName": { 36 | "Description": "Name of an existing EC2 KeyPair to enable SSH access to the instance", 37 | "Type": "AWS::EC2::KeyPair::KeyName" 38 | }, 39 | "SSHLocation": { 40 | "Description": "The IP address range that can be used to SSH to the EC2 instances", 41 | "Type": "String", 42 | "MinLength": "9", 43 | "MaxLength": "18", 44 | "Default": "0.0.0.0/0", 45 | "AllowedPattern": "(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})/(\\d{1,2})", 46 | "ConstraintDescription": "must be a valid IP CIDR range of the form x.x.x.x/x." 47 | } 48 | }, 49 | "Mappings": { 50 | "AWSRegionArch2AMI": { 51 | "us-east-1": { 52 | "AMI": "ami-431e5a26" 53 | }, 54 | "us-west-1": { 55 | "AMI": "ami-d54d8a91" 56 | }, 57 | "us-west-2": { 58 | "AMI": "ami-b7534987" 59 | }, 60 | "eu-west-1": { 61 | "AMI": "ami-8f2c06f8" 62 | }, 63 | "sa-east-1": { 64 | "AMI": "ami-c79a0dda" 65 | }, 66 | "ap-southeast-1": { 67 | "AMI": "ami-f8a3b4aa" 68 | }, 69 | "ap-southeast-2": { 70 | "AMI": "ami-75b5fc4f" 71 | }, 72 | "ap-northeast-1": { 73 | "AMI": "ami-eada44ea" 74 | }, 75 | "eu-central-1": { 76 | "AMI": "ami-9e202283" 77 | } 78 | } 79 | }, 80 | "Resources": { 81 | "AnalyticsServerSecurityGroup": { 82 | "Type": "AWS::EC2::SecurityGroup", 83 | "Properties": { 84 | "VpcId": { 85 | "Ref": "vpc" 86 | }, 87 | "GroupDescription": "Enable access to Splunk ports", 88 | "SecurityGroupIngress": [ 89 | { 90 | "IpProtocol": "tcp", 91 | "FromPort": "22", 92 | "ToPort": "22", 93 | "CidrIp": "0.0.0.0/0" 94 | }, 95 | { 96 | "IpProtocol": "tcp", 97 | "FromPort": "443", 98 | "ToPort": "443", 99 | "CidrIp": "0.0.0.0/0" 100 | }, 101 | { 102 | "IpProtocol": "tcp", 103 | "FromPort": "514", 104 | "ToPort": "520", 105 | "CidrIp": "172.16.0.0/16" 106 | }, 107 | { 108 | "IpProtocol": "udp", 109 | "FromPort": "514", 110 | "ToPort": "520", 111 | "CidrIp": "172.16.0.0/16" 112 | }, 113 | { 114 | "IpProtocol": "tcp", 115 | "FromPort": "8000", 116 | "ToPort": "8000", 117 | "CidrIp": "0.0.0.0/0" 118 | }, 119 | { 120 | "IpProtocol": "tcp", 121 | "FromPort": "8089", 122 | "ToPort": "8089", 123 | "CidrIp": "0.0.0.0/0" 124 | }, 125 | { 126 | "IpProtocol": "tcp", 127 | "FromPort": "9997", 128 | "ToPort": "9997", 129 | "CidrIp": "0.0.0.0/0" 130 | }, 131 | { 132 | "IpProtocol": "icmp", 133 | "FromPort": "-1", 134 | "ToPort": "-1", 135 | "CidrIp": "0.0.0.0/0" 136 | } 137 | ], 138 | "Tags": [ 139 | { 140 | "Key": "EnvName", 141 | "Value": {"Ref": "envName"} 142 | } 143 | ] 144 | } 145 | }, 146 | "AnalyticsServerInstance": { 147 | "Type": "AWS::EC2::Instance", 148 | "Properties": { 149 | "ImageId": { 150 | "Fn::FindInMap": [ 151 | "AWSRegionArch2AMI", 152 | { 153 | "Ref": "AWS::Region" 154 | }, 155 | "AMI" 156 | ] 157 | }, 158 | "InstanceType": { 159 | "Ref": "instanceType" 160 | }, 161 | "KeyName": { 162 | "Ref": "keyName" 163 | }, 164 | "NetworkInterfaces": [ { 165 | "AssociatePublicIpAddress": "true", 166 | "DeviceIndex": "0", 167 | "GroupSet": [{ "Ref" : "AnalyticsServerSecurityGroup" }], 168 | "SubnetId": { "Ref" : "applicationSubnet" } 169 | } ], 170 | "Tags": [ 171 | { 172 | "Key": "EnvName", 173 | "Value": {"Ref": "envName"} 174 | } 175 | ] 176 | } 177 | } 178 | }, 179 | "Outputs": { 180 | "AnalyticsServerInstance": { 181 | "Description": "Analytics server instance id", 182 | "Value": { "Ref": "AnalyticsServerInstance" } 183 | }, 184 | "AnalyticsServerInstancePrivateIp": { 185 | "Description": "Internally routable IP of Analytics server instance", 186 | "Value": { "Fn::GetAtt": [ "AnalyticsServerInstance", "PrivateIp"]} 187 | }, 188 |
"AnalyticsServerInstancePublicIp": { 189 | "Description": "Publicly routable IP of Analytics server instance", 190 | "Value": { "Fn::GetAtt": [ "AnalyticsServerInstance", "PublicIp"]} 191 | }, 192 | "AnalyticsServerInstancePublicDnsName": { 193 | "Description": "Publicly routable DNS Name of Analytics server instance", 194 | "Value": { "Fn::GetAtt": [ "AnalyticsServerInstance", "PublicDnsName"]} 195 | } 196 | } 197 | } 198 | -------------------------------------------------------------------------------- /roles/infra/files/apphost.json: -------------------------------------------------------------------------------- 1 | { 2 | "AWSTemplateFormatVersion": "2010-09-09", 3 | "Description": "AWS CloudFormation Template for Docker Host Instances. **WARNING** This template creates an Amazon EC2 instance. You will be billed for the AWS resources used if you create a stack from this template.", 4 | "Parameters": { 5 | "envName": { 6 | "Type": "String", 7 | "Description": "Used to tag all resources created within this CFT" 8 | }, 9 | "vpc": { 10 | "Type": "AWS::EC2::VPC::Id", 11 | "ConstraintDescription": "Must be an existing VPC within working region." 12 | }, 13 | "applicationSubnet": { 14 | "Description": "Private subnet for application servers", 15 | "Type": "AWS::EC2::Subnet::Id", 16 | "ConstraintDescription": "Must be an existing subnet" 17 | }, 18 | "instanceType": { 19 | "Description": "WebServer EC2 instance type", 20 | "Type": "String", 21 | "Default": "t2.micro", 22 | "AllowedValues": [ 23 | "t2.micro", 24 | "t2.small", 25 | "t2.medium", 26 | "m3.medium", 27 | "m3.large", 28 | "m3.xlarge", 29 | "m2.xlarge", 30 | "m3.2xlarge", 31 | "c3.large", 32 | "c3.xlarge" 33 | ], 34 | "ConstraintDescription": "must be a valid EC2 instance type." 35 | }, 36 | "keyName": { 37 | "Description": "Name of an existing EC2 KeyPair to enable SSH access to the instance", 38 | "Type": "AWS::EC2::KeyPair::KeyName" 39 | }, 40 | "SSHLocation": { 41 | "Description": " The IP address range that can be used to SSH to the EC2 instances", 42 | "Type": "String", 43 | "MinLength": "9", 44 | "MaxLength": "18", 45 | "Default": "0.0.0.0/0", 46 | "AllowedPattern": "(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})/(\\d{1,2})", 47 | "ConstraintDescription": "must be a valid IP CIDR range of the form x.x.x.x/x." 
48 | } 49 | }, 50 | "Mappings": { 51 | "AWSRegionArch2AMI": { 52 | "us-east-1": { 53 | "AMI": "ami-63efbd06" 54 | }, 55 | "us-west-1": { 56 | "AMI": "ami-eba86baf" 57 | }, 58 | "us-west-2": { 59 | "AMI": "ami-f638dbc5" 60 | }, 61 | "eu-west-1": { 62 | "AMI": "ami-f3b98584" 63 | }, 64 | "ap-southeast-2": { 65 | "AMI": "ami-098dc733" 66 | }, 67 | "ap-northeast-1": { 68 | "AMI": "ami-28bbd928" 69 | } 70 | } 71 | }, 72 | "Resources": { 73 | "WebServerSecurityGroup": { 74 | "Type": "AWS::EC2::SecurityGroup", 75 | "Properties": { 76 | "VpcId": { 77 | "Ref": "vpc" 78 | }, 79 | "GroupDescription": "Enable access for all ports for docker instances", 80 | "SecurityGroupIngress": [ 81 | { 82 | "IpProtocol": "tcp", 83 | "FromPort": "1", 84 | "ToPort": "65535", 85 | "CidrIp": "0.0.0.0/0" 86 | }, 87 | { 88 | "IpProtocol": "icmp", 89 | "FromPort": "-1", 90 | "ToPort": "-1", 91 | "CidrIp": "0.0.0.0/0" 92 | } 93 | ], 94 | "Tags": [ 95 | { 96 | "Key": "envName", 97 | "Value": { 98 | "Ref": "envName" 99 | } 100 | } 101 | ] 102 | } 103 | }, 104 | "WebServerInstance": { 105 | "Type": "AWS::EC2::Instance", 106 | "Properties": { 107 | "ImageId": { 108 | "Fn::FindInMap": [ 109 | "AWSRegionArch2AMI", 110 | { 111 | "Ref": "AWS::Region" 112 | }, 113 | "AMI" 114 | ] 115 | }, 116 | "InstanceType": { 117 | "Ref": "instanceType" 118 | }, 119 | "KeyName": { 120 | "Ref": "keyName" 121 | }, 122 | "NetworkInterfaces": [ { 123 | "AssociatePublicIpAddress": "true", 124 | "DeviceIndex": "0", 125 | "GroupSet": [{ "Ref" : "WebServerSecurityGroup" }], 126 | "SubnetId": { "Ref" : "applicationSubnet" } 127 | } ], 128 | "Tags": [ 129 | { 130 | "Key": "envName", 131 | "Value": { 132 | "Ref": "envName" 133 | } 134 | } 135 | ] 136 | } 137 | } 138 | }, 139 | "Outputs": { 140 | "WebServerInstance": { 141 | "Description": "Webserver instance id", 142 | "Value": { "Ref": "WebServerInstance" } 143 | }, 144 | "WebServerInstancePrivateIp": { 145 | "Description": "Internally routable IP of web server instance", 146 | "Value": { "Fn::GetAtt": [ "WebServerInstance", "PrivateIp"]} 147 | }, 148 | "WebServerInstancePublicIp": { 149 | "Description": "Publicly routable IP of web server instance", 150 | "Value": { "Fn::GetAtt": [ "WebServerInstance", "PublicIp"]} 151 | }, 152 | "WebServerInstancePublicDnsName": { 153 | "Description": "Publicly routable DNS Name of web server instance", 154 | "Value": { "Fn::GetAtt": [ "WebServerInstance", "PublicDnsName"]} 155 | } 156 | } 157 | } 158 | -------------------------------------------------------------------------------- /roles/infra/files/client.json: -------------------------------------------------------------------------------- 1 | { 2 | "AWSTemplateFormatVersion": "2010-09-09", 3 | "Description": "AWS CloudFormation Template for Client/Jumpbox Host Instance. **WARNING** This template creates an Amazon EC2 instance. You will be billed for the AWS resources used if you create a stack from this template.", 4 | "Parameters": { 5 | "envName": { 6 | "Type": "String", 7 | "Description": "Used to tag all resources created within this CFT" 8 | }, 9 | "vpc": { 10 | "Type": "AWS::EC2::VPC::Id", 11 | "ConstraintDescription": "Must be an existing VPC within working region." 
12 | }, 13 | "clientSubnet": { 14 | "Description": "Subnet for Client/Jumpbox", 15 | "Type": "AWS::EC2::Subnet::Id", 16 | "ConstraintDescription": "Must be an existing subnet" 17 | }, 18 | "instanceType": { 19 | "Description": "Client/Jumpbox EC2 instance type", 20 | "Type": "String", 21 | "Default": "t2.micro", 22 | "AllowedValues": [ 23 | "t2.micro", 24 | "t2.small", 25 | "t2.medium", 26 | "m3.medium", 27 | "m3.large", 28 | "m3.xlarge", 29 | "m2.xlarge", 30 | "m3.2xlarge", 31 | "c3.large", 32 | "c3.xlarge" 33 | ], 34 | "ConstraintDescription": "must be a valid EC2 instance type." 35 | }, 36 | "keyName": { 37 | "Description": "Name of an existing EC2 KeyPair to enable SSH access to the instance", 38 | "Type": "AWS::EC2::KeyPair::KeyName" 39 | }, 40 | "SSHLocation": { 41 | "Description": " The IP address range that can be used to SSH to the EC2 instances", 42 | "Type": "String", 43 | "MinLength": "9", 44 | "MaxLength": "18", 45 | "Default": "0.0.0.0/0", 46 | "AllowedPattern": "(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})\\.(\\d{1,3})/(\\d{1,2})", 47 | "ConstraintDescription": "must be a valid IP CIDR range of the form x.x.x.x/x." 48 | } 49 | }, 50 | "Mappings": { 51 | "AWSRegionArch2AMI": { 52 | "us-east-1": { 53 | "AMI": "ami-63efbd06" 54 | }, 55 | "us-west-1": { 56 | "AMI": "ami-eba86baf" 57 | }, 58 | "us-west-2": { 59 | "AMI": "ami-f638dbc5" 60 | }, 61 | "eu-west-1": { 62 | "AMI": "ami-f3b98584" 63 | }, 64 | "ap-southeast-2": { 65 | "AMI": "ami-098dc733" 66 | }, 67 | "ap-northeast-1": { 68 | "AMI": "ami-28bbd928" 69 | } 70 | } 71 | }, 72 | "Resources": { 73 | "ClientSecurityGroup": { 74 | "Type": "AWS::EC2::SecurityGroup", 75 | "Properties": { 76 | "VpcId": { 77 | "Ref": "vpc" 78 | }, 79 | "GroupDescription": "Enable access for Client instance", 80 | "SecurityGroupIngress": [ 81 | { 82 | "IpProtocol": "tcp", 83 | "FromPort": "1", 84 | "ToPort": "65535", 85 | "CidrIp": "0.0.0.0/0" 86 | }, 87 | { 88 | "IpProtocol": "icmp", 89 | "FromPort": "-1", 90 | "ToPort": "-1", 91 | "CidrIp": "0.0.0.0/0" 92 | } 93 | ], 94 | "Tags": [ 95 | { 96 | "Key": "envName", 97 | "Value": { 98 | "Ref": "envName" 99 | } 100 | } 101 | ] 102 | } 103 | }, 104 | "ClientInstance": { 105 | "Type": "AWS::EC2::Instance", 106 | "Properties": { 107 | "ImageId": { 108 | "Fn::FindInMap": [ 109 | "AWSRegionArch2AMI", 110 | { 111 | "Ref": "AWS::Region" 112 | }, 113 | "AMI" 114 | ] 115 | }, 116 | "InstanceType": { 117 | "Ref": "instanceType" 118 | }, 119 | "KeyName": { 120 | "Ref": "keyName" 121 | }, 122 | "NetworkInterfaces": [ { 123 | "AssociatePublicIpAddress": "true", 124 | "DeviceIndex": "0", 125 | "GroupSet": [{ "Ref" : "ClientSecurityGroup" }], 126 | "SubnetId": { "Ref" : "clientSubnet" } 127 | } ], 128 | "Tags": [ 129 | { 130 | "Key": "envName", 131 | "Value": { 132 | "Ref": "envName" 133 | } 134 | } 135 | ] 136 | } 137 | } 138 | }, 139 | "Outputs": { 140 | "ClientInstance": { 141 | "Description": "Client instance id", 142 | "Value": { "Ref": "ClientInstance" } 143 | }, 144 | "ClientInstancePrivateIp": { 145 | "Description": "Internally routable IP of client instance", 146 | "Value": { "Fn::GetAtt": [ "ClientInstance", "PrivateIp"]} 147 | }, 148 | "ClientInstancePublicIp": { 149 | "Description": "Publicly routable IP of client instance", 150 | "Value": { "Fn::GetAtt": [ "ClientInstance", "PublicIp"]} 151 | }, 152 | "ClientInstancePublicDnsName": { 153 | "Description": "Publicly routable DNS Name of client instance", 154 | "Value": { "Fn::GetAtt": [ "ClientInstance", "PublicDnsName"]} 155 | } 156 | } 157 | } 158 | 
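The AMI maps in these templates pin a specific image per region and go stale over time. A minimal sketch of how a region's image could be looked up dynamically with boto instead, in the spirit of bin/get_bigip_image_id.py (the region and Ubuntu 14.04 name filter here are illustrative assumptions, not values from this repo):

#!/usr/bin/env python
# Find the newest Ubuntu 14.04 HVM AMI in one region (owner 099720109477 is Canonical).
import boto.ec2

conn = boto.ec2.connect_to_region('us-east-1')
images = conn.get_all_images(
    owners=['099720109477'],
    filters={'name': 'ubuntu/images/hvm-ssd/ubuntu-trusty-14.04-amd64-server-*'})
# AMI names embed a datestamp, so a simple lexical sort yields the newest build.
print sorted(images, key=lambda i: i.name)[-1].id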
-------------------------------------------------------------------------------- /roles/infra/files/eip.json: -------------------------------------------------------------------------------- 1 | { 2 | "AWSTemplateFormatVersion": "2010-09-09", 3 | "Description": "Allocates an EIP and assigns it to a secondary private IP", 4 | "Parameters": { 5 | "envName": { 6 | "Type": "String", 7 | "Description": "Used to tag all resources created within this CFT" 8 | }, 9 | "vpc": { 10 | "Type": "AWS::EC2::VPC::Id", 11 | "ConstraintDescription": "Must be an existing VPC within the supplied region." 12 | }, 13 | "NetworkInterfaceId": { 14 | "Description": "Network Interface ID to which this Secondary IP will be added", 15 | "Type": "String", 16 | "ConstraintDescription": "Must be an existing network interface" 17 | }, 18 | "PrivateIpAddress": { 19 | "Description": "Private IP (typically a secondary IP used for a VIP) that the EIP will be attached to", 20 | "Type": "String", 21 | "ConstraintDescription": "Must be a Private IP" 22 | } 23 | }, 24 | "Resources": { 25 | "EipAddress": { 26 | "Type": "AWS::EC2::EIP", 27 | "Properties": { 28 | "Domain": "vpc" 29 | } 30 | }, 31 | "EipAssociation": { 32 | "Type": "AWS::EC2::EIPAssociation", 33 | "Properties": { 34 | "NetworkInterfaceId": { 35 | "Ref": "NetworkInterfaceId" 36 | }, 37 | "PrivateIpAddress": { 38 | "Ref": "PrivateIpAddress" 39 | }, 40 | "AllocationId": { 41 | "Fn::GetAtt": [ 42 | "EipAddress", 43 | "AllocationId" 44 | ] 45 | } 46 | } 47 | } 48 | }, 49 | "Outputs": { 50 | "privateIpAddress": { 51 | "Description": "Private IP Address", 52 | "Value": { 53 | "Ref": "PrivateIpAddress" 54 | } 55 | }, 56 | "eipAddress": { 57 | "Description": "EIP NAT Address", 58 | "Value": { 59 | "Ref": "EipAddress" 60 | } 61 | }, 62 | "eipAllocationId": { 63 | "Description": "EIP Allocation ID", 64 | "Value": { 65 | "Fn::GetAtt": [ 66 | "EipAddress", 67 | "AllocationId" 68 | ] 69 | } 70 | } 71 | } 72 | } -------------------------------------------------------------------------------- /roles/infra/files/vpc.json: -------------------------------------------------------------------------------- 1 | { 2 | "AWSTemplateFormatVersion": "2010-09-09", 3 | "Description": "AWS VPC", 4 | "Parameters": { 5 | "envName": { 6 | "Type": "String", 7 | "Description": "Environment name from the f5aws tool deployment; used to tag all resources created within this CFT" 8 | },
13 | "cidrBlock": { 14 | "Type": "String", 15 | "Description": "CIDR block for the VPC" 16 | } 17 | }, 18 | "Resources": { 19 | "Vpc": { 20 | "Type": "AWS::EC2::VPC", 21 | "Properties": { 22 | "CidrBlock": { "Ref": "cidrBlock" }, 23 | "EnableDnsSupport": "true", 24 | "EnableDnsHostnames": "true", 25 | "InstanceTenancy": "default", 26 | "Tags": [ 27 | { 28 | "Key": "envName", 29 | "Value": { 30 | "Ref": "envName" 31 | } 32 | } 33 | ] 34 | } 35 | }, 36 | "InternetGateway": { 37 | "Type": "AWS::EC2::InternetGateway", 38 | "Properties": { 39 | "Tags": [ 40 | { 41 | "Key": "envName", 42 | "Value": { 43 | "Ref": "envName" 44 | } 45 | } 46 | ] 47 | } 48 | }, 49 | "InternetGateWayAttachment": { 50 | "Type": "AWS::EC2::VPCGatewayAttachment", 51 | "Properties": { 52 | "InternetGatewayId": { 53 | "Ref": "InternetGateway" 54 | }, 55 | "VpcId": { 56 | "Ref": "Vpc" 57 | } 58 | } 59 | } 60 | }, 61 | "Outputs": { 62 | "vpcId": { 63 | "Description": "Id of VPC", 64 | "Value": { "Ref": "Vpc" } 65 | }, 66 | "internetGatewayId": { 67 | "Description": "Id of internet gateway", 68 | "Value": { "Ref": "InternetGateway" } 69 | } 70 | } 71 | } 72 | -------------------------------------------------------------------------------- /roles/infra/library/cloudformation_state.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | 3 | """ 4 | ansible module for checking the state of deployed CloudFormation stacks. 5 | """ 6 | 7 | import sys 8 | import json 9 | import requests 10 | from copy import deepcopy 11 | 12 | from requests.exceptions import ConnectionError, HTTPError, Timeout, TooManyRedirects 13 | 14 | class CloudFormationState(object): 15 | def __init__(self, module): 16 | self.stack_name = module.params["host"] 17 | self.state = module.params["state"] 18 | 19 | def resource_exists(self): 20 | #for collections that we want to patch (for example sys/global-settings) 21 | # there is no resource_key which we can use to determine existence 22 | if self.resource_key is None: 23 | return False 24 | 25 | exists = False 26 | (rc, out, err) = self.http("get", self.collection_path) 27 | 28 | if rc != 0: 29 | raise ValueError("Bad return code from HTTP GET.
%s " % err) 30 | 31 | items = out.get("items", None) 32 | if items is not None: 33 | for i in items: 34 | if i[self.resource_key] == self.payload[self.resource_key]: 35 | exists = True 36 | break 37 | return exists 38 | 39 | 40 | def main(): 41 | 42 | # NOTE: the http/delete_resource/create_or_update_resource helpers referenced below are not yet implemented in this module (it was adapted from library/bigip_config.py) 43 | module = AnsibleModule( 44 | argument_spec = dict( 45 | state=dict(default='present', choices=['present', 'absent'], type='str'), 46 | user=dict(required=True, default=None, type='str'), 47 | host=dict(required=True, default=None, type='str'), 48 | password=dict(required=True, default=None, type='str'), 49 | collection_path=dict(required=False, default=None, type='str'), 50 | # specific to state=present 51 | payload=dict(required=False, default=None, type='str'), 52 | resource_id=dict(required=False, default=None, type='str'), 53 | resource_key=dict(required=False, default=None, type='str'), 54 | ), 55 | mutually_exclusive = [['resource_id','resource_key']], 56 | supports_check_mode=True 57 | ) 58 | 59 | cfstate = CloudFormationState(module) 60 | 61 | rc = None 62 | out = '' 63 | err = '' 64 | result = {} 65 | result['collection_path'] = cfstate.collection_path 66 | result['state'] = cfstate.state 67 | 68 | if cfstate.state == 'absent': 69 | if cfstate.resource_exists(): 70 | if module.check_mode: 71 | module.exit_json(changed=True) 72 | (rc, out, err) = cfstate.delete_resource() 73 | if rc != 0: 74 | module.fail_json(name=cfstate.collection_path, msg=err, rc=rc) 75 | elif cfstate.state == 'present': 76 | (rc, out, err) = cfstate.create_or_update_resource() 77 | 78 | if rc != 0: 79 | module.fail_json(name=cfstate.collection_path, msg=err, rc=rc) 80 | 81 | if rc is None: 82 | result['changed'] = False 83 | else: 84 | result['changed'] = True 85 | if out: 86 | result['out'] = out 87 | if err: 88 | result['err'] = err 89 | 90 | module.exit_json(**result) 91 | 92 | # import module snippets 93 | from ansible.module_utils.basic import * 94 | main() 95 | 96 | 97 | -------------------------------------------------------------------------------- /roles/infra/tasks/deploy_analyticshost_cft.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Determining state of deployed cloudformation stacks 3 | shell: "python {{ install_path }}/bin/get_cfn_stack_state.py {{ region }} {{ env_name }}-{{ inventory_hostname }}" 4 | register: initial_cfn_state 5 | 6 | - name: Teardown previous stack if necessary 7 | cloudformation: 8 | stack_name="{{ env_name }}-{{ inventory_hostname }}" 9 | state=absent 10 | region="{{ region }}" 11 | template={{ install_path }}/roles/infra/files/analyticshost.json 12 | when: "'{{ initial_cfn_state['stdout'] }}' == 'ROLLBACK_COMPLETE'" 13 | 14 | # We could have used the ec2 module here from ansible. It works well when 15 | # deploying "generic" compute resources but we'll use cloudformation 16 | # instead for consistency.
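# A stack left in ROLLBACK_COMPLETE by a failed create cannot be updated, only
# deleted, which is why the state check and conditional teardown above run
# before every launch. The same check can be run by hand, e.g.:
#   python bin/get_cfn_stack_state.py <region> <env_name>-<inventory_hostname>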
17 | - name: Launch Analytics hosts 18 | action: cloudformation 19 | stack_name="{{ env_name }}-{{ inventory_hostname }}" 20 | state=present 21 | region="{{ region }}" 22 | template={{ install_path }}/roles/infra/files/analyticshost.json 23 | args: 24 | tags: 25 | envName: "{{ env_name }}" 26 | host: "{{ inventory_hostname }}" 27 | instanceType: "Analytics" 28 | Name: "{{ inventory_hostname }}-{{env_name}}" 29 | template_parameters: 30 | vpc: "{{ vpc_id }}" 31 | envName: "{{ env_name }}" 32 | applicationSubnet: "{{ application_subnet }}" 33 | instanceType: "{{ analytics_instance_type }}" 34 | keyName: "{{ key_name }}" 35 | register: analytics_deploy_results 36 | 37 | # Persist data to disk for use across plays 38 | - copy: content="{{ analytics_deploy_results | to_yaml }}" dest=~/vars/f5aws/env/{{ env_name }}/{{ inventory_hostname }}.yml 39 | - copy: content="{{ analytics_deploy_results['stack_outputs'] | to_json }}" dest=~/vars/f5aws/env/{{ env_name }}/{{ inventory_hostname }}.json 40 | -------------------------------------------------------------------------------- /roles/infra/tasks/deploy_apphost_cft.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Determining state of deployed cloudformation stacks 3 | shell: "python {{ install_path }}/bin/get_cfn_stack_state.py {{ region }} {{ env_name }}-{{ inventory_hostname }}" 4 | register: initial_cfn_state 5 | 6 | - name: Teardown previous stack if necessary 7 | cloudformation: 8 | stack_name="{{ env_name }}-{{ inventory_hostname }}" 9 | state=absent 10 | region="{{ region }}" 11 | template={{ install_path }}/roles/infra/files/apphost.json 12 | when: "'{{ initial_cfn_state['stdout'] }}' == 'ROLLBACK_COMPLETE'" 13 | 14 | # We could have used the ec2 module here from ansible. It works well when 15 | # deploying "generic" compute resources but we'll use cloudformation 16 | # instead for consistency. 
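# The launch below registers the CFT outputs defined in apphost.json
# (WebServerInstance, WebServerInstancePrivateIp, WebServerInstancePublicIp,
# WebServerInstancePublicDnsName) under 'stack_outputs'; the copy tasks at the
# end of this file persist them under ~/vars/f5aws/env/ for later plays.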
17 | - name: Launch App hosts 18 | action: cloudformation 19 | stack_name="{{ env_name }}-{{ inventory_hostname }}" 20 | state=present 21 | region="{{ region }}" 22 | template={{ install_path }}/roles/infra/files/apphost.json 23 | args: 24 | tags: 25 | envName: "{{ env_name }}" 26 | host: "{{ inventory_hostname }}" 27 | instanceType: "ApplicationHost" 28 | Name: "{{ inventory_hostname }}-{{env_name}}" 29 | template_parameters: 30 | envName: "{{ env_name }}" 31 | vpc: "{{ vpc_id }}" 32 | applicationSubnet: "{{ application_subnet }}" 33 | instanceType: "{{ apphost_instance_type }}" 34 | keyName: "{{ key_name }}" 35 | register: apphost_deploy_results 36 | 37 | # Persist data to disk for use across plays 38 | - copy: content="{{ apphost_deploy_results | to_yaml }}" dest=~/vars/f5aws/env/{{ env_name }}/{{ inventory_hostname }}.yml 39 | - copy: content="{{ apphost_deploy_results['stack_outputs'] | to_json }}" dest=~/vars/f5aws/env/{{ env_name }}/{{ inventory_hostname }}.json 40 | -------------------------------------------------------------------------------- /roles/infra/tasks/deploy_az.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Determining state of deployed cloudformation stacks 3 | shell: "python {{ install_path }}/bin/get_cfn_stack_state.py {{ region }} {{ env_name }}-{{ inventory_hostname }}" 4 | register: initial_cfn_state 5 | 6 | - name: Teardown previous stack if necessary 7 | cloudformation: 8 | stack_name="{{ env_name }}-{{ inventory_hostname }}" 9 | state=absent 10 | region="{{ region }}" 11 | template={{ install_path }}/roles/infra/files/az.json 12 | when: "'{{ initial_cfn_state['stdout'] }}' == 'ROLLBACK_COMPLETE'" 13 | 14 | - name: Creating subnets in all availability zones 15 | cloudformation: 16 | stack_name="{{ env_name }}-{{ inventory_hostname }}" 17 | state=present 18 | region="{{ region }}" 19 | template={{ install_path }}/roles/infra/files/az.json 20 | args: 21 | tags: 22 | envName: "{{ env_name }}" 23 | inventoryHostname: "{{ inventory_hostname }}" 24 | template_parameters: 25 | envName: "{{ env_name }}" 26 | vpc: "{{ vpc_id }}" 27 | internetGateway: "{{ internet_gateway_id }}" 28 | availabilityZone: "{{ availability_zone }}" 29 | managementSubnetCidr: "{{ management_cidr }}" 30 | privateSubnetCidr: "{{ private_cidr }}" 31 | publicSubnetCidr: "{{ public_cidr }}" 32 | applicationSubnetCidr: "{{ application_cidr }}" 33 | register: az_deploy_results 34 | 35 | - name: Persisting variable data 36 | template: src='host-managers.j2' dest=~/vars/f5aws/env/{{ env_name }}/inventory/group_vars/{{zone_id}}-az-managers 37 | 38 | - copy: content="{{ az_deploy_results | to_yaml }}" dest=~/vars/f5aws/env/{{ env_name }}/{{ inventory_hostname }}.yml 39 | -------------------------------------------------------------------------------- /roles/infra/tasks/deploy_az_cft.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Determining state of deployed cloudformation stacks 3 | shell: "python {{ install_path }}/bin/get_cfn_stack_state.py {{ region }} {{ env_name }}-{{ inventory_hostname }}" 4 | register: initial_cfn_state 5 | 6 | - name: Teardown previous stack if necessary 7 | cloudformation: 8 | stack_name="{{ env_name }}-{{ inventory_hostname }}" 9 | state=absent 10 | region="{{ region }}" 11 | template={{ install_path }}/roles/infra/files/az.json 12 | when: "'{{ initial_cfn_state['stdout'] }}' == 'ROLLBACK_COMPLETE'" 13 | 14 | - name: Creating subnets in all availability zones 
15 | cloudformation: 16 | stack_name="{{ env_name }}-{{ inventory_hostname }}" 17 | state=present 18 | region="{{ region }}" 19 | template={{ install_path }}/roles/infra/files/az.json 20 | args: 21 | tags: 22 | envName: "{{ env_name }}" 23 | inventoryHostname: "{{ inventory_hostname }}" 24 | template_parameters: 25 | envName: "{{ env_name }}" 26 | vpc: "{{ vpc_id }}" 27 | internetGateway: "{{ internet_gateway_id }}" 28 | availabilityZone: "{{ availability_zone }}" 29 | managementSubnetCidr: "{{ management_cidr }}" 30 | privateSubnetCidr: "{{ private_cidr }}" 31 | publicSubnetCidr: "{{ public_cidr }}" 32 | applicationSubnetCidr: "{{ application_cidr }}" 33 | register: az_deploy_results 34 | 35 | - name: Persisting variable data 36 | template: src='az.j2' dest=~/vars/f5aws/env/{{ env_name }}/inventory/group_vars/{{zone_id}} 37 | 38 | - copy: content="{{ az_deploy_results | to_yaml }}" dest=~/vars/f5aws/env/{{ env_name }}/{{ inventory_hostname }}.yml 39 | -------------------------------------------------------------------------------- /roles/infra/tasks/deploy_bigip_cft.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Determining state of deployed cloudformation stacks 3 | shell: "python {{ install_path }}/bin/get_cfn_stack_state.py {{ region }} {{ env_name }}-{{ inventory_hostname }}" 4 | register: initial_cfn_state 5 | 6 | - name: Teardown previous stack if necessary 7 | cloudformation: 8 | stack_name="{{ env_name }}-{{ inventory_hostname }}" 9 | state=absent 10 | region="{{ region }}" 11 | template={{ install_path }}/roles/infra/files/bigip.json 12 | when: "'{{ initial_cfn_state['stdout'] }}' == 'ROLLBACK_COMPLETE'" 13 | 14 | # Run python script to find out which AMI we should use based on user inputs 15 | # This is easier than the ugly mapping sections in CloudFormation templates 16 | # with high cardinality 17 | - shell: "python {{ install_path }}/bin/get_bigip_image_id.py --region {{ region }} --version {{ version }} --license {{ license_model }} --package {{ license_package }} --throughput {{ license_throughput }} --matchone" 18 | register: output 19 | 20 | - name: Launching a big-ip from CFT 21 | action: cloudformation 22 | stack_name="{{ env_name }}-{{ inventory_hostname }}" 23 | state=present 24 | region="{{ region }}" 25 | template={{ install_path }}/roles/infra/files/bigip.json 26 | args: 27 | tags: 28 | envName: "{{ env_name }}" 29 | host: "{{ inventory_hostname }}" 30 | instanceType: "BIG-IP" 31 | Name: "{{ inventory_hostname }}-{{env_name}}" 32 | template_parameters: 33 | envName: "{{ env_name }}" 34 | region: "{{ region }}" 35 | vpc: "{{ vpc_id }}" 36 | availabilityZone: "{{ availability_zone }}" 37 | managementSubnet: "{{ management_subnet }}" 38 | privateSubnet: "{{ private_subnet }}" 39 | publicSubnet: "{{ public_subnet }}" 40 | instanceType: "{{ instance_type }}" 41 | amiId: "{{ output['stdout'] }}" 42 | keyName: "{{ key_name }}" 43 | register: bigip_deploy_results 44 | 45 | # Persist data to disk for use across plays 46 | - name: Persist variable data 47 | copy: content="{{ bigip_deploy_results | to_yaml }}" dest=~/vars/f5aws/env/{{ env_name }}/{{ inventory_hostname }}.yml 48 | 49 | - copy: content="{{ bigip_deploy_results['stack_outputs'] | to_json }}" dest=~/vars/f5aws/env/{{ env_name }}/{{ inventory_hostname }}.json 50 | 51 | 52 | -------------------------------------------------------------------------------- /roles/infra/tasks/deploy_client_cft.yml:
-------------------------------------------------------------------------------- 1 | --- 2 | - name: Determining state of deployed cloudformation stacks 3 | shell: "python {{ install_path }}/bin/get_cfn_stack_state.py {{ region }} {{ env_name }}-{{ inventory_hostname }}" 4 | register: initial_cfn_state 5 | 6 | - name: Teardown previous stack if necessary 7 | cloudformation: 8 | stack_name="{{ env_name }}-{{ inventory_hostname }}" 9 | state=absent 10 | region="{{ region }}" 11 | template={{ install_path }}/roles/infra/files/client.json 12 | when: "'{{ initial_cfn_state['stdout'] }}' == 'ROLLBACK_COMPLETE'" 13 | 14 | # We could have used the ec2 module here from ansible. It works well when 15 | # deploying "generic" compute resources but we'll use cloudformation 16 | # instead for consistency. 17 | - name: Launch client hosts 18 | action: cloudformation 19 | stack_name="{{ env_name }}-{{ inventory_hostname }}" 20 | state=present 21 | region="{{ region }}" 22 | template={{ install_path }}/roles/infra/files/client.json 23 | args: 24 | tags: 25 | EnvName: "{{ env_name }}" 26 | Host: "{{ inventory_hostname }}" 27 | InstanceType: "Client" 28 | Name: "{{ inventory_hostname }}-{{env_name}}" 29 | template_parameters: 30 | envName: "{{ env_name }}" 31 | vpc: "{{ vpc_id }}" 32 | clientSubnet: "{{ public_subnet }}" 33 | instanceType: "{{ client_instance_type }}" 34 | keyName: "{{ key_name }}" 35 | 36 | register: client_deploy_results 37 | 38 | #- name: debug client_deploy_results var 39 | # debug: var=client_deploy_results 40 | 41 | # Persist data to disk for use across plays 42 | - copy: content="{{ client_deploy_results | to_yaml }}" dest=~/vars/f5aws/env/{{ env_name }}/{{ inventory_hostname }}.yml 43 | - copy: content="{{ client_deploy_results['stack_outputs'] | to_json }}" dest=~/vars/f5aws/env/{{ env_name }}/{{ inventory_hostname }}.json 44 | -------------------------------------------------------------------------------- /roles/infra/tasks/deploy_clienthost.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Determining state of deployed cloudformation stacks 3 | shell: "python {{ install_path }}/bin/get_cfn_stack_state.py {{ region }} {{ env_name }}-{{ inventory_hostname }}" 4 | register: initial_cfn_state 5 | 6 | - name: Teardown previous stack if necessary 7 | cloudformation: 8 | stack_name="{{ env_name }}-{{ inventory_hostname }}" 9 | state=absent 10 | region="{{ region }}" 11 | template={{ install_path }}/roles/infra/files/client.json 12 | when: "'{{ initial_cfn_state['stdout'] }}' == 'ROLLBACK_COMPLETE'" 13 | 14 | # We could have used the ec2 module here from ansible. It works well when 15 | # deploying "generic" compute resources but we'll use cloudformation 16 | # instead for consistency. 
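# This task launches from the same roles/infra/files/client.json template as
# deploy_client_cft.yml; only the tags and the instance-type variable
# (clienthost_instance_type here versus client_instance_type there) differ.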
17 | - name: Launch client hosts 18 | action: cloudformation 19 | stack_name="{{ env_name }}-{{ inventory_hostname }}" 20 | state=present 21 | region="{{ region }}" 22 | template={{ install_path }}/roles/infra/files/client.json 23 | args: 24 | tags: 25 | EnvName: "{{ env_name }}" 26 | Host: "{{ inventory_hostname }}" 27 | InstanceType: "ClientHost" 28 | template_parameters: 29 | envName: "{{ env_name }}" 30 | vpc: "{{ vpc_id }}" 31 | clientSubnet: "{{ public_subnet }}" 32 | instanceType: "{{ clienthost_instance_type }}" 33 | keyName: "{{ key_name }}" 34 | 35 | register: clienthost_deploy_results 36 | 37 | #- name: debug clienthost_deploy_results var 38 | # debug: var=clienthost_deploy_results 39 | 40 | # Persist data to disk for use across plays 41 | - copy: content="{{ clienthost_deploy_results | to_yaml }}" dest=~/vars/f5aws/env/{{ env_name }}/{{ inventory_hostname }}.yml 42 | - copy: content="{{ clienthost_deploy_results['stack_outputs'] | to_json }}" dest=~/vars/f5aws/env/{{ env_name }}/{{ inventory_hostname }}.json 43 | -------------------------------------------------------------------------------- /roles/infra/tasks/deploy_eip.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Create EIP for a Private IP 3 | cloudformation: 4 | stack_name="{{ env_name }}-{{ inventory_hostname }}-eip-{{vip_id}}" 5 | state=present 6 | region="{{ region }}" 7 | template={{ install_path }}/roles/infra/files/eip.json 8 | args: 9 | template_parameters: 10 | vpc: "{{ vpc_id }}" 11 | NetworkInterfaceId: "{{ stack_outputs['ExternalInterfaceId'] }}" 12 | # The CFT cannot output the ExternalSecondaryIps attribute because it is not a string 13 | #PrivateIPAddress: "{{ stack_outputs["ExternalSecondaryIps"][vip_id] }}" 14 | # test first with a hardcoded index 15 | PrivateIpAddress: "{{ stack_outputs[vip_id] }}" 16 | register: eip_results 17 | 18 | - name: create a file in which to persist results 19 | copy: content="{{ eip_results | to_yaml }}" dest=~/vars/f5aws/env/{{ env_name }}/{{ inventory_hostname }}-eip-{{owner}}.yml 20 | - copy: content="{{ eip_results['stack_outputs'] | to_json }}" dest=~/vars/f5aws/env/{{ env_name }}/{{ inventory_hostname }}-vip-{{vip_id}}.json 21 | -------------------------------------------------------------------------------- /roles/infra/tasks/deploy_eip_cft.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Create EIP for a Private IP 3 | cloudformation: 4 | stack_name="{{ env_name }}-{{ inventory_hostname }}-vip-{{vip_id}}" 5 | state=present 6 | region="{{ region }}" 7 | template={{ install_path }}/roles/infra/files/eip.json 8 | args: 9 | template_parameters: 10 | envName: "{{ env_name }}" 11 | vpc: "{{ vpc_id }}" 12 | NetworkInterfaceId: "{{ stack_outputs['ExternalInterfaceId'] }}" 13 | # The CFT cannot output the ExternalSecondaryIps attribute because it is not a string 14 | #PrivateIPAddress: "{{ stack_outputs["ExternalSecondaryIps"][vip_id] }}" 15 | # test first with a hardcoded index 16 | PrivateIpAddress: "{{ stack_outputs[vip_id] }}" 17 | register: eip_results 18 | 19 | #- debug: var=eip_results 20 | 21 | - name: create a file in which to persist results 22 | copy: content="{{ eip_results | to_yaml }}" dest=~/vars/f5aws/env/{{ env_name }}/{{ inventory_hostname }}-vip-{{vip_id}}.yml 23 | - copy: content="{{ eip_results['stack_outputs'] | to_json }}" dest=~/vars/f5aws/env/{{ env_name }}/{{ inventory_hostname }}-vip-{{vip_id}}.json 24 |
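Each EIP stack persists its outputs (privateIpAddress, eipAddress, and eipAllocationId, per eip.json) to JSON files that later plays read back as host variables such as VipEip and VipPrivateIp. A minimal sketch of inspecting one of these files by hand (the environment, host, and vip names are placeholders):

#!/usr/bin/env python
# Print the EIP-to-private-IP mapping persisted by the EIP deploy tasks.
import json
import os

path = os.path.expanduser(
    '~/vars/f5aws/env/<env_name>/<inventory_hostname>-vip-<vip_id>.json')
with open(path) as f:
    outputs = json.load(f)
print '%s -> %s (allocation %s)' % (
    outputs['eipAddress'], outputs['privateIpAddress'], outputs['eipAllocationId'])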
-------------------------------------------------------------------------------- /roles/infra/tasks/deploy_gtm_cft.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Determining state of deployed cloudformation stacks 3 | shell: "python {{ install_path }}/bin/get_cfn_stack_state.py {{ region }} {{ env_name }}-{{ inventory_hostname }}" 4 | register: initial_cfn_state 5 | 6 | - name: Teardown previous stack if necessary 7 | cloudformation: 8 | stack_name="{{ env_name }}-{{ inventory_hostname }}" 9 | state=absent 10 | region="{{ region }}" 11 | template={{ install_path }}/roles/infra/files/gtm.json 12 | when: "'{{ initial_cfn_state['stdout'] }}' == 'ROLLBACK_COMPLETE'" 13 | 14 | # Run python script to find out which AMI we should use based on user inputs 15 | # This is easier than the ugly mapping sections in CloudFormation templates 16 | # with high cardinality 17 | - shell: "python {{ install_path }}/bin/get_bigip_image_id.py --region {{ region }} --version {{ version }} --license {{ license_model }} --package {{ license_package }} --throughput {{ license_throughput }} --matchone" 18 | register: output 19 | 20 | - name: Launching a gtm in each AZ 21 | action: cloudformation 22 | stack_name="{{ env_name }}-{{ inventory_hostname }}" 23 | state=present 24 | region="{{ region }}" 25 | template={{ install_path }}/roles/infra/files/gtm.json 26 | args: 27 | tags: 28 | envName: "{{ env_name }}" 29 | host: "{{ inventory_hostname }}" 30 | instanceType: "GTM" 31 | Name: "{{ inventory_hostname }}-{{env_name}}" 32 | template_parameters: 33 | envName: "{{ env_name }}" 34 | region: "{{ region }}" 35 | vpc: "{{ vpc_id }}" 36 | availabilityZone: "{{ availability_zone }}" 37 | managementSubnet: "{{ management_subnet }}" 38 | publicSubnet: "{{ public_subnet }}" 39 | instanceType: "{{ instance_type }}" 40 | amiId: "{{ output['stdout'] }}" #us-east-1. 
eventually we'll want to leverage the mappings in cft 41 | keyName: "{{ key_name }}" 42 | register: gtm_deploy_results 43 | 44 | - name: Persisting variable data 45 | copy: content="{{ gtm_deploy_results | to_yaml }}" dest=~/vars/f5aws/env/{{ env_name }}/{{ inventory_hostname }}.yml 46 | - copy: content="{{ gtm_deploy_results['stack_outputs'] | to_json }}" dest=~/vars/f5aws/env/{{ env_name }}/{{ inventory_hostname }}.json 47 | 48 | 49 | -------------------------------------------------------------------------------- /roles/infra/tasks/deploy_vpc.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Determining state of deployed cloudformation stacks 3 | shell: "python {{ install_path }}/bin/get_cfn_stack_state.py {{ region }} {{ env_name }}-{{ inventory_hostname }}" 4 | register: initial_cfn_state 5 | 6 | - name: Teardown previous stack if necessary 7 | cloudformation: 8 | stack_name="{{ env_name }}-{{ inventory_hostname }}" 9 | state=absent 10 | region="{{ region }}" 11 | template={{ install_path }}/roles/infra/files/vpc.json 12 | when: "'{{ initial_cfn_state['stdout'] }}' == 'ROLLBACK_COMPLETE'" 13 | 14 | - name: Creating Vpc 15 | cloudformation: 16 | stack_name="{{ env_name }}-{{ inventory_hostname }}" 17 | state=present 18 | region="{{ region }}" 19 | template={{ install_path }}/roles/infra/files/vpc.json 20 | args: 21 | tags: 22 | envName: "{{ env_name }}" 23 | inventoryHostname: "{{ inventory_hostname }}" 24 | template_parameters: 25 | envName: "{{ env_name }}" 26 | cidrBlock: "{{ vpc_cidr }}" 27 | register: vpc_deploy_results 28 | 29 | - name: Persisting variable data 30 | template: src='managers.j2' dest='~/vars/f5aws/env/{{ env_name }}/inventory/group_vars/managers' 31 | 32 | - copy: content="{{ vpc_deploy_results | to_yaml }}" dest=~/vars/f5aws/env/{{ env_name }}/{{ inventory_hostname }}.yml -------------------------------------------------------------------------------- /roles/infra/tasks/deploy_vpc_cft.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Determining state of deployed cloudformation stacks 3 | shell: "python {{ install_path }}/bin/get_cfn_stack_state.py {{ region }} {{ env_name }}-{{ inventory_hostname }}" 4 | register: initial_cfn_state 5 | 6 | - name: Teardown previous stack if necessary 7 | cloudformation: 8 | stack_name="{{ env_name }}-{{ inventory_hostname }}" 9 | state=absent 10 | region="{{ region }}" 11 | template={{ install_path }}/roles/infra/files/vpc.json 12 | when: "'{{ initial_cfn_state['stdout'] }}' == 'ROLLBACK_COMPLETE'" 13 | 14 | - name: Creating Vpc 15 | cloudformation: 16 | stack_name="{{ env_name }}-{{ inventory_hostname }}" 17 | state=present 18 | region="{{ region }}" 19 | template={{ install_path }}/roles/infra/files/vpc.json 20 | args: 21 | tags: 22 | envName: "{{ env_name }}" 23 | inventoryHostname: "{{ inventory_hostname }}" 24 | template_parameters: 25 | envName: "{{ env_name }}" 26 | cidrBlock: "{{ vpc_cidr }}" 27 | register: vpc_deploy_results 28 | 29 | - name: Persisting variable data 30 | template: src='vpc.j2' dest='~/vars/f5aws/env/{{ env_name }}/inventory/group_vars/vpc' 31 | 32 | - copy: content="{{ vpc_deploy_results | to_yaml }}" dest=~/vars/f5aws/env/{{ env_name }}/{{ inventory_hostname }}.yml 33 | -------------------------------------------------------------------------------- /roles/infra/tasks/teardown_analyticshost_cft.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - 
name: Deleting analytics hosts cfn stack(s) 3 | action: cloudformation 4 | stack_name="{{ env_name }}-{{ inventory_hostname }}" 5 | state=absent 6 | region="{{ region }}" 7 | template={{ install_path }}/roles/infra/files/analyticshost.json 8 | register: analytics_teardown_results 9 | ignore_errors: yes 10 | 11 | - copy: content="{{ analytics_teardown_results | to_yaml }}" dest=~/vars/f5aws/env/{{ env_name }}/{{ inventory_hostname }}.yml 12 | -------------------------------------------------------------------------------- /roles/infra/tasks/teardown_apphost.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Deleting app hosts cfn stack(s) 3 | action: cloudformation 4 | stack_name="{{ env_name }}-{{ inventory_hostname }}" 5 | state=absent 6 | region="{{ region }}" 7 | template={{ install_path }}/roles/infra/files/apphost.json 8 | register: apphost_teardown_results 9 | ignore_errors: yes 10 | 11 | - copy: content="{{ apphost_teardown_results | to_yaml }}" dest=~/vars/f5aws/env/{{ env_name }}/{{ inventory_hostname }}.yml 12 | -------------------------------------------------------------------------------- /roles/infra/tasks/teardown_apphost_cft.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Deleting app hosts cfn stack(s) 3 | action: cloudformation 4 | stack_name="{{ env_name }}-{{ inventory_hostname }}" 5 | state=absent 6 | region="{{ region }}" 7 | template={{ install_path }}/roles/infra/files/apphost.json 8 | register: apphost_teardown_results 9 | ignore_errors: yes 10 | 11 | - copy: content="{{ apphost_teardown_results | to_yaml }}" dest=~/vars/f5aws/env/{{ env_name }}/{{ inventory_hostname }}.yml 12 | -------------------------------------------------------------------------------- /roles/infra/tasks/teardown_az.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Deleting availability zone cfn stack(s) 3 | cloudformation: 4 | stack_name="{{ env_name }}-{{ inventory_hostname }}" 5 | state=absent 6 | region="{{ region }}" 7 | template={{ install_path }}/roles/infra/files/az.json 8 | register: az_teardown_results 9 | ignore_errors: yes 10 | 11 | - copy: content="{{ az_teardown_results | to_yaml }}" dest=~/vars/f5aws/env/{{ env_name }}/{{ inventory_hostname }}.yml 12 | 13 | -------------------------------------------------------------------------------- /roles/infra/tasks/teardown_az_cft.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Deleting availability zone cfn stack(s) 3 | cloudformation: 4 | stack_name="{{ env_name }}-{{ inventory_hostname }}" 5 | state=absent 6 | region="{{ region }}" 7 | template={{ install_path }}/roles/infra/files/az.json 8 | register: az_teardown_results 9 | ignore_errors: yes 10 | 11 | - copy: content="{{ az_teardown_results | to_yaml }}" dest=~/vars/f5aws/env/{{ env_name }}/{{ inventory_hostname }}.yml 12 | 13 | -------------------------------------------------------------------------------- /roles/infra/tasks/teardown_bigip.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Deleting BIG-IP cfn stack(s) 3 | action: cloudformation 4 | stack_name="{{ env_name }}-{{ inventory_hostname }}" 5 | state=absent 6 | region="{{ region }}" 7 | template={{ install_path }}/roles/infra/files/bigip.json 8 | register: bigip_teardown_results 9 | ignore_errors: yes 10 | 11 | - copy: content="{{ 
bigip_teardown_results | to_yaml }}" dest=~/vars/f5aws/env/{{ env_name }}/{{ inventory_hostname }}.yml 12 | 13 | -------------------------------------------------------------------------------- /roles/infra/tasks/teardown_bigip_cft.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Deleting BIG-IP cfn stack(s) 3 | action: cloudformation 4 | stack_name="{{ env_name }}-{{ inventory_hostname }}" 5 | state=absent 6 | region="{{ region }}" 7 | template={{ install_path }}/roles/infra/files/bigip.json 8 | register: bigip_teardown_results 9 | ignore_errors: yes 10 | 11 | - copy: content="{{ bigip_teardown_results | to_yaml }}" dest=~/vars/f5aws/env/{{ env_name }}/{{ inventory_hostname }}.yml 12 | 13 | -------------------------------------------------------------------------------- /roles/infra/tasks/teardown_client.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Deleting clienthost cfn stack(s) 3 | action: cloudformation 4 | stack_name="{{ env_name }}-{{ inventory_hostname }}" 5 | state=absent 6 | region="{{ region }}" 7 | template={{ install_path }}/roles/infra/files/client.json 8 | register: client_teardown_results 9 | ignore_errors: yes 10 | 11 | - copy: content="{{ client_teardown_results | to_yaml }}" dest=~/vars/f5aws/env/{{ env_name }}/{{ inventory_hostname }}.yml 12 | -------------------------------------------------------------------------------- /roles/infra/tasks/teardown_client_cft.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Deleting client cfn stack(s) 3 | action: cloudformation 4 | stack_name="{{ env_name }}-{{ inventory_hostname }}" 5 | state=absent 6 | region="{{ region }}" 7 | template={{ install_path }}/roles/infra/files/client.json 8 | register: client_teardown_results 9 | ignore_errors: yes 10 | 11 | - copy: content="{{ client_teardown_results | to_yaml }}" dest=~/vars/f5aws/env/{{ env_name }}/{{ inventory_hostname }}.yml 12 | -------------------------------------------------------------------------------- /roles/infra/tasks/teardown_eip.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Delete EIP for a Private IP 3 | cloudformation: 4 | stack_name="{{ env_name }}-{{ inventory_hostname }}-vip-{{vip_id}}" 5 | state=absent 6 | region="{{ region }}" 7 | template={{ install_path }}/roles/infra/files/eip.json 8 | register: eip_teardown_results 9 | ignore_errors: yes 10 | 11 | #- debug: var=eip_teardown_results 12 | 13 | - name: create a file in which to persist results 14 | copy: content="{{ eip_teardown_results | to_yaml }}" dest=~/vars/f5aws/env/{{ env_name }}/{{ inventory_hostname }}-vip-{{vip_id}}.yml 15 | -------------------------------------------------------------------------------- /roles/infra/tasks/teardown_eip_cft.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Delete EIP for a Private IP 3 | cloudformation: 4 | stack_name="{{ env_name }}-{{ inventory_hostname }}-vip-{{vip_id}}" 5 | state=absent 6 | region="{{ region }}" 7 | template={{ install_path }}/roles/infra/files/eip.json 8 | register: eip_teardown_results 9 | ignore_errors: yes 10 | 11 | #- debug: var=eip_teardown_results 12 | 13 | - name: create a file in which to persist results 14 | copy: content="{{ eip_teardown_results | to_yaml }}" dest=~/vars/f5aws/env/{{ env_name }}/{{ inventory_hostname }}-vip-{{vip_id}}.yml 15 | 
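# Note: like the other teardown task files in this role, this one runs the
# cloudformation module with state=absent and sets ignore_errors, presumably
# so that a full environment teardown can continue even when a given stack
# was never created.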
-------------------------------------------------------------------------------- /roles/infra/tasks/teardown_gtm.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Teardown BIG-IP GTM cfn stack(s) 4 | action: cloudformation 5 | stack_name="{{ env_name }}-{{ inventory_hostname }}" 6 | state=absent 7 | region="{{ region }}" 8 | template={{ install_path }}/roles/infra/files/gtm.json 9 | register: gtm_teardown_results 10 | ignore_errors: yes 11 | - copy: content="{{ gtm_teardown_results | to_yaml }}" dest=~/vars/f5aws/env/{{ env_name }}/{{ inventory_hostname }}.yml 12 | 13 | -------------------------------------------------------------------------------- /roles/infra/tasks/teardown_gtm_cft.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Teardown BIG-IP GTM cfn stack(s) 4 | action: cloudformation 5 | stack_name="{{ env_name }}-{{ inventory_hostname }}" 6 | state=absent 7 | region="{{ region }}" 8 | template={{ install_path }}/roles/infra/files/gtm.json 9 | register: gtm_teardown_results 10 | ignore_errors: yes 11 | 12 | - copy: content="{{ gtm_teardown_results | to_yaml }}" dest=~/vars/f5aws/env/{{ env_name }}/{{ inventory_hostname }}.yml 13 | 14 | -------------------------------------------------------------------------------- /roles/infra/tasks/teardown_vpc.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Deleting VPC cfn stack 3 | action: cloudformation 4 | stack_name="{{ env_name }}-{{ inventory_hostname }}" 5 | state=absent 6 | region="{{ region }}" 7 | template={{ install_path }}/roles/infra/files/vpc.json 8 | register: vpc_teardown_results 9 | ignore_errors: yes 10 | 11 | - copy: content="{{ vpc_teardown_results | to_yaml }}" dest=~/vars/f5aws/env/{{ env_name }}/{{ inventory_hostname }}.yml 12 | -------------------------------------------------------------------------------- /roles/infra/tasks/teardown_vpc_cft.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Deleting VPC cfn stack 3 | action: cloudformation 4 | stack_name="{{ env_name }}-{{ inventory_hostname }}" 5 | state=absent 6 | region="{{ region }}" 7 | template={{ install_path }}/roles/infra/files/vpc.json 8 | register: vpc_teardown_results 9 | ignore_errors: yes 10 | 11 | - copy: content="{{ vpc_teardown_results | to_yaml }}" dest=~/vars/f5aws/env/{{ env_name }}/{{ inventory_hostname }}.yml 12 | -------------------------------------------------------------------------------- /roles/infra/templates/az.j2: -------------------------------------------------------------------------------- 1 | --- 2 | management_subnet: "{{ az_deploy_results['stack_outputs']['ManagementSubnet'] }}" 3 | private_subnet: "{{ az_deploy_results['stack_outputs']['PrivateSubnet'] }}" 4 | public_subnet: "{{ az_deploy_results['stack_outputs']['PublicSubnet'] }}" 5 | application_subnet: "{{ az_deploy_results['stack_outputs']['ApplicationSubnet'] }}" 6 | -------------------------------------------------------------------------------- /roles/infra/templates/host-managers.j2: -------------------------------------------------------------------------------- 1 | --- 2 | management_subnet: "{{ az_deploy_results['stack_outputs']['ManagementSubnet'] }}" 3 | private_subnet: "{{ az_deploy_results['stack_outputs']['PrivateSubnet'] }}" 4 | public_subnet: "{{ az_deploy_results['stack_outputs']['PublicSubnet'] }}" 5 | application_subnet: "{{ 
az_deploy_results['stack_outputs']['ApplicationSubnet'] }}" 6 | -------------------------------------------------------------------------------- /roles/infra/templates/managers.j2: -------------------------------------------------------------------------------- 1 | --- 2 | vpc_id: "{{ vpc_deploy_results['stack_outputs']['vpcId'] }}" 3 | internet_gateway_id: "{{ vpc_deploy_results['stack_outputs']['internetGatewayId'] }}" -------------------------------------------------------------------------------- /roles/infra/templates/vpc.j2: -------------------------------------------------------------------------------- 1 | --- 2 | vpc_id: "{{ vpc_deploy_results['stack_outputs']['vpcId'] }}" 3 | internet_gateway_id: "{{ vpc_deploy_results['stack_outputs']['internetGatewayId'] }}" 4 | -------------------------------------------------------------------------------- /roles/inventory_manager/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This file contains default variables for nearly 3 | # everything that can be configured by a user. 4 | # We expect that many of these will be used as is. 5 | # The resolution priority of ansible variables is 6 | # such that the vars in this file are the last to 7 | # be used. Thus, any configuration provided 8 | # via the command line overrides the below. 9 | 10 | # deployment 11 | deployment_model: "single-standalone" 12 | deployment_type: "lb_only" 13 | deploy_analytics: "false" 14 | region: us-east-1 15 | 16 | # networking 17 | vpc_cidr_prefix: "172.16" 18 | apphosts_per_zone: 1 19 | 20 | # default AMI is F5 BIG-IP Virtual Edition 25 Mbps - Better 21 | # To accept EULA, go to: 22 | # https://aws.amazon.com/marketplace/pp/B00JL3Q2VI 23 | 24 | bigip_version: "12.1" 25 | bigip_license_throughput: "25mbps" 26 | bigip_license_model: "hourly" 27 | bigip_license_package: "better" 28 | bigip_instance_type: "m3.xlarge" 29 | bigip_modules: 30 | - ltm 31 | - avr 32 | 33 | gtm_version: "12.1" 34 | gtm_license_throughput: "25mbps" 35 | gtm_license_model: "hourly" 36 | gtm_license_package: "better" 37 | gtm_instance_type: "m3.xlarge" 38 | gtm_modules: 39 | - avr 40 | - gtm 41 | 42 | # default AMI is Ubuntu 14.04 LTS 43 | # To accept EULA, go to: 44 | # https://aws.amazon.com/marketplace/pp/B00JV9JBDS 45 | # app deployment 46 | # have to use an m3 instance type, because it works with paravirtualized instances 47 | client_instance_type: "m3.medium" 48 | 49 | # default AMI is ECS-Optimized AMI (2015.03.g) 50 | # To accept EULA, go to: 51 | # https://aws.amazon.com/marketplace/pp/B00U6QTYI2 52 | # have to use an m3 instance type, because it works with paravirtualized instances 53 | apphost_instance_type: "m3.medium" 54 | containers_per_app_host: 1 55 | image_id: "mutzel/all-in-one-hackazon:postinstall2" 56 | 57 | # splunk 58 | analytics_instance_type: "c3.large" 59 | 60 | standalone_per_zone: 61 | clustering: none 62 | bigips_per_zone: 1 63 | gtms_per_zone: 1 64 | 65 | cluster_per_zone: 66 | clustering: same-az 67 | bigips_per_zone: 2 68 | gtms_per_zone: 1 69 | 70 | 71 | -------------------------------------------------------------------------------- /roles/inventory_manager/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # if we are deploying a WAF as part of our BIG-IP config, 3 | # we need the BEST license and asm to be provisioned after boot 4 | - include: provision_inventory.yml 5 | vars: 6 | bigip_license_package: best 7 | bigip_modules: 8 | - avr 9 | 
- asm 10 | - ltm 11 | when: deployment_type.lower() == "lb_and_waf" 12 | 13 | - include: provision_inventory.yml 14 | when: deployment_type.lower() == "lb_only" 15 | 16 | - include: provision_analytics.yml 17 | when: deploy_analytics.lower() == "true" -------------------------------------------------------------------------------- /roles/inventory_manager/tasks/provision_analytics.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - template: src="group_vars_analyticshosts.j2" dest="~/vars/f5aws/env/{{ env_name }}/inventory/group_vars/analyticshosts" -------------------------------------------------------------------------------- /roles/inventory_manager/tasks/provision_inventory.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Create the directory structure for this deployment 3 | - file: path="~/vars/f5aws/env/{{ env_name }}" state="directory" 4 | - file: path="~/vars/f5aws/env/{{ env_name }}/inventory" state="directory" 5 | - file: path="~/vars/f5aws/env/{{ env_name }}/inventory/host_vars" state="directory" 6 | - file: path="~/vars/f5aws/env/{{ env_name }}/inventory/group_vars" state="directory" 7 | 8 | # Touch an empty group_vars file for 'apphosts' and create the one for 'localhosts' 9 | 10 | - file: path="~/vars/f5aws/env/{{ env_name }}/inventory/group_vars/apphosts" state="touch" 11 | - template: src="group_vars_localhosts.j2" dest="~/vars/f5aws/env/{{ env_name }}/inventory/group_vars/localhosts" 12 | 13 | # Adding group vars for dynamic groups that will be provisioned 14 | - template: src="group_vars_clienthosts.j2" dest="~/vars/f5aws/env/{{ env_name }}/inventory/group_vars/clienthosts" 15 | - template: src="group_vars_apphosts.j2" dest="~/vars/f5aws/env/{{ env_name }}/inventory/group_vars/apphosts" 16 | - template: src="group_vars_bigips.j2" dest="~/vars/f5aws/env/{{ env_name }}/inventory/group_vars/bigips" 17 | - template: src="group_vars_gtms.j2" dest="~/vars/f5aws/env/{{ env_name }}/inventory/group_vars/gtms" 18 | 19 | # Adding hosts file from template 20 | - template: src="hosts-{{ deployment_model }}.j2" dest="~/vars/f5aws/env/{{ env_name }}/inventory/hosts" 21 | -------------------------------------------------------------------------------- /roles/inventory_manager/tasks/teardown.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # remove the variables directory for this deployment 3 | - file: path="~/vars/f5aws/env/{{ env_name }}" state="absent" -------------------------------------------------------------------------------- /roles/inventory_manager/templates/group_vars_analyticshosts.j2: -------------------------------------------------------------------------------- 1 | ansible_ssh_user: ec2-user 2 | ansible_ssh_private_key_file: {{ ssh_key }} 3 | analytics_instance_type: {{ analytics_instance_type }} 4 | -------------------------------------------------------------------------------- /roles/inventory_manager/templates/group_vars_apphosts.j2: -------------------------------------------------------------------------------- 1 | ansible_ssh_user: ubuntu 2 | ansible_ssh_private_key_file: {{ ssh_key }} 3 | apphost_instance_type: {{ apphost_instance_type }} -------------------------------------------------------------------------------- /roles/inventory_manager/templates/group_vars_bigips.j2: -------------------------------------------------------------------------------- 1 | # ansible group vars for all bigips which are provisioned 2 | # note: need m3.2xlarge to provision AVR 3 | 
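# Illustrative rendering (not part of the original template), assuming the
# defaults from roles/inventory_manager/defaults/main.yml:
#   version: 12.1
#   license_package: better
#   instance_type: m3.xlarge
#   modules: ['ltm', 'avr']
# Note that the bigip_modules expression below renders the list in YAML flow
# style, whereas group_vars_gtms.j2 builds the same structure with a for loop.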
ansible_ssh_user: admin 4 | version: {{ bigip_version }} 5 | license_throughput: {{ bigip_license_throughput }} 6 | license_model: {{ bigip_license_model }} 7 | license_package: {{ bigip_license_package }} 8 | instance_type: {{ bigip_instance_type }} 9 | modules: {{ bigip_modules }} 10 | ansible_ssh_private_key_file: {{ ssh_key }} -------------------------------------------------------------------------------- /roles/inventory_manager/templates/group_vars_clienthosts.j2: -------------------------------------------------------------------------------- 1 | # ansible group vars for all client hosts which are provisioned 2 | ansible_ssh_user: ubuntu 3 | ansible_ssh_private_key_file: {{ ssh_key }} 4 | client_instance_type: {{ client_instance_type }} -------------------------------------------------------------------------------- /roles/inventory_manager/templates/group_vars_gtms.j2: -------------------------------------------------------------------------------- 1 | # ansible group vars for all gtm bigips which are provisioned 2 | ansible_ssh_user: admin 3 | version: {{ gtm_version }} 4 | license_throughput: {{ gtm_license_throughput }} 5 | license_model: {{ gtm_license_model }} 6 | license_package: {{ gtm_license_package }} 7 | instance_type: {{ gtm_instance_type }} 8 | modules: 9 | {% for item in gtm_modules %} 10 | - {{ item }} 11 | {% endfor %} 12 | ansible_ssh_private_key_file: {{ ssh_key }} -------------------------------------------------------------------------------- /roles/inventory_manager/templates/group_vars_localhosts.j2: -------------------------------------------------------------------------------- 1 | --- 2 | ansible_connection: local 3 | ansible_python_interpreter: /usr/bin/env python 4 | 5 | -------------------------------------------------------------------------------- /roles/inventory_manager/templates/hosts-cluster-per-zone.j2: -------------------------------------------------------------------------------- 1 | ; ansible "hosts" inventory for {{ env_name }} 2 | ; This is an auto-generated file for this deployment. 
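; Illustrative rendering (not part of the original template): with two
; availability zones and the default cluster_per_zone settings
; (bigips_per_zone: 2), the loops below produce groups such as:
;
;   [zone1]
;   zone1-az
;   zone1-clienthost1
;   zone1-apphost1
;   zone1-gtm1
;   zone1-bigip1
;   zone1-bigip2
;
;   [zone1-bigip-cluster]
;   zone1-bigip1
;   zone1-bigip2
;
; and likewise for zone2.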
3 | 4 | ; hosts and groups 5 | 6 | vpc-manager 7 | 8 | {% for zone in zones %} 9 | [zone{{ loop.index }}] 10 | zone{{ loop.index }}-az 11 | zone{{ loop.index }}-clienthost1 12 | zone{{ loop.index }}-apphost1 13 | zone{{ loop.index }}-gtm1 14 | {% set outer_loop = loop %} 15 | {% for bigipnum in range(1, cluster_per_zone['bigips_per_zone'] + 1) %} 16 | zone{{ outer_loop.index }}-bigip{{ loop.index }} 17 | {% endfor %} 18 | {% if deploy_analytics.lower() == "true" %} 19 | zone{{ loop.index }}-analyticshost1 20 | {% endif %} 21 | {% endfor %} 22 | 23 | [vpc] 24 | 25 | [vpc:children] 26 | {% for zone in zones %} 27 | zone{{ loop.index }} 28 | {% endfor %} 29 | 30 | [azs] 31 | {% for zone in zones %} 32 | zone{{ loop.index }}-az 33 | {% endfor %} 34 | 35 | [apphosts] 36 | {% for zone in zones %} 37 | {% set outer_loop = loop %} 38 | {% for apphostnum in range(1, apphosts_per_zone + 1) %} 39 | zone{{ outer_loop.index }}-apphost{{ loop.index }} 40 | {% endfor %} 41 | {% endfor %} 42 | 43 | [bigips] 44 | {% for zone in zones %} 45 | {% set outer_loop = loop %} 46 | {% for bigipnum in range(1, cluster_per_zone['bigips_per_zone'] + 1) %} 47 | zone{{ outer_loop.index }}-bigip{{ loop.index }} 48 | {% endfor %} 49 | {% endfor %} 50 | 51 | [gtms] 52 | {% for zone in zones %} 53 | {% set outer_loop = loop %} 54 | {% for gtmnum in range(1, cluster_per_zone['gtms_per_zone'] + 1) %} 55 | zone{{ outer_loop.index }}-gtm{{ loop.index }} 56 | {% endfor %} 57 | {% endfor %} 58 | 59 | [clienthosts] 60 | zone1-clienthost1 61 | 62 | {% if deploy_analytics.lower() == "true" %} 63 | [analyticshosts] 64 | zone1-analyticshost1 65 | {% endif %} 66 | 67 | {% for zone in zones %} 68 | {% set outer_loop = loop %} 69 | [zone{{ outer_loop.index }}-bigip-cluster] 70 | {% for bigipnum in range(1, cluster_per_zone['bigips_per_zone'] + 1) %} 71 | zone{{ outer_loop.index }}-bigip{{ loop.index }} 72 | {% endfor %} 73 | 74 | {% endfor %} 75 | 76 | 77 | [bigip-clusters] 78 | {% for zone in zones %} 79 | zone{{ loop.index }}-bigip-cluster 80 | {% endfor %} 81 | 82 | 83 | [bigip-cluster-seeds] 84 | {% for zone in zones %} 85 | zone{{ loop.index }}-bigip1 86 | {% endfor %} 87 | 88 | 89 | ; group vars 90 | 91 | [all:vars] 92 | env_name={{ env_name }} 93 | image_id={{ image_id }} 94 | container_cnt={{ containers_per_app_host }} 95 | deployment_model={{ deployment_model.lower() }} 96 | deployment_type={{ deployment_type.lower() }} 97 | deploy_analytics={{ deploy_analytics.lower()}} 98 | region={{ region.lower() }} 99 | vpc_cidr={{ vpc_cidr_prefix }}.0.0/16 100 | key_name={{ ssh_key_name }} 101 | 102 | {% for zone in zones %} 103 | [zone{{ loop.index }}:vars] 104 | zone_id=zone{{ loop.index }} 105 | availability_zone={{ zone }} 106 | zone_cidr_prefix="{{ vpc_cidr_prefix }}.{{ loop.index }}" 107 | management_cidr="{{ vpc_cidr_prefix }}.{{ loop.index }}1.0/24" 108 | private_cidr="{{ vpc_cidr_prefix }}.{{ loop.index }}2.0/24" 109 | public_cidr="{{ vpc_cidr_prefix }}.{{ loop.index }}3.0/24" 110 | application_cidr="{{ vpc_cidr_prefix }}.{{ loop.index }}4.0/24" 111 | 112 | {% endfor %} 113 | 114 | [localhost:vars] 115 | ansible_connection=local 116 | ansible_python_interpreter="/usr/bin/env python" 117 | -------------------------------------------------------------------------------- /roles/inventory_manager/templates/hosts-single-cluster.j2: -------------------------------------------------------------------------------- 1 | ; ansible "hosts" inventory for {{ env_name }} 2 | ; This is an auto-generated file for this deployment. 
3 | 4 | ; hosts and groups 5 | 6 | vpc-manager 7 | 8 | 9 | [zone1] 10 | zone1-az 11 | zone1-apphost1 12 | zone1-bigip1 13 | zone1-bigip2 14 | {% if deploy_analytics.lower() == "true" %} 15 | zone1-analyticshost1 16 | {% endif %} 17 | 18 | [vpc] 19 | 20 | [vpc:children] 21 | zone1 22 | 23 | [azs] 24 | zone1-az 25 | 26 | [apphosts] 27 | zone1-apphost1 28 | 29 | [bigips] 30 | zone1-bigip1 31 | zone1-bigip2 32 | 33 | {% if deploy_analytics.lower() == "true" %} 34 | [analyticshosts] 35 | zone1-analyticshost1 36 | {% endif %} 37 | 38 | [zone1-bigip-cluster] 39 | zone1-bigip1 40 | zone1-bigip2 41 | 42 | [bigip-clusters] 43 | zone1-bigip-cluster 44 | 45 | [bigip-cluster-seeds] 46 | zone1-bigip1 47 | 48 | ; group vars 49 | 50 | [all:vars] 51 | env_name={{ env_name }} 52 | image_id={{ image_id }} 53 | container_cnt={{ containers_per_app_host }} 54 | deployment_model={{ deployment_model.lower() }} 55 | deployment_type={{ deployment_type.lower() }} 56 | deploy_analytics={{ deploy_analytics.lower() }} 57 | region={{ region.lower() }} 58 | vpc_cidr={{ vpc_cidr_prefix }}.0.0/16 59 | key_name={{ ssh_key_name }} 60 | 61 | [zone1:vars] 62 | zone_id=zone1 63 | availability_zone="{{ zone }}" 64 | zone_cidr_prefix="{{ vpc_cidr_prefix }}.1" 65 | management_cidr="{{ vpc_cidr_prefix }}.11.0/24" 66 | private_cidr="{{ vpc_cidr_prefix }}.12.0/24" 67 | public_cidr="{{ vpc_cidr_prefix }}.13.0/24" 68 | application_cidr="{{ vpc_cidr_prefix }}.14.0/24" 69 | 70 | 71 | 72 | [localhost:vars] 73 | ansible_connection=local 74 | ansible_python_interpreter="/usr/bin/env python" 75 | -------------------------------------------------------------------------------- /roles/inventory_manager/templates/hosts-single-standalone.j2: -------------------------------------------------------------------------------- 1 | ; ansible "hosts" inventory for {{ env_name }} 2 | ; This is an auto-generated file for this deployment. 
3 | 4 | ; hosts and groups 5 | 6 | vpc-manager 7 | 8 | 9 | [zone1] 10 | zone1-az 11 | zone1-apphost1 12 | zone1-bigip1 13 | {% if deploy_analytics.lower() == "true" %} 14 | zone1-analyticshost1 15 | {% endif %} 16 | 17 | [vpc] 18 | 19 | [vpc:children] 20 | zone1 21 | 22 | [azs] 23 | zone1-az 24 | 25 | [apphosts] 26 | zone1-apphost1 27 | 28 | [bigips] 29 | zone1-bigip1 30 | 31 | {% if deploy_analytics.lower() == "true" %} 32 | [analyticshosts] 33 | zone1-analyticshost1 34 | {% endif %} 35 | 36 | 37 | [zone1-bigip-cluster] 38 | zone1-bigip1 39 | 40 | [bigip-clusters] 41 | zone1-bigip-cluster 42 | 43 | [bigip-cluster-seeds] 44 | zone1-bigip1 45 | 46 | 47 | ; group vars 48 | 49 | [all:vars] 50 | env_name={{ env_name }} 51 | image_id={{ image_id }} 52 | container_cnt={{ containers_per_app_host }} 53 | deployment_model={{ deployment_model.lower() }} 54 | deployment_type={{ deployment_type.lower() }} 55 | deploy_analytics={{ deploy_analytics.lower() }} 56 | region={{ region.lower() }} 57 | vpc_cidr={{ vpc_cidr_prefix }}.0.0/16 58 | key_name={{ ssh_key_name }} 59 | 60 | [zone1:vars] 61 | zone_id=zone1 62 | availability_zone="{{ zone }}" 63 | zone_cidr_prefix="{{ vpc_cidr_prefix }}.1" 64 | management_cidr="{{ vpc_cidr_prefix }}.11.0/24" 65 | private_cidr="{{ vpc_cidr_prefix }}.12.0/24" 66 | public_cidr="{{ vpc_cidr_prefix }}.13.0/24" 67 | application_cidr="{{ vpc_cidr_prefix }}.14.0/24" 68 | 69 | 70 | [localhost:vars] 71 | ansible_connection=local 72 | ansible_python_interpreter="/usr/bin/env python" 73 | -------------------------------------------------------------------------------- /roles/inventory_manager/templates/hosts-standalone-per-zone.j2: -------------------------------------------------------------------------------- 1 | ; ansible "hosts" inventory for {{ env_name }} 2 | ; This is an auto-generated file for this deployment. 
3 | 4 | ; hosts and groups 5 | 6 | vpc-manager 7 | 8 | {% for zone in zones %} 9 | [zone{{ loop.index }}] 10 | zone{{ loop.index }}-az 11 | zone{{ loop.index }}-clienthost1 12 | zone{{ loop.index }}-apphost1 13 | zone{{ loop.index }}-bigip1 14 | zone{{ loop.index }}-gtm1 15 | {% if deploy_analytics.lower() == "true" %} 16 | zone{{ loop.index }}-analyticshost1 17 | {% endif %} 18 | 19 | {% endfor %} 20 | 21 | [vpc] 22 | 23 | [vpc:children] 24 | {% for zone in zones %} 25 | zone{{ loop.index }} 26 | {% endfor %} 27 | 28 | [azs] 29 | {% for zone in zones %} 30 | zone{{ loop.index }}-az 31 | {% endfor %} 32 | 33 | [apphosts] 34 | {% for zone in zones %} 35 | {% set outer_loop = loop %} 36 | {% for apphostnum in range(1, apphosts_per_zone + 1) %} 37 | zone{{ outer_loop.index }}-apphost{{ loop.index }} 38 | {% endfor %} 39 | {% endfor %} 40 | 41 | [bigips] 42 | {% for zone in zones %} 43 | {% set outer_loop = loop %} 44 | {% for bigipnum in range(1, standalone_per_zone['bigips_per_zone'] + 1) %} 45 | zone{{ outer_loop.index }}-bigip{{ loop.index }} 46 | {% endfor %} 47 | {% endfor %} 48 | 49 | [gtms] 50 | {% for zone in zones %} 51 | {% set outer_loop = loop %} 52 | {% for gtmnum in range(1, standalone_per_zone['gtms_per_zone'] + 1) %} 53 | zone{{ outer_loop.index }}-gtm{{ loop.index }} 54 | {% endfor %} 55 | {% endfor %} 56 | 57 | [clienthosts] 58 | zone1-clienthost1 59 | 60 | {% if deploy_analytics.lower() == "true" %} 61 | [analyticshosts] 62 | zone1-analyticshost1 63 | {% endif %} 64 | 65 | {% for zone in zones %} 66 | {% set outer_loop = loop %} 67 | [zone{{ outer_loop.index }}-bigip-cluster] 68 | {% for bigipnum in range(1, standalone_per_zone['bigips_per_zone'] + 1) %} 69 | zone{{ outer_loop.index }}-bigip{{ loop.index }} 70 | {% endfor %} 71 | 72 | {% endfor %} 73 | 74 | 75 | [bigip-clusters] 76 | {% for zone in zones %} 77 | zone{{ loop.index }}-bigip-cluster 78 | {% endfor %} 79 | 80 | 81 | [bigip-cluster-seeds] 82 | {% for zone in zones %} 83 | zone{{ loop.index }}-bigip1 84 | {% endfor %} 85 | 86 | 87 | ; group vars 88 | 89 | [all:vars] 90 | env_name={{ env_name }} 91 | image_id={{ image_id }} 92 | container_cnt={{ containers_per_app_host }} 93 | deployment_model={{ deployment_model.lower() }} 94 | deployment_type={{ deployment_type.lower() }} 95 | deploy_analytics={{ deploy_analytics.lower() }} 96 | region={{ region.lower() }} 97 | vpc_cidr={{ vpc_cidr_prefix }}.0.0/16 98 | key_name={{ ssh_key_name }} 99 | 100 | {% for zone in zones %} 101 | [zone{{ loop.index }}:vars] 102 | zone_id=zone{{ loop.index }} 103 | availability_zone={{ zone }} 104 | zone_cidr_prefix="{{ vpc_cidr_prefix }}.{{ loop.index }}" 105 | management_cidr="{{ vpc_cidr_prefix }}.{{ loop.index }}1.0/24" 106 | private_cidr="{{ vpc_cidr_prefix }}.{{ loop.index }}2.0/24" 107 | public_cidr="{{ vpc_cidr_prefix }}.{{ loop.index }}3.0/24" 108 | application_cidr="{{ vpc_cidr_prefix }}.{{ loop.index }}4.0/24" 109 | 110 | {% endfor %} 111 | 112 | [localhost:vars] 113 | ansible_connection=local 114 | ansible_python_interpreter="/usr/bin/env python" 115 | -------------------------------------------------------------------------------- /src/MANIFEST.IN: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/f5devcentral/aws-deployments/3a55af06fca11508230263207a2ed402fca3cb29/src/MANIFEST.IN -------------------------------------------------------------------------------- /src/README.txt: -------------------------------------------------------------------------------- 1 | 
This directory contains two python modules: 2 | 3 | f5_aws 4 | This module provides a CLI and python class interface to manage 5 | deployments of F5 services in AWS. 6 | 7 | service_catalog (removed, see commit 8522697d4beacdf0abea1e881abc21c25b7ed6e3) 8 | This module uses pyramid from the pylons project to run an example 9 | service catalog. This service catalog is meant to provide a nice 10 | visualization layer for the f5_aws module above. 11 | 12 | See the /docs directory in the root level of this project for more information. -------------------------------------------------------------------------------- /src/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/f5devcentral/aws-deployments/3a55af06fca11508230263207a2ed402fca3cb29/src/__init__.py -------------------------------------------------------------------------------- /src/f5_aws/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/f5devcentral/aws-deployments/3a55af06fca11508230263207a2ed402fca3cb29/src/f5_aws/__init__.py -------------------------------------------------------------------------------- /src/f5_aws/config.py: -------------------------------------------------------------------------------- 1 | # config.py 2 | 3 | import os 4 | import yaml 5 | 6 | from configobj import ConfigObj 7 | 8 | class Config(object): 9 | def __init__(self): 10 | # our basic program variables 11 | self.config = ConfigObj('./conf/config.ini') 12 | 13 | # get user supplied variables 14 | self.config.merge(ConfigObj(os.path.expanduser(self.config['global_vars']))) 15 | 16 | # check that we got everything we need 17 | for v in self.config['required_vars']: 18 | if not v in self.config: 19 | raise Exception( 20 | 'Required variable "{}" not found in {}'.format(v, self.config['global_vars'])) 21 | 22 | self.config['vars_path'] = os.path.expanduser( 23 | '~/vars/{}'.format(self.config['prog'])) 24 | self.config['env_path'] = self.config['vars_path'] + '/env' 25 | self.config['bin_path'] = self.config['install_path'] + '/bin' 26 | 27 | # make the /env/ directory if it does not exist 28 | try: 29 | os.makedirs(self.config['env_path']) 30 | except OSError: 31 | pass -------------------------------------------------------------------------------- /src/f5_aws/exceptions.py: -------------------------------------------------------------------------------- 1 | # exceptions.py 2 | 3 | # some custom exceptions that we can catch 4 | 5 | class ValidationError(Exception): 6 | pass 7 | 8 | class ExecutionError(Exception): 9 | pass 10 | 11 | class LifecycleError(Exception): 12 | pass -------------------------------------------------------------------------------- /src/f5_aws/image_finder.py: -------------------------------------------------------------------------------- 1 | import re 2 | import boto 3 | import boto.ec2 4 | import collections 5 | 6 | class BigIpImageFinder(object): 7 | def __init__(self): 8 | pass 9 | 10 | def searchitem(self, keys, name): 11 | value = None 12 | for k in keys: 13 | match = re.search('({})'.format(k), name) 14 | if match: 15 | value = match.group(1) 16 | break 17 | return value 18 | 19 | def getImagesForRegion(self, region): 20 | """ 21 | Takes the name of an amazon region and retrieves a list of all 22 | images published by F5 for this region. 
23 | Formats a return object 24 | """ 25 | 26 | # get all the images (note: the aws-cli argument list below is unused; the query is done via boto) 27 | arg_s = ['aws', 'ec2', 'describe-images', 28 | '--region', region, '--filter', 29 | 'Name=name,Values=\'F5*\'', '--output=json'] 30 | 31 | conn = boto.ec2.connect_to_region(region) 32 | images = conn.get_all_images(filters={'name':'F5*'}) 33 | 34 | #dimensions 35 | packages = ['good', 'better', 'best'] 36 | throughputs = ['[0-9]+gbps', '[0-9]+mbps'] 37 | licenses = ['byol', 'hourly'] 38 | versions = [ 39 | # 11.6.0.1.0.403-hf1 40 | '[0-9]+[.][0-9]+[.][0-9]+[.][0-9]+[.][0-9]+[.][0-9]+[-hf]*[0-9]*', 41 | # 11.4.1-649.0-hf5 42 | '[0-9]+[.][0-9]+[.][0-9]+[-][0-9]+[.][0-9]+[-hf]*[0-9]*' 43 | ] 44 | 45 | structured = [] 46 | for i in images: 47 | try: 48 | image_name = i.name.lower() 49 | image_id = i.id.lower() 50 | 51 | license = self.searchitem(licenses, image_name) 52 | version = self.searchitem(versions, image_name) 53 | throughput = self.searchitem(throughputs, image_name) 54 | package = self.searchitem(packages, image_name) 55 | 56 | structured.append({ 57 | 'name': image_name, 58 | 'id': image_id, 59 | 'version': version, 60 | 'package': package, 61 | 'license': license, 62 | 'throughput': str(throughput)}) 63 | 64 | except Exception, e: 65 | print 'Failed processing image "{}". Will not be added to index. Error was {}'.format(image_name, e) 66 | 67 | return structured 68 | 69 | def find(self, **kwargs): 70 | images = self.getImagesForRegion(region=kwargs['region']) 71 | if kwargs['package'] is not None: 72 | images = [i for i in images if i['package'] == kwargs['package']] 73 | 74 | if kwargs['license'] is not None: 75 | images = [i for i in images if i['license'] == kwargs['license']] 76 | 77 | if kwargs['license'] == 'hourly' and kwargs['throughput'] is not None: 78 | images = [i for i in images if i['throughput'] == kwargs['throughput']] 79 | 80 | if kwargs['version'] is not None: 81 | images = [i for i in images if i['version'] is not None and 82 | re.match('^({})'.format(kwargs['version']), i['version'])] 83 | 84 | def byName_version(image): 85 | return image['version'] 86 | try: 87 | return sorted(images, key=byName_version, reverse=kwargs['take_newest']) 88 | except KeyError: 89 | return sorted(images, key=byName_version, reverse=True) -------------------------------------------------------------------------------- /src/f5_aws/test/README.md: -------------------------------------------------------------------------------- 1 | # README.md 2 | 3 | These tests should be run before checking code into the master branch on the public github account. You are responsible for this, as there is no CI framework for this code. 4 | 5 | Proper testing will ensure the scripts provided in this repository are kept in working order. 6 | 7 | Run tests (from the top-level directory) via: 8 | (venv)vagrant@f5demo:/aws-deployments$ py.test ./src/f5_aws/test 9 | 10 | To run a specific test: 11 | py.test ./src/f5_aws/test/test_deployments.py 12 | 13 | 14 | # Here is, in general, what we want to cover via tests (manual or automated): 15 | 1) Test that the images are available in the regions we specify (test_images.py) 16 | 2) Test that all the CFTs we are using are valid (test_cfts.py) 17 | 3) Test a simple, single standalone deployment model. 
18 | 4) Test deployment path where deploy_analytics=true and where deploy_analytics=false 19 | -when analytics is deployed, `info login` shows the URL to reach analytics host 20 | -when analytics not deployed, value for 'analyticshost' key in `info login` output is null 21 | 5) Test deployment path where deployment_type='lb_only' and where deployment_type='lb_and_waf' 22 | -when 'lb_only', better licensing package is used, only ltm and avr provisioned 23 | -when 'lb_and_waf', best licensing package is used, asm, ltm, avr provisioned, curl against VIP produces blocking page for vulnerabilities 24 | 6) Test the GTM clustering path 25 | -when multiple GTMs are deployed, what do we check? 26 | 7) Test the BIG-IP DSC clustering path (same-zone clustering) 27 | -when BIG-IP DSC is deployed, TMOS configuration objects created on one BIG-IP are replicated to the other. 28 | 8) Test when GTM deployed, WideIP is in output for `info login` 29 | -when GTM and a client host are deployed, running the `start_traffic` command should generate metrics in AVR or Splunk. The JMeter client hits a WideIP exposed by GTM. 30 | 31 | Unfortunately, these tests are expensive.... 32 | 33 | As an example, consider the costs for running test #6 34 | 4 BIG-IP Better, 25Mbps: 35 | EC2 instance footprint (m3.xlarge): $0.266/hr x 4 36 | BIG-IP Better utility license: $0.83/hr x 4 37 | Total: $1.096/hr x 4 = $4.384 38 | 39 | 2 App hosts 40 | EC2 instance footprint (m3.medium): $0.067 x 2 41 | License: $0.0/hr 42 | Total: $0.067/hr x 2 = $0.134 43 | 44 | 1 Client host 45 | EC2 instance footprint (m3.medium): $0.067/hr 46 | License: $0.0/hr 47 | Total: $0.067 48 | 49 | 1 Enterprise Splunk host 50 | EC2 instance footprint (c3.large): $0.105/hr 51 | License: $0.0/hr 52 | Total: $0.105 53 | 54 | Total for all for 1 hr = $4.69 55 | 56 | 57 | We ignore EBS storage costs as they are negligible. 
$0.10 /GB-month 58 | -------------------------------------------------------------------------------- /src/f5_aws/test/test_cfts.py: -------------------------------------------------------------------------------- 1 | # use the boto api to test all the cfts 2 | # which will be used to deploy ec2/vpc resources 3 | 4 | 5 | import os 6 | import sys 7 | import boto 8 | import boto.cloudformation 9 | import pytest 10 | 11 | from f5_aws.config import Config 12 | 13 | config = Config().config 14 | region_for_tests = config['regions'][0] 15 | 16 | # dynamically get the cfts we want to test 17 | local_cft_path = '/roles/infra/files/' 18 | cfts = [(config['install_path'] + local_cft_path + f) for f in os.listdir(config['install_path']+local_cft_path)] 19 | 20 | # scope=module => this setup function will be run once before 21 | # executing all the test methods in this module 22 | @pytest.fixture(scope="module", params=cfts) 23 | def testenv(request): 24 | testenv = dict() 25 | testenv['cf_conn'] = boto.cloudformation.connect_to_region(region_for_tests) 26 | testenv['cft'] = request.param 27 | return testenv 28 | 29 | def test_cft(testenv): 30 | template_loc = testenv['cft'] 31 | content = open(template_loc,'r').read() 32 | testenv['cf_conn'].validate_template(template_body=content) 33 | 34 | # if we got to here, boto did not throw an exception 35 | assert True -------------------------------------------------------------------------------- /src/f5_aws/test/test_images.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import random 3 | import boto 4 | import sys 5 | import json 6 | import yaml 7 | 8 | from f5_aws.config import Config 9 | from f5_aws import image_finder, utils 10 | from utils import Region 11 | 12 | config = Config().config 13 | regions = config['regions'] 14 | #regions = ['us-west-1'] 15 | 16 | # scope=function => this setup function will be run once per 17 | # test function in this module 18 | @pytest.fixture(scope="function", params=regions) 19 | def testenv(request): 20 | testenv = dict() 21 | 22 | testenv['region'] = Region(request.param) 23 | testenv['region'].createVpc() 24 | testenv['region'].createSubnet() 25 | testenv['region'].createKeyPair() 26 | 27 | # define a finalizer which will be run to teardown 28 | # the test fixture after all dependent tests 29 | # have been run 30 | def fin(): 31 | testenv['region'].deleteAll() 32 | request.addfinalizer(fin) 33 | 34 | return testenv 35 | 36 | 37 | # Proper size, image, and virtualization type. We should 38 | # be able to pass this test, which means we are able to launch 39 | # the image. 
40 | # ...just a sanity check that our tests are working 41 | # def test_working_image_conditions(testenv): 42 | # assert utils.touchImage( 43 | # region=testenv['region'].region_name, 44 | # keyName=testenv['region'].key_name, 45 | # subnetId=testenv['region'].subnet_id, 46 | # vpcId=testenv['region'].vpc_id, 47 | # imageId='ami-4c7a3924', 48 | # instanceType='t1.micro', 49 | # ) 50 | 51 | # # this image won't launch because of a mismatch between 52 | # # the image virtualization type (hvm) and the ami definition 53 | # # ...just a sanity check that our tests are working 54 | # def test_broken_image_conditions(testenv): 55 | # assert not utils.touchImage( 56 | # region=testenv['region'].region_name, 57 | # keyName=testenv['region'].key_name, 58 | # subnetId=testenv['region'].subnet_id, 59 | # vpcId=testenv['region'].vpc_id, 60 | # imageId='ami-767a391e', 61 | # instanceType='t1.micro') 62 | 63 | def validate_linux_image(testenv, host_type): 64 | """ 65 | Here we read from our defaults settings files and playbook 66 | definitions to get the instance type and ami ids that we 67 | will launch. It's possible that a user chooses a different 68 | instance type, in which case this test will be meaningless, 69 | but it is okay for most usage scenarios. 70 | """ 71 | 72 | # get the ami id for this type of host in the working region 73 | cft = json.loads(open(config['install_path']+ 74 | '/roles/infra/files/'+host_type+'.json', 'r').read()) 75 | image_id = cft['Mappings']['AWSRegionArch2AMI'][testenv['region'].region_name]['AMI'] 76 | 77 | # get the default instance type for this type of host 78 | defaults = yaml.load(open(config['install_path']+ 79 | '/roles/inventory_manager/defaults/main.yml')) 80 | instance_type = defaults[host_type+'_instance_type'] 81 | 82 | return utils.touchImage( 83 | imageId=image_id, 84 | instanceType=instance_type, 85 | region=testenv['region'].region_name, 86 | vpcId=testenv['region'].vpc_id, 87 | keyName=testenv['region'].key_name, 88 | subnetId=testenv['region'].subnet_id) 89 | 90 | def validate_bigip_image(testenv, host_type): 91 | """ 92 | Similar to validate_linux_image, but here we are 93 | specifically testing the ability to launch BIG-IP images. 
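In outline (an illustrative summary added here): read the defaults from
roles/inventory_manager/defaults/main.yml, take the newest AMI id that
matches those settings from BigIpImageFinder().find(), and then attempt
a dry-run launch of that image.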
94 | """ 95 | # we can get all information from the defaults file, assume the user will use these 96 | defaults = yaml.load(open(config['install_path']+ 97 | '/roles/inventory_manager/defaults/main.yml')) 98 | 99 | # get the ami id 100 | module_args = { 101 | 'instance_type': defaults[host_type+'_instance_type'], 102 | 'throughput': defaults[host_type+'_license_throughput'], 103 | 'package': defaults[host_type+'_license_package'], 104 | 'license': defaults[host_type+'_license_model'], 105 | 'version': defaults[host_type+'_version'], 106 | 'region': testenv['region'].region_name 107 | } 108 | image_id = image_finder.BigIpImageFinder().find(**module_args)[0]['id'] 109 | 110 | return utils.touchImage( 111 | imageId=image_id, 112 | instanceType=defaults[host_type+'_instance_type'], 113 | region=testenv['region'].region_name, 114 | vpcId=testenv['region'].vpc_id, 115 | keyName=testenv['region'].key_name, 116 | subnetId=testenv['region'].subnet_id) 117 | 118 | # runs once per region 119 | def test_region(testenv): 120 | assert validate_linux_image(testenv, 'client') 121 | assert validate_linux_image(testenv, 'apphost') 122 | assert validate_bigip_image(testenv, 'bigip') 123 | assert validate_bigip_image(testenv, 'gtm') 124 | -------------------------------------------------------------------------------- /src/f5_aws/test/utils.py: -------------------------------------------------------------------------------- 1 | import random 2 | import boto 3 | import boto.ec2 4 | import boto.vpc 5 | from boto.exception import EC2ResponseError 6 | 7 | class Region(object): 8 | def __init__(self, region): 9 | 10 | self.uid='ut_{}'.format(random.randint(1, 10000000000)) 11 | self.region_name = region 12 | self.ec2_conn = boto.ec2.connect_to_region(region) 13 | self.vpc_conn = boto.vpc.connect_to_region(region) 14 | 15 | self.key_name = self.uid+'_kp' 16 | 17 | self.vpc_id = '' 18 | self.vpc_name = self.uid+'_vpc' 19 | self.vpc_netmask = '10.0.0.0/24' 20 | 21 | self.subnet_id = '' 22 | self.subnet_name = self.uid+'_subnet' 23 | self.subnet_netmask = self.vpc_netmask 24 | 25 | def createVpc(self): 26 | self.vpc_id = self.vpc_conn.create_vpc(self.vpc_netmask).id 27 | 28 | def deleteVpc(self): 29 | self.vpc_conn.delete_vpc(self.vpc_id) 30 | 31 | def createSubnet(self): 32 | self.subnet_id = self.vpc_conn.create_subnet(self.vpc_id, self.subnet_netmask).id 33 | 34 | def deleteSubnet(self): 35 | self.vpc_conn.delete_subnet(self.subnet_id) 36 | 37 | def createKeyPair(self): 38 | self.keypair = self.ec2_conn.create_key_pair(self.key_name) 39 | 40 | def deleteKeyPair(self): 41 | self.ec2_conn.delete_key_pair(self.key_name) 42 | 43 | def deleteAll(self): 44 | self.deleteSubnet() 45 | self.deleteVpc() 46 | self.deleteKeyPair() 47 | 48 | -------------------------------------------------------------------------------- /src/f5_aws/utils.py: -------------------------------------------------------------------------------- 1 | # utils.py 2 | 3 | import subprocess 4 | import itertools 5 | import boto.ec2 6 | from boto.exception import EC2ResponseError 7 | 8 | def convert_str(inStr): 9 | """ 10 | One of the quirks with ansible is the use of several different 11 | object notations, sometimes even one embedded within another... 12 | This function modifies the string formatting so we can read it 13 | in as a variable. 
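For example (an illustrative call, not from the original source):

    convert_str('stack_name="foo" region="us-east-1"')
    # returns {'stack_name': 'foo', 'region': 'us-east-1'}

Note that values containing spaces are not supported by this parsing.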
14 | """ 15 | l = [i.split("=") for i in inStr.split(" ")] 16 | res_arr = [i.replace('"', "") for i in list(itertools.chain(*l))] 17 | return dict(zip(res_arr[::2], res_arr[1::2])) 18 | 19 | def touchImage(region="", imageId="", 20 | keyName="", instanceType="", vpcId="", 21 | subnetId="", okayStatusCodes=[401]): 22 | """ 23 | This method attempts to launch an AMI in AWS using the dry_run 24 | flag. This allows us to check whether all the conditions 25 | required to use the image are satisfied. 26 | """ 27 | 28 | try: 29 | ec2_conn = boto.ec2.connect_to_region(region) 30 | ec2_conn.get_image(image_id=imageId).run( 31 | instance_type=instanceType, 32 | key_name=keyName, 33 | subnet_id=subnetId, 34 | dry_run=True 35 | ) 36 | except EC2ResponseError, e: 37 | 38 | status=int(e.status) 39 | 40 | if status not in okayStatusCodes: 41 | # e.reason == 'Unauthorized' => EULA needs to be accepted 42 | if int(e.status) == 401: 43 | print "Error: Unauthorized to use this image {} in {}, \ 44 | have the terms and conditions been accepted?".format( 45 | imageId, region) 46 | return False 47 | 48 | # e.reason == 'Bad Request' => bad image launch conditions 49 | # for example: 50 | # "The image id '[ami-4c7a3924]' does not exist" 51 | # "Virtualization type 'hvm' is required for instances of type 't2.micro'." 52 | elif int(e.status) == 400: 53 | print "Error: Unable to launch image with params region={}, \ 54 | imageId={}, keyName={}, instanceType={}\r\n\ 55 | \tReason was: {}".format( 56 | region, imageId, keyName, instanceType, e.message) 57 | return False 58 | 59 | # e.reason = "Precondition Failed" 60 | # for example: 61 | # Request would have succeeded, but DryRun flag is set. 62 | elif int(e.status) == 412: 63 | return True 64 | else: 65 | raise e 66 | return False 67 | return True 68 | -------------------------------------------------------------------------------- /src/setup.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from setuptools import setup, find_packages 4 | 5 | here = os.path.abspath(os.path.dirname(__file__)) 6 | with open(os.path.join(here, 'README.txt')) as f: 7 | README = f.read() 8 | 9 | setup(name='f5_aws', 10 | version='1.0.5', 11 | description='Code to deploy BIG-IP, network, and applications in AWS VPC', 12 | long_description=README, 13 | classifiers=[ 14 | "Development Status :: 3 - Alpha", 15 | "Programming Language :: Python", 16 | ], 17 | author='Chris Mutzel, Alex Applebaum', 18 | author_email='c.mutzel@f5.com, a.applebaum@f5.com', 19 | zip_safe=False, 20 | include_package_data=True, 21 | packages=find_packages() 22 | ) 23 | -------------------------------------------------------------------------------- /vagrant/Vagrantfile: -------------------------------------------------------------------------------- 1 | # -*- mode: ruby -*- 2 | # vi: set ft=ruby : 3 | 4 | # All Vagrant configuration is done below. The "2" in Vagrant.configure 5 | # configures the configuration version (we support older styles for 6 | # backwards compatibility). Please don't change it unless you know what 7 | # you're doing. 8 | Vagrant.configure(2) do |config| 9 | 10 | # Every Vagrant development environment requires a box. You can search for 11 | # boxes at https://atlas.hashicorp.com/search. 
12 | config.vm.box = "f5networks/demo" 13 | 14 | # The insecure-public-key replacement doesn't seem to work reliably 15 | # in certain versions (=~ 1.7.2 and up), so unfortunately we are leaving it out: 16 | # https://github.com/mitchellh/vagrant/issues/5186 17 | # https://github.com/mitchellh/vagrant/issues/5541 18 | # If you get repeated "default: Warning: Authentication failure. Retrying..." 19 | # messages, uncomment the line below to set the value to false: 20 | # config.ssh.insert_key=false 21 | 22 | # Create a public network, which generally matches a bridged network. 23 | # Bridged networks make the machine appear as another physical device on 24 | # your network. 25 | config.vm.network "public_network" 26 | config.vm.network :forwarded_port, guest: 8080, host: 8080 27 | config.vm.network :forwarded_port, guest: 6379, host: 6379 28 | 29 | config.vm.synced_folder "../", "/home/vagrant/aws-deployments" 30 | 31 | # basic ssh and python environment setup 32 | config.vm.provision "shell", privileged: false, path: "../build/scripts/setup_env_vbox.sh" 33 | 34 | end 35 | --------------------------------------------------------------------------------