├── .gitignore
├── LICENSE
├── README.md
├── ansible.cfg
├── ansible_plugins
│   └── filter_plugins
│       └── custom.py
├── ec2.ini
├── ec2.py
├── images
│   └── VPC.png
├── plays
│   └── operation
│       ├── README.md
│       ├── bootstrap_vpc.yml
│       └── bootstrap_vpc_user_data.j2
└── vars
    └── prod.yml
/.gitignore:
--------------------------------------------------------------------------------
1 | *.pyc
2 | *.swo
3 | *.swp
4 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | The MIT License (MIT)
2 |
3 | Copyright (c) 2015 HighOps
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Ansible repo for building an ec2 VPC with Auto Scaling NAT group
2 |
3 | *WARNING - this repo requires use of ansible v2 (devel) modules*
4 |
5 | ## Summary
6 |
7 | The playbook and example var file will create a 2-tiered AWS ec2 VPC using multiple Availability Zones (AZs). In order for the private instances to access the internet, it uses NAT instances and manages these through an auto scaling group.
8 |
9 | 
10 |
11 | When the NAT instances start, they associate themselves with an EIP (so any outbound traffic comes from a known source) and, based on what subnet they're in, attempt to replace the route. They use https://github.com/HighOps/ec2-nat-failover to do this.
12 |
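To make that behaviour concrete, here is a minimal boto sketch of the kind of boot-time logic a NAT instance runs. It is illustrative only (the actual logic lives in the ec2-nat-failover script and the launch configuration's user data); the ```take_over``` function and its arguments are assumed names, and it presumes the instance has the IAM role described in the pre-reqs below.

```
# Illustrative sketch only -- NOT the ec2-nat-failover script itself.
import boto.ec2
import boto.utils
import boto.vpc


def take_over(region, eip_allocation_id, route_table_id):
    # Identify this instance via instance metadata.
    instance_id = boto.utils.get_instance_metadata()['instance-id']

    ec2 = boto.ec2.connect_to_region(region)
    # Associate the pre-allocated EIP so outbound traffic comes from a known source.
    ec2.associate_address(instance_id=instance_id,
                          allocation_id=eip_allocation_id,
                          allow_reassociation=True)
    # A NAT instance must not perform source/destination checks.
    ec2.modify_instance_attribute(instance_id, 'sourceDestCheck', False)

    # Point the private subnets' default route at this instance.
    vpc = boto.vpc.connect_to_region(region)
    vpc.replace_route(route_table_id, '0.0.0.0/0', instance_id=instance_id)
```
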
13 | ## What's Included
14 |
15 | At present, it includes:
16 | - a variable file example for setting up a multi-AZ VPC
17 | - an operations bootstrap playbook that loads a variable file based on the ```env``` value passed via extra-vars, and then
18 | - creates the VPC
19 | - creates the internet gateway
20 | - creates the subnets
21 | - creates the route tables
22 | - creates the security groups
23 | - creates the nat instance auto scaling launch configuration
24 | - creates the nat instance auto scaling group
25 |
26 | ## Dependencies
27 |
28 | - you have ansible v2 (devel) installed, and the new vpc module pull requests merged in
29 | - you have the python boto library installed, and it's the latest version
30 | - you are using a ~/.boto config file or ENV variables for the AWS Access Key and AWS Secret Key
31 |
32 | ## Ansible setup
33 |
34 | Because it uses brand new VPC modules, _which are currently only available as Pull Requests (PRs)!_, the following setup is required:
35 |
36 | ```
37 | cd ~
38 | mkdir git ; cd git
39 | git clone git@github.com:ansible/ansible.git
40 | git clone git@github.com:ansible/ansible-modules-core.git
41 | git clone git@github.com:ansible/ansible-modules-extras.git
42 |
43 | cd ansible-modules-extras
44 | git checkout -b new_vpc_modules
45 | git fetch origin pull/651/head:ec2_vpc_igw
46 | git fetch origin pull/597/head:ec2_vpc_route_table
47 | git fetch origin pull/598/head:ec2_vpc_subnet
48 | git merge ec2_vpc_igw
49 | git merge ec2_vpc_route_table
50 | git merge ec2_vpc_subnet
51 | ```
52 | Then update the ansible.cfg library param to include the path to the ansible-modules-core and ansible-modules-extras directories.
53 |
54 | ## Pre-reqs
55 |
56 | - an ec2 keypair, set in the nat auto scaling launch configuration
57 | - a pair of allocated (but not associated) EIPs (see the allocation sketch at the end of this section)
58 | - an IAM role and policy, used by the nat instances to allocate EIPs and take over routes, e.g.
59 |   - Role: prodNATMonitor
60 |   - Policy:
61 | ```
62 | {
63 |   "Version": "2012-10-17",
64 |   "Statement": [
65 |     {
66 |       "Sid": "prodNATMonitorInstanceAccess",
67 |       "Action": [
68 |         "ec2:AssociateAddress",
69 |         "ec2:CreateRoute",
70 |         "ec2:DescribeInstances",
71 |         "ec2:DescribeRouteTables",
72 |         "ec2:ModifyInstanceAttribute",
73 |         "ec2:ReplaceRoute"
74 |       ],
75 |       "Effect": "Allow",
76 |       "Resource": "*",
77 |       "Condition": {
78 |         "ForAllValues:StringLike": {
79 |           "ec2:ResourceTag/Environment": "prod*",
80 |           "ec2:ResourceTag/Role": "nat*"
81 |         }
82 |       }
83 |     }
84 |   ]
85 | }
86 | ```
87 | _Note that the Role name should be of the form ```env + 'NATMonitor'```; if you use something else, make sure you update the var file._
88 |
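For the pair of pre-allocated EIPs above, the following is a hypothetical boto helper (not part of this repo; ```allocate_nat_eips``` is an assumed name) that allocates VPC-scoped addresses and prints the allocation IDs to put into the var file's ```nat_eipalloc_list```:

```
# Hypothetical helper, not part of this repo: pre-allocate the VPC EIPs the
# NAT launch configuration expects and print their allocation IDs for the
# nat_eipalloc_list entry in vars/<env>.yml.
import boto.ec2


def allocate_nat_eips(region, count=2):
    conn = boto.ec2.connect_to_region(region)
    # domain='vpc' allocates VPC-scoped addresses and returns allocation IDs.
    return [conn.allocate_address(domain='vpc').allocation_id
            for _ in range(count)]


if __name__ == '__main__':
    print(allocate_nat_eips('us-east-1'))  # example region; use your own
```
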
89 | ## Usage
90 |
91 | Review and create/modify a variable file; see ```vars/``` for existing examples. Changes you probably want to make include:
92 | - the region
93 | - the CIDR range and subnets
94 | - the AMI ID for the nat instance type in the region (search for ```amzn-ami-vpc-nat-hvm-2015.03.0.x86_64-gp2``` or later in the Community AMIs; see the lookup sketch after this list)
95 | - the AMI ID for a bastion instance type in the region
96 | - the ssh key_name
97 |
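As referenced in the list above, here is a hedged boto sketch of that AMI lookup (```find_nat_ami``` and the name pattern are illustrative assumptions; credentials come from your ~/.boto config file or ENV variables, per the dependencies):

```
# Find the newest Amazon-owned VPC NAT AMI in a region instead of browsing
# the Community AMIs by hand. The name pattern matches the image mentioned
# in the list above; adjust it for newer releases.
import boto.ec2


def find_nat_ami(region, pattern='amzn-ami-vpc-nat-hvm-*'):
    conn = boto.ec2.connect_to_region(region)
    images = conn.get_all_images(owners=['amazon'], filters={'name': pattern})
    # Image names embed the release date, so a name sort puts the newest last.
    return sorted(images, key=lambda i: i.name)[-1].id
```
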
98 | Once you are happy with your var file, run it, e.g.
99 |
100 |     ansible-playbook plays/operation/bootstrap_vpc.yml --extra-vars "env=prod"
101 |
102 | ## Known Issues
103 |
104 | - the ansible modules used are currently still Pull Requests, so are subject to change and have not been approved
105 | - the new modules require ansible v2 (devel), which is still under heavy development and subject to errors
106 | - the route table module currently forces the destination, which means an auto scaling setup that changes it later will cause an error in a re-run
107 |
108 | ## Troubleshooting
109 |
110 | - you get an error when creating subnets, that an availability zone doesn't exist
111 |
112 | _Not every IAM account has access to all availability zones in a region; make sure you use the ones available to yours._
113 |
114 | If you change the AZs defined, make sure that you remove any invalid subnets already created, as this won't be done automatically.
115 |
116 | - your nat instances don't automatically get an elastic ip
117 |
118 | In the auto scaling launch configuration, check the user data, and make sure that the IDs match existing objects. Note that this may _not_ be the case if you've previously created subnets and then moved them to new AZs.
119 |
120 | - your nat instances don't nat
121 | Make sure you're using the correct nat AMI. If in doubt, ssh to the bastion and then to the nat instance and verify that the user data has run.
122 |
123 | - you're near the end of the playbook, something went wrong (e.g. the bastion ami), and re-running errors out at the route table setup.
124 |
125 | Unfortunately this is a known issue. You'll need to remove the auto scaling group and the auto scaling launch configuration, then update the route tables on the private subnets so that the 0.0.0.0/0 destination uses the igw, and then you'll be able to re-run.
126 |
127 | ## Todo
128 |
129 | - create a playbook to generate an AMI with the nat_monitor script baked in
130 | - update the nat_monitor script to create the route if it doesn't exist
131 | - add handling of the IAM Policy
132 | - add updating of the IAM Policy to use specific arn values for resources
133 |
--------------------------------------------------------------------------------
/ansible.cfg:
--------------------------------------------------------------------------------
1 | # config file for ansible -- http://ansible.com/
2 | # ==============================================
3 |
4 | # nearly all parameters can be overridden in ansible-playbook
5 | # or with command line flags. ansible will read ANSIBLE_CONFIG,
6 | # ansible.cfg in the current working directory, .ansible.cfg in
7 | # the home directory or /etc/ansible/ansible.cfg, whichever it
8 | # finds first
9 |
10 | [defaults]
11 |
12 | # some basic default values...
13 |
14 | inventory = ec2.py
15 | #library = /usr/share/my_modules/
16 | library = ~/dev/ansible-modules-core:~/dev/ansible-modules-extras
17 | remote_tmp = $HOME/.ansible/tmp
18 | pattern = *
19 | forks = 5
20 | poll_interval = 15
21 | sudo_user = root
22 | #ask_sudo_pass = True
23 | #ask_pass = True
24 | transport = smart
25 | #remote_port = 22
26 | module_lang = C
27 |
28 | # plays will gather facts by default, which contain information about
29 | # the remote system.
30 | #
31 | # smart - gather by default, but don't regather if already gathered
32 | # implicit - gather by default, turn off with gather_facts: False
33 | # explicit - do not gather by default, must say gather_facts: True
34 | gathering = implicit
35 |
36 | # additional paths to search for roles in, colon separated
37 | #roles_path = /etc/ansible/roles
38 | roles_path = roles
39 |
40 | # uncomment this to disable SSH key host checking
41 | host_key_checking = False
42 |
43 | # change this for alternative sudo implementations
44 | sudo_exe = sudo
45 |
46 | # what flags to pass to sudo
47 | #sudo_flags = -H
48 |
49 | # SSH timeout
50 | timeout = 10
51 |
52 | # default user to use for playbooks if user is not specified
53 | # (/usr/bin/ansible will use current user as default)
54 | #remote_user = root
55 |
56 | # logging is off by default unless this path is defined
57 | # if so defined, consider logrotate
58 | #log_path = /var/log/ansible.log
59 |
60 | # default module name for /usr/bin/ansible
61 | #module_name = command
62 |
63 | # use this shell for commands executed under sudo
64 | # you may need to change this to bin/bash in rare instances
65 | # if sudo is constrained
66 | #executable = /bin/sh
67 |
68 | # if inventory variables overlap, does the higher precedence one win
69 | # or are hash values merged together? The default is 'replace' but
70 | # this can also be set to 'merge'.
71 | #hash_behaviour = replace
72 |
73 | # list any Jinja2 extensions to enable here:
74 | #jinja2_extensions = jinja2.ext.do,jinja2.ext.i18n
75 |
76 | # if set, always use this private key file for authentication, same as
77 | # if passing --private-key to ansible or ansible-playbook
78 | #private_key_file = /path/to/file
79 |
80 | # format of string {{ ansible_managed }} available within Jinja2
81 | # templates; it indicates to users editing template files that they will be replaced,
82 | # replacing {file}, {host} and {uid} and strftime codes with proper values.
83 | ansible_managed = Ansible managed: {file} modified on %Y-%m-%d %H:%M:%S by {uid} on {host}
84 |
85 | # by default, ansible-playbook will display "Skipping [host]" if it determines a task
86 | # should not be run on a host. Set this to "False" if you don't want to see these "Skipping"
87 | # messages. NOTE: the task header will still be shown regardless of whether or not the
88 | # task is skipped.
89 | #display_skipped_hosts = True
90 |
91 | # by default (as of 1.3), Ansible will raise errors when attempting to dereference
92 | # Jinja2 variables that are not set in templates or action lines. Uncomment this line
93 | # to revert the behavior to pre-1.3.
94 | #error_on_undefined_vars = False
95 |
96 | # by default (as of 1.6), Ansible may display warnings based on the configuration of the
97 | # system running ansible itself. This may include warnings about 3rd party packages or
98 | # other conditions that should be resolved if possible.
99 | # to disable these warnings, set the following value to False:
100 | #system_warnings = True
101 |
102 | # by default (as of 1.4), Ansible may display deprecation warnings for language
103 | # features that should no longer be used and will be removed in future versions.
104 | # to disable these warnings, set the following value to False:
105 | #deprecation_warnings = True
106 |
107 | # (as of 1.8), Ansible can optionally warn when usage of the shell and
108 | # command module appear to be simplified by using a default Ansible module
109 | # instead. These warnings can be silenced by adjusting the following
110 | # setting or adding warn=yes or warn=no to the end of the command line
111 | # parameter string. This will for example suggest using the git module
112 | # instead of shelling out to the git command.
113 | # command_warnings = False
114 |
115 |
116 | # set plugin path directories here, separate with colons
117 | action_plugins = ./ansible_plugins/action_plugins
118 | callback_plugins = ./ansible_plugins/callback_plugins
119 | connection_plugins = ./ansible_plugins/connection_plugins
120 | lookup_plugins = ./ansible_plugins/lookup_plugins
121 | vars_plugins = ./ansible_plugins/vars_plugins
122 | filter_plugins = ./ansible_plugins/filter_plugins
123 |
124 | # by default callbacks are not loaded for /bin/ansible, enable this if you
125 | # want, for example, a notification or logging callback to also apply to
126 | # /bin/ansible runs
127 | #bin_ansible_callbacks = False
128 |
129 |
130 | # don't like cows? that's unfortunate.
131 | # set to 1 if you don't want cowsay support or export ANSIBLE_NOCOWS=1
132 | #nocows = 1
133 |
134 | # don't like colors either?
135 | # set to 1 if you don't want colors, or export ANSIBLE_NOCOLOR=1
136 | #nocolor = 1
137 |
138 | # the CA certificate path used for validating SSL certs. This path
139 | # should exist on the controlling node, not the target nodes
140 | # common locations:
141 | # RHEL/CentOS: /etc/pki/tls/certs/ca-bundle.crt
142 | # Fedora : /etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem
143 | # Ubuntu : /usr/share/ca-certificates/cacert.org/cacert.org.crt
144 | #ca_file_path =
145 |
146 | # the http user-agent string to use when fetching urls. Some web server
147 | # operators block the default urllib user agent as it is frequently used
148 | # by malicious attacks/scripts, so we set it to something unique to
149 | # avoid issues.
150 | #http_user_agent = ansible-agent
151 |
152 | # if set to a persistent type (not 'memory', for example 'redis') fact values
153 | # from previous runs in Ansible will be stored. This may be useful when
154 | # wanting to use, for example, IP information from one group of servers
155 | # without having to talk to them in the same playbook run to get their
156 | # current IP information.
157 | fact_caching = memory
158 |
159 |
160 | # retry files
161 | #retry_files_enabled = False
162 | #retry_files_save_path = ~/.ansible-retry
163 |
164 | [privilege_escalation]
165 | #become=True
166 | #become_method='sudo'
167 | #become_user='root'
168 | #become_ask_pass=False
169 |
170 | [paramiko_connection]
171 |
172 | # uncomment this line to cause the paramiko connection plugin to not record new host
173 | # keys encountered. Increases performance on new host additions. Setting works independently of the
174 | # host key checking setting above.
175 | #record_host_keys=False
176 |
177 | # by default, Ansible requests a pseudo-terminal for commands executed under sudo. Uncomment this
178 | # line to disable this behaviour.
179 | #pty=False
180 |
181 | [ssh_connection]
182 |
183 | # ssh arguments to use
184 | # Leaving off ControlPersist will result in poor performance, so use
185 | # paramiko on older platforms rather than removing it
186 | #ssh_args = -o ControlMaster=auto -o ControlPersist=60s
187 |
188 | # The path to use for the ControlPath sockets. This defaults to
189 | # "%(directory)s/ansible-ssh-%%h-%%p-%%r", however on some systems with
190 | # very long hostnames or very long path names (caused by long user names or
191 | # deeply nested home directories) this can exceed the character limit on
192 | # file socket names (108 characters for most platforms). In that case, you
193 | # may wish to shorten the string below.
194 | #
195 | # Example:
196 | # control_path = %(directory)s/%%h-%%r
197 | #control_path = %(directory)s/ansible-ssh-%%h-%%p-%%r
198 |
199 | # Enabling pipelining reduces the number of SSH operations required to
200 | # execute a module on the remote server. This can result in a significant
201 | # performance improvement when enabled, however when using "sudo:" you must
202 | # first disable 'requiretty' in /etc/sudoers
203 | #
204 | # By default, this option is disabled to preserve compatibility with
205 | # sudoers configurations that have requiretty (the default on many distros).
206 | #
207 | #pipelining = False
208 |
209 | # if True, make ansible use scp if the connection type is ssh
210 | # (default is sftp)
211 | #scp_if_ssh = True
212 |
213 | [accelerate]
214 | accelerate_port = 5099
215 | accelerate_timeout = 30
216 | accelerate_connect_timeout = 5.0
217 |
218 | # The daemon timeout is measured in minutes. This time is measured
219 | # from the last activity to the accelerate daemon.
220 | accelerate_daemon_timeout = 30
221 |
222 | # If set to yes, accelerate_multi_key will allow multiple
223 | # private keys to be uploaded to it, though each user must
224 | # have access to the system via SSH to add a new key. The default
225 | # is "no".
226 | #accelerate_multi_key = yes
227 |
228 |
--------------------------------------------------------------------------------
/ansible_plugins/filter_plugins/custom.py:
--------------------------------------------------------------------------------
1 | from jinja2.utils import soft_unicode
2 |
3 | def get_security_groups(value, match_key, match_value, return_key='group_id'):
4 | # 'value' input is expected to be a results list from the ec2_group module
5 | # it looks for a key, e.g. 'name' and a value, e.g. 'prod_nat' and returns
6 | # the return_key, e.g. 'group_id' of all that match in a list
7 | results = []
8 | for item in value:
9 | if match_key in item['item'].keys():
10 | if item['item'][match_key] == match_value:
11 | results.append(item[return_key])
12 |
13 | return results
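
# Illustrative example with hypothetical values: given results registered from
# an ec2_group task looped with with_items, e.g.
#   [{'item': {'name': 'prod_nat', ...}, 'group_id': 'sg-11111111', ...},
#    {'item': {'name': 'prod_web', ...}, 'group_id': 'sg-22222222', ...}]
# get_security_groups(results, 'name', 'prod_nat') would return ['sg-11111111'].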
14 |
15 | def get_subnet_route_map(value, routes, tag_key='Type', tag_value='public'):
16 | # given a list of subnet results from the ec2_vpc_subnet task and a list
17 | # of route results from the ec2_vpc_route_table task, return a list of
18 | # dicts of public subnet_id : route_id mapping where the public subnet
19 | # is in the same az as the private subnet the route is associated with
20 |
21 | # assuming all private subnets in a routing table are in the same az!
22 |
23 | mapping = []
24 | route_az_map = {}
25 | no_routes = {}
26 |
27 | # create a list of route_id:az pairs
28 | for r in routes:
29 | for s in value:
30 | subnet_in_route = False
31 | # the route table task can take a name, cidr or id
32 | if 'Name' in s['subnet']['tags']:
33 | if s['subnet']['tags']['Name'] in r['item']['subnets']:
34 | subnet_in_route = True
35 | elif s['subnet']['cidr'] in r['item']['subnets']:
36 | subnet_in_route = True
37 | elif s['subnet']['id'] in r['item']['subnets']:
38 | subnet_in_route = True
39 |
40 | if subnet_in_route:
41 | route_az_map[r['route_table_id']] = s['subnet']['availability_zone']
42 |
43 | # assume a distinguishing tag exists
44 |
45 | # get a mapping of key (public) subnets to az
46 | subnet_az_map = {}
47 | for s in value:
48 | if s['subnet']['tags'][tag_key] == tag_value:
49 | subnet_az_map[s['subnet']['id']] = s['subnet']['availability_zone']
50 |
51 | # now loop through the route:az's, and find a matching public subnet based on az
52 | for route_table_id,route_az in route_az_map.iteritems():
53 | for subnet_id,subnet_az in subnet_az_map.iteritems():
54 | if route_az == subnet_az:
55 | mapping.append({'subnet_id':subnet_id, 'route_table_id':route_table_id })
56 |
57 | return mapping
58 |
59 | def get_subnets(value, tag_key, tag_value, return_type='subnet_id'):
60 | # return all subnets that match
61 | subnets = []
62 | for item in value:
63 |         for key, val in item['subnet']['tags'].iteritems():
64 |             if key == tag_key and val == tag_value:
65 |                 subnets.append(item['subnet']['id'])
66 |
67 | return subnets
68 |
69 | def get_zip(value, list1):
70 | # return zipped result of 2 lists
71 | return zip(value, list1)
72 |
73 | class FilterModule(object):
74 | ''' Ansible jinja2 filters '''
75 |
76 | def filters(self):
77 | return {
78 | 'get_security_groups': get_security_groups,
79 | 'get_subnet_route_map': get_subnet_route_map,
80 | 'get_subnets': get_subnets,
81 | 'get_zip': get_zip,
82 | }
83 |
--------------------------------------------------------------------------------
/ec2.ini:
--------------------------------------------------------------------------------
1 | # Ansible EC2 external inventory script settings
2 | #
3 |
4 | [ec2]
5 |
6 | # to talk to a private eucalyptus instance uncomment these lines
7 | # and edit eucalyptus_host to be the host name of your cloud controller
8 | #eucalyptus = True
9 | #eucalyptus_host = clc.cloud.domain.org
10 |
11 | # AWS regions to make calls to. Set this to 'all' to make request to all regions
12 | # in AWS and merge the results together. Alternatively, set this to a comma
13 | # separated list of regions. E.g. 'us-east-1,us-west-1,us-west-2'
14 | regions = all
15 | regions_exclude = us-gov-west-1,cn-north-1
16 |
17 | # When generating inventory, Ansible needs to know how to address a server.
18 | # Each EC2 instance has a lot of variables associated with it. Here is the list:
19 | # http://docs.pythonboto.org/en/latest/ref/ec2.html#module-boto.ec2.instance
20 | # Below are 2 variables that are used as the address of a server:
21 | # - destination_variable
22 | # - vpc_destination_variable
23 |
24 | # This is the normal destination variable to use. If you are running Ansible
25 | # from outside EC2, then 'public_dns_name' makes the most sense. If you are
26 | # running Ansible from within EC2, then perhaps you want to use the internal
27 | # address, and should set this to 'private_dns_name'. The key of an EC2 tag
28 | # may optionally be used; however the boto instance variables hold precedence
29 | # in the event of a collision.
30 | destination_variable = public_dns_name
31 |
32 | # For servers inside a VPC, using DNS names may not make sense. When an instance
33 | # has 'subnet_id' set, this variable is used. If the subnet is public, setting
34 | # this to 'ip_address' will return the public IP address. For instances in a
35 | # private subnet, this should be set to 'private_ip_address', and Ansible must
36 | # be run from within EC2. The key of an EC2 tag may optionally be used; however
37 | # the boto instance variables hold precedence in the event of a collision.
38 | # WARNING: - instances that are in the private vpc, _without_ public ip address
39 | # will not be listed in the inventory until you set:
40 | # vpc_destination_variable = 'private_ip_address'
41 | vpc_destination_variable = ip_address
42 |
43 | # To tag instances on EC2 with the resource records that point to them from
44 | # Route53, uncomment and set 'route53' to True.
45 | route53 = False
46 |
47 | # To exclude RDS instances from the inventory, uncomment and set to False.
48 | #rds = False
49 |
50 | # Additionally, you can specify the list of zones to exclude looking up in
51 | # 'route53_excluded_zones' as a comma-separated list.
52 | # route53_excluded_zones = samplezone1.com, samplezone2.com
53 |
54 | # By default, only EC2 instances in the 'running' state are returned. Set
55 | # 'all_instances' to True to return all instances regardless of state.
56 | all_instances = False
57 |
58 | # By default, only RDS instances in the 'available' state are returned. Set
59 | # 'all_rds_instances' to True to return all RDS instances regardless of state.
60 | all_rds_instances = False
61 |
62 | # API calls to EC2 are slow. For this reason, we cache the results of an API
63 | # call. Set this to the path you want cache files to be written to. Two files
64 | # will be written to this directory:
65 | # - ansible-ec2.cache
66 | # - ansible-ec2.index
67 | cache_path = ~/.ansible/tmp
68 |
69 | # The number of seconds a cache file is considered valid. After this many
70 | # seconds, a new API call will be made, and the cache file will be updated.
71 | # To disable the cache, set this value to 0
72 | cache_max_age = 300
73 |
74 | # Organize groups into a nested/hierarchy instead of a flat namespace.
75 | nested_groups = False
76 |
77 | # The EC2 inventory output can become very large. To manage its size,
78 | # configure which groups should be created.
79 | group_by_instance_id = True
80 | group_by_region = True
81 | group_by_availability_zone = True
82 | group_by_ami_id = True
83 | group_by_instance_type = True
84 | group_by_key_pair = True
85 | group_by_vpc_id = True
86 | group_by_security_group = True
87 | group_by_tag_keys = True
88 | group_by_tag_none = True
89 | group_by_route53_names = True
90 | group_by_rds_engine = True
91 | group_by_rds_parameter_group = True
92 |
93 | # If you only want to include hosts that match a certain regular expression
94 | # pattern_include = stage-*
95 |
96 | # If you want to exclude any hosts that match a certain regular expression
97 | # pattern_exclude = stage-*
98 |
99 | # Instance filters can be used to control which instances are retrieved for
100 | # inventory. For the full list of possible filters, please read the EC2 API
101 | # docs: http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-DescribeInstances.html#query-DescribeInstances-filters
102 | # Filters are key/value pairs separated by '=', to list multiple filters use
103 | # a list separated by commas. See examples below.
104 |
105 | # Retrieve only instances with (key=value) env=stage tag
106 | # instance_filters = tag:env=stage
107 |
108 | # Retrieve only instances with role=webservers OR role=dbservers tag
109 | # instance_filters = tag:role=webservers,tag:role=dbservers
110 |
111 | # Retrieve only t1.micro instances OR instances with tag env=stage
112 | # instance_filters = instance-type=t1.micro,tag:env=stage
113 |
114 | # You can use wildcards in filter values also. Below will list instances which
115 | # tag Name value matches webservers1*
116 | # (ex. webservers15, webservers1a, webservers123 etc)
117 | # instance_filters = tag:Name=webservers1*
118 |
--------------------------------------------------------------------------------
/ec2.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | '''
4 | EC2 external inventory script
5 | =================================
6 |
7 | Generates inventory that Ansible can understand by making API request to
8 | AWS EC2 using the Boto library.
9 |
10 | NOTE: This script assumes Ansible is being executed where the environment
11 | variables needed for Boto have already been set:
12 | export AWS_ACCESS_KEY_ID='AK123'
13 | export AWS_SECRET_ACCESS_KEY='abc123'
14 |
15 | This script also assumes there is an ec2.ini file alongside it. To specify a
16 | different path to ec2.ini, define the EC2_INI_PATH environment variable:
17 |
18 | export EC2_INI_PATH=/path/to/my_ec2.ini
19 |
20 | If you're using eucalyptus you need to set the above variables and
21 | you need to define:
22 |
23 | export EC2_URL=http://hostname_of_your_cc:port/services/Eucalyptus
24 |
25 | For more details, see: http://docs.pythonboto.org/en/latest/boto_config_tut.html
26 |
27 | When run against a specific host, this script returns the following variables:
28 | - ec2_ami_launch_index
29 | - ec2_architecture
30 | - ec2_association
31 | - ec2_attachTime
32 | - ec2_attachment
33 | - ec2_attachmentId
34 | - ec2_client_token
35 | - ec2_deleteOnTermination
36 | - ec2_description
37 | - ec2_deviceIndex
38 | - ec2_dns_name
39 | - ec2_eventsSet
40 | - ec2_group_name
41 | - ec2_hypervisor
42 | - ec2_id
43 | - ec2_image_id
44 | - ec2_instanceState
45 | - ec2_instance_type
46 | - ec2_ipOwnerId
47 | - ec2_ip_address
48 | - ec2_item
49 | - ec2_kernel
50 | - ec2_key_name
51 | - ec2_launch_time
52 | - ec2_monitored
53 | - ec2_monitoring
54 | - ec2_networkInterfaceId
55 | - ec2_ownerId
56 | - ec2_persistent
57 | - ec2_placement
58 | - ec2_platform
59 | - ec2_previous_state
60 | - ec2_private_dns_name
61 | - ec2_private_ip_address
62 | - ec2_publicIp
63 | - ec2_public_dns_name
64 | - ec2_ramdisk
65 | - ec2_reason
66 | - ec2_region
67 | - ec2_requester_id
68 | - ec2_root_device_name
69 | - ec2_root_device_type
70 | - ec2_security_group_ids
71 | - ec2_security_group_names
72 | - ec2_shutdown_state
73 | - ec2_sourceDestCheck
74 | - ec2_spot_instance_request_id
75 | - ec2_state
76 | - ec2_state_code
77 | - ec2_state_reason
78 | - ec2_status
79 | - ec2_subnet_id
80 | - ec2_tenancy
81 | - ec2_virtualization_type
82 | - ec2_vpc_id
83 |
84 | These variables are pulled out of a boto.ec2.instance object. There is a lack of
85 | consistency with variable spellings (camelCase and underscores) since this
86 | just loops through all variables the object exposes. It is preferred to use the
87 | ones with underscores when multiple exist.
88 |
89 | In addition, if an instance has AWS Tags associated with it, each tag is a new
90 | variable named:
91 | - ec2_tag_[Key] = [Value]
92 |
93 | Security groups are comma-separated in 'ec2_security_group_ids' and
94 | 'ec2_security_group_names'.
95 | '''
96 |
97 | # (c) 2012, Peter Sankauskas
98 | #
99 | # This file is part of Ansible,
100 | #
101 | # Ansible is free software: you can redistribute it and/or modify
102 | # it under the terms of the GNU General Public License as published by
103 | # the Free Software Foundation, either version 3 of the License, or
104 | # (at your option) any later version.
105 | #
106 | # Ansible is distributed in the hope that it will be useful,
107 | # but WITHOUT ANY WARRANTY; without even the implied warranty of
108 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
109 | # GNU General Public License for more details.
110 | #
111 | # You should have received a copy of the GNU General Public License
112 | # along with Ansible. If not, see <http://www.gnu.org/licenses/>.
113 |
114 | ######################################################################
115 |
116 | import sys
117 | import os
118 | import argparse
119 | import re
120 | from time import time
121 | import boto
122 | from boto import ec2
123 | from boto import rds
124 | from boto import route53
125 | import six
126 |
127 | from six.moves import configparser
128 | from collections import defaultdict
129 |
130 | try:
131 | import json
132 | except ImportError:
133 | import simplejson as json
134 |
135 |
136 | class Ec2Inventory(object):
137 | def _empty_inventory(self):
138 | return {"_meta" : {"hostvars" : {}}}
139 |
140 | def __init__(self):
141 | ''' Main execution path '''
142 |
143 | # Inventory grouped by instance IDs, tags, security groups, regions,
144 | # and availability zones
145 | self.inventory = self._empty_inventory()
146 |
147 | # Index of hostname (address) to instance ID
148 | self.index = {}
149 |
150 | # Read settings and parse CLI arguments
151 | self.read_settings()
152 | self.parse_cli_args()
153 |
154 | # Cache
155 | if self.args.refresh_cache:
156 | self.do_api_calls_update_cache()
157 | elif not self.is_cache_valid():
158 | self.do_api_calls_update_cache()
159 |
160 | # Data to print
161 | if self.args.host:
162 | data_to_print = self.get_host_info()
163 |
164 | elif self.args.list:
165 | # Display list of instances for inventory
166 | if self.inventory == self._empty_inventory():
167 | data_to_print = self.get_inventory_from_cache()
168 | else:
169 | data_to_print = self.json_format_dict(self.inventory, True)
170 |
171 | print(data_to_print)
172 |
173 |
174 | def is_cache_valid(self):
175 | ''' Determines if the cache files have expired, or if it is still valid '''
176 |
177 | if os.path.isfile(self.cache_path_cache):
178 | mod_time = os.path.getmtime(self.cache_path_cache)
179 | current_time = time()
180 | if (mod_time + self.cache_max_age) > current_time:
181 | if os.path.isfile(self.cache_path_index):
182 | return True
183 |
184 | return False
185 |
186 |
187 | def read_settings(self):
188 | ''' Reads the settings from the ec2.ini file '''
189 | if six.PY2:
190 | config = configparser.SafeConfigParser()
191 | else:
192 | config = configparser.ConfigParser()
193 | ec2_default_ini_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'ec2.ini')
194 | ec2_ini_path = os.environ.get('EC2_INI_PATH', ec2_default_ini_path)
195 | config.read(ec2_ini_path)
196 |
197 | # is eucalyptus?
198 | self.eucalyptus_host = None
199 | self.eucalyptus = False
200 | if config.has_option('ec2', 'eucalyptus'):
201 | self.eucalyptus = config.getboolean('ec2', 'eucalyptus')
202 | if self.eucalyptus and config.has_option('ec2', 'eucalyptus_host'):
203 | self.eucalyptus_host = config.get('ec2', 'eucalyptus_host')
204 |
205 | # Regions
206 | self.regions = []
207 | configRegions = config.get('ec2', 'regions')
208 | configRegions_exclude = config.get('ec2', 'regions_exclude')
209 | if (configRegions == 'all'):
210 | if self.eucalyptus_host:
211 | self.regions.append(boto.connect_euca(host=self.eucalyptus_host).region.name)
212 | else:
213 | for regionInfo in ec2.regions():
214 | if regionInfo.name not in configRegions_exclude:
215 | self.regions.append(regionInfo.name)
216 | else:
217 | self.regions = configRegions.split(",")
218 |
219 | # Destination addresses
220 | self.destination_variable = config.get('ec2', 'destination_variable')
221 | self.vpc_destination_variable = config.get('ec2', 'vpc_destination_variable')
222 |
223 | # Route53
224 | self.route53_enabled = config.getboolean('ec2', 'route53')
225 | self.route53_excluded_zones = []
226 | if config.has_option('ec2', 'route53_excluded_zones'):
227 | self.route53_excluded_zones.extend(
228 | config.get('ec2', 'route53_excluded_zones', '').split(','))
229 |
230 | # Include RDS instances?
231 | self.rds_enabled = True
232 | if config.has_option('ec2', 'rds'):
233 | self.rds_enabled = config.getboolean('ec2', 'rds')
234 |
235 | # Return all EC2 and RDS instances (if RDS is enabled)
236 | if config.has_option('ec2', 'all_instances'):
237 | self.all_instances = config.getboolean('ec2', 'all_instances')
238 | else:
239 | self.all_instances = False
240 | if config.has_option('ec2', 'all_rds_instances') and self.rds_enabled:
241 | self.all_rds_instances = config.getboolean('ec2', 'all_rds_instances')
242 | else:
243 | self.all_rds_instances = False
244 |
245 | # Cache related
246 | cache_dir = os.path.expanduser(config.get('ec2', 'cache_path'))
247 | if not os.path.exists(cache_dir):
248 | os.makedirs(cache_dir)
249 |
250 | self.cache_path_cache = cache_dir + "/ansible-ec2.cache"
251 | self.cache_path_index = cache_dir + "/ansible-ec2.index"
252 | self.cache_max_age = config.getint('ec2', 'cache_max_age')
253 |
254 | # Configure nested groups instead of flat namespace.
255 | if config.has_option('ec2', 'nested_groups'):
256 | self.nested_groups = config.getboolean('ec2', 'nested_groups')
257 | else:
258 | self.nested_groups = False
259 |
260 | # Configure which groups should be created.
261 | group_by_options = [
262 | 'group_by_instance_id',
263 | 'group_by_region',
264 | 'group_by_availability_zone',
265 | 'group_by_ami_id',
266 | 'group_by_instance_type',
267 | 'group_by_key_pair',
268 | 'group_by_vpc_id',
269 | 'group_by_security_group',
270 | 'group_by_tag_keys',
271 | 'group_by_tag_none',
272 | 'group_by_route53_names',
273 | 'group_by_rds_engine',
274 | 'group_by_rds_parameter_group',
275 | ]
276 | for option in group_by_options:
277 | if config.has_option('ec2', option):
278 | setattr(self, option, config.getboolean('ec2', option))
279 | else:
280 | setattr(self, option, True)
281 |
282 | # Do we need to just include hosts that match a pattern?
283 | try:
284 | pattern_include = config.get('ec2', 'pattern_include')
285 | if pattern_include and len(pattern_include) > 0:
286 | self.pattern_include = re.compile(pattern_include)
287 | else:
288 | self.pattern_include = None
289 | except configparser.NoOptionError as e:
290 | self.pattern_include = None
291 |
292 | # Do we need to exclude hosts that match a pattern?
293 | try:
294 | pattern_exclude = config.get('ec2', 'pattern_exclude');
295 | if pattern_exclude and len(pattern_exclude) > 0:
296 | self.pattern_exclude = re.compile(pattern_exclude)
297 | else:
298 | self.pattern_exclude = None
299 | except configparser.NoOptionError as e:
300 | self.pattern_exclude = None
301 |
302 | # Instance filters (see boto and EC2 API docs). Ignore invalid filters.
303 | self.ec2_instance_filters = defaultdict(list)
304 | if config.has_option('ec2', 'instance_filters'):
305 | for instance_filter in config.get('ec2', 'instance_filters', '').split(','):
306 | instance_filter = instance_filter.strip()
307 | if not instance_filter or '=' not in instance_filter:
308 | continue
309 | filter_key, filter_value = [x.strip() for x in instance_filter.split('=', 1)]
310 | if not filter_key:
311 | continue
312 | self.ec2_instance_filters[filter_key].append(filter_value)
313 |
314 | def parse_cli_args(self):
315 | ''' Command line argument processing '''
316 |
317 | parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on EC2')
318 | parser.add_argument('--list', action='store_true', default=True,
319 | help='List instances (default: True)')
320 | parser.add_argument('--host', action='store',
321 | help='Get all the variables about a specific instance')
322 | parser.add_argument('--refresh-cache', action='store_true', default=False,
323 | help='Force refresh of cache by making API requests to EC2 (default: False - use cache files)')
324 | self.args = parser.parse_args()
325 |
326 |
327 | def do_api_calls_update_cache(self):
328 | ''' Do API calls to each region, and save data in cache files '''
329 |
330 | if self.route53_enabled:
331 | self.get_route53_records()
332 |
333 | for region in self.regions:
334 | self.get_instances_by_region(region)
335 | if self.rds_enabled:
336 | self.get_rds_instances_by_region(region)
337 |
338 | self.write_to_cache(self.inventory, self.cache_path_cache)
339 | self.write_to_cache(self.index, self.cache_path_index)
340 |
341 | def connect(self, region):
342 | ''' create connection to api server'''
343 | if self.eucalyptus:
344 | conn = boto.connect_euca(host=self.eucalyptus_host)
345 | conn.APIVersion = '2010-08-31'
346 | else:
347 | conn = ec2.connect_to_region(region)
348 | # connect_to_region will fail "silently" by returning None if the region name is wrong or not supported
349 | if conn is None:
350 | self.fail_with_error("region name: %s likely not supported, or AWS is down. connection to region failed." % region)
351 | return conn
352 |
353 | def get_instances_by_region(self, region):
354 | ''' Makes an AWS EC2 API call to the list of instances in a particular
355 | region '''
356 |
357 | try:
358 | conn = self.connect(region)
359 | reservations = []
360 | if self.ec2_instance_filters:
361 | for filter_key, filter_values in self.ec2_instance_filters.items():
362 | reservations.extend(conn.get_all_instances(filters = { filter_key : filter_values }))
363 | else:
364 | reservations = conn.get_all_instances()
365 |
366 | for reservation in reservations:
367 | for instance in reservation.instances:
368 | self.add_instance(instance, region)
369 |
370 | except boto.exception.BotoServerError as e:
371 | if e.error_code == 'AuthFailure':
372 | error = self.get_auth_error_message()
373 | else:
374 | backend = 'Eucalyptus' if self.eucalyptus else 'AWS'
375 | error = "Error connecting to %s backend.\n%s" % (backend, e.message)
376 | self.fail_with_error(error)
377 |
378 | def get_rds_instances_by_region(self, region):
379 | ''' Makes an AWS API call to the list of RDS instances in a particular
380 | region '''
381 |
382 | try:
383 | conn = rds.connect_to_region(region)
384 | if conn:
385 | instances = conn.get_all_dbinstances()
386 | for instance in instances:
387 | self.add_rds_instance(instance, region)
388 | except boto.exception.BotoServerError as e:
389 | error = e.reason
390 |
391 | if e.error_code == 'AuthFailure':
392 | error = self.get_auth_error_message()
393 | if not e.reason == "Forbidden":
394 | error = "Looks like AWS RDS is down:\n%s" % e.message
395 | self.fail_with_error(error)
396 |
397 | def get_auth_error_message(self):
398 | ''' create an informative error message if there is an issue authenticating'''
399 | errors = ["Authentication error retrieving ec2 inventory."]
400 | if None in [os.environ.get('AWS_ACCESS_KEY_ID'), os.environ.get('AWS_SECRET_ACCESS_KEY')]:
401 | errors.append(' - No AWS_ACCESS_KEY_ID or AWS_SECRET_ACCESS_KEY environment vars found')
402 | else:
403 | errors.append(' - AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment vars found but may not be correct')
404 |
405 | boto_paths = ['/etc/boto.cfg', '~/.boto', '~/.aws/credentials']
406 | boto_config_found = list(p for p in boto_paths if os.path.isfile(os.path.expanduser(p)))
407 | if len(boto_config_found) > 0:
408 | errors.append(" - Boto configs found at '%s', but the credentials contained may not be correct" % ', '.join(boto_config_found))
409 | else:
410 | errors.append(" - No Boto config found at any expected location '%s'" % ', '.join(boto_paths))
411 |
412 | return '\n'.join(errors)
413 |
414 | def fail_with_error(self, err_msg):
415 | '''log an error to std err for ansible-playbook to consume and exit'''
416 | sys.stderr.write(err_msg)
417 | sys.exit(1)
418 |
419 | def get_instance(self, region, instance_id):
420 | conn = self.connect(region)
421 |
422 | reservations = conn.get_all_instances([instance_id])
423 | for reservation in reservations:
424 | for instance in reservation.instances:
425 | return instance
426 |
427 | def add_instance(self, instance, region):
428 | ''' Adds an instance to the inventory and index, as long as it is
429 | addressable '''
430 |
431 | # Only want running instances unless all_instances is True
432 | if not self.all_instances and instance.state != 'running':
433 | return
434 |
435 | # Select the best destination address
436 | if instance.subnet_id:
437 | dest = getattr(instance, self.vpc_destination_variable, None)
438 | if dest is None:
439 | dest = getattr(instance, 'tags').get(self.vpc_destination_variable, None)
440 | else:
441 | dest = getattr(instance, self.destination_variable, None)
442 | if dest is None:
443 | dest = getattr(instance, 'tags').get(self.destination_variable, None)
444 |
445 | if not dest:
446 | # Skip instances we cannot address (e.g. private VPC subnet)
447 | return
448 |
449 | # if we only want to include hosts that match a pattern, skip those that don't
450 | if self.pattern_include and not self.pattern_include.match(dest):
451 | return
452 |
453 | # if we need to exclude hosts that match a pattern, skip those
454 | if self.pattern_exclude and self.pattern_exclude.match(dest):
455 | return
456 |
457 | # Add to index
458 | self.index[dest] = [region, instance.id]
459 |
460 | # Inventory: Group by instance ID (always a group of 1)
461 | if self.group_by_instance_id:
462 | self.inventory[instance.id] = [dest]
463 | if self.nested_groups:
464 | self.push_group(self.inventory, 'instances', instance.id)
465 |
466 | # Inventory: Group by region
467 | if self.group_by_region:
468 | self.push(self.inventory, region, dest)
469 | if self.nested_groups:
470 | self.push_group(self.inventory, 'regions', region)
471 |
472 | # Inventory: Group by availability zone
473 | if self.group_by_availability_zone:
474 | self.push(self.inventory, instance.placement, dest)
475 | if self.nested_groups:
476 | if self.group_by_region:
477 | self.push_group(self.inventory, region, instance.placement)
478 | self.push_group(self.inventory, 'zones', instance.placement)
479 |
480 | # Inventory: Group by Amazon Machine Image (AMI) ID
481 | if self.group_by_ami_id:
482 | ami_id = self.to_safe(instance.image_id)
483 | self.push(self.inventory, ami_id, dest)
484 | if self.nested_groups:
485 | self.push_group(self.inventory, 'images', ami_id)
486 |
487 | # Inventory: Group by instance type
488 | if self.group_by_instance_type:
489 | type_name = self.to_safe('type_' + instance.instance_type)
490 | self.push(self.inventory, type_name, dest)
491 | if self.nested_groups:
492 | self.push_group(self.inventory, 'types', type_name)
493 |
494 | # Inventory: Group by key pair
495 | if self.group_by_key_pair and instance.key_name:
496 | key_name = self.to_safe('key_' + instance.key_name)
497 | self.push(self.inventory, key_name, dest)
498 | if self.nested_groups:
499 | self.push_group(self.inventory, 'keys', key_name)
500 |
501 | # Inventory: Group by VPC
502 | if self.group_by_vpc_id and instance.vpc_id:
503 | vpc_id_name = self.to_safe('vpc_id_' + instance.vpc_id)
504 | self.push(self.inventory, vpc_id_name, dest)
505 | if self.nested_groups:
506 | self.push_group(self.inventory, 'vpcs', vpc_id_name)
507 |
508 | # Inventory: Group by security group
509 | if self.group_by_security_group:
510 | try:
511 | for group in instance.groups:
512 | key = self.to_safe("security_group_" + group.name)
513 | self.push(self.inventory, key, dest)
514 | if self.nested_groups:
515 | self.push_group(self.inventory, 'security_groups', key)
516 | except AttributeError:
517 | self.fail_with_error('\n'.join(['Package boto seems a bit older.',
518 | 'Please upgrade boto >= 2.3.0.']))
519 |
520 | # Inventory: Group by tag keys
521 | if self.group_by_tag_keys:
522 | for k, v in instance.tags.items():
523 | key = self.to_safe("tag_" + k + "=" + v)
524 | self.push(self.inventory, key, dest)
525 | if self.nested_groups:
526 | self.push_group(self.inventory, 'tags', self.to_safe("tag_" + k))
527 | self.push_group(self.inventory, self.to_safe("tag_" + k), key)
528 |
529 | # Inventory: Group by Route53 domain names if enabled
530 | if self.route53_enabled and self.group_by_route53_names:
531 | route53_names = self.get_instance_route53_names(instance)
532 | for name in route53_names:
533 | self.push(self.inventory, name, dest)
534 | if self.nested_groups:
535 | self.push_group(self.inventory, 'route53', name)
536 |
537 | # Global Tag: instances without tags
538 | if self.group_by_tag_none and len(instance.tags) == 0:
539 | self.push(self.inventory, 'tag_none', dest)
540 | if self.nested_groups:
541 | self.push_group(self.inventory, 'tags', 'tag_none')
542 |
543 | # Global Tag: tag all EC2 instances
544 | self.push(self.inventory, 'ec2', dest)
545 |
546 | self.inventory["_meta"]["hostvars"][dest] = self.get_host_info_dict_from_instance(instance)
547 |
548 |
549 | def add_rds_instance(self, instance, region):
550 | ''' Adds an RDS instance to the inventory and index, as long as it is
551 | addressable '''
552 |
553 | # Only want available instances unless all_rds_instances is True
554 | if not self.all_rds_instances and instance.status != 'available':
555 | return
556 |
557 | # Select the best destination address
558 | dest = instance.endpoint[0]
559 |
560 | if not dest:
561 | # Skip instances we cannot address (e.g. private VPC subnet)
562 | return
563 |
564 | # Add to index
565 | self.index[dest] = [region, instance.id]
566 |
567 | # Inventory: Group by instance ID (always a group of 1)
568 | if self.group_by_instance_id:
569 | self.inventory[instance.id] = [dest]
570 | if self.nested_groups:
571 | self.push_group(self.inventory, 'instances', instance.id)
572 |
573 | # Inventory: Group by region
574 | if self.group_by_region:
575 | self.push(self.inventory, region, dest)
576 | if self.nested_groups:
577 | self.push_group(self.inventory, 'regions', region)
578 |
579 | # Inventory: Group by availability zone
580 | if self.group_by_availability_zone:
581 | self.push(self.inventory, instance.availability_zone, dest)
582 | if self.nested_groups:
583 | if self.group_by_region:
584 | self.push_group(self.inventory, region, instance.availability_zone)
585 | self.push_group(self.inventory, 'zones', instance.availability_zone)
586 |
587 | # Inventory: Group by instance type
588 | if self.group_by_instance_type:
589 | type_name = self.to_safe('type_' + instance.instance_class)
590 | self.push(self.inventory, type_name, dest)
591 | if self.nested_groups:
592 | self.push_group(self.inventory, 'types', type_name)
593 |
594 | # Inventory: Group by VPC
595 | if self.group_by_vpc_id and instance.subnet_group and instance.subnet_group.vpc_id:
596 | vpc_id_name = self.to_safe('vpc_id_' + instance.subnet_group.vpc_id)
597 | self.push(self.inventory, vpc_id_name, dest)
598 | if self.nested_groups:
599 | self.push_group(self.inventory, 'vpcs', vpc_id_name)
600 |
601 | # Inventory: Group by security group
602 | if self.group_by_security_group:
603 | try:
604 | if instance.security_group:
605 | key = self.to_safe("security_group_" + instance.security_group.name)
606 | self.push(self.inventory, key, dest)
607 | if self.nested_groups:
608 | self.push_group(self.inventory, 'security_groups', key)
609 |
610 | except AttributeError:
611 | self.fail_with_error('\n'.join(['Package boto seems a bit older.',
612 | 'Please upgrade boto >= 2.3.0.']))
613 |
614 |
615 | # Inventory: Group by engine
616 | if self.group_by_rds_engine:
617 | self.push(self.inventory, self.to_safe("rds_" + instance.engine), dest)
618 | if self.nested_groups:
619 | self.push_group(self.inventory, 'rds_engines', self.to_safe("rds_" + instance.engine))
620 |
621 | # Inventory: Group by parameter group
622 | if self.group_by_rds_parameter_group:
623 | self.push(self.inventory, self.to_safe("rds_parameter_group_" + instance.parameter_group.name), dest)
624 | if self.nested_groups:
625 | self.push_group(self.inventory, 'rds_parameter_groups', self.to_safe("rds_parameter_group_" + instance.parameter_group.name))
626 |
627 | # Global Tag: all RDS instances
628 | self.push(self.inventory, 'rds', dest)
629 |
630 | self.inventory["_meta"]["hostvars"][dest] = self.get_host_info_dict_from_instance(instance)
631 |
632 |
633 | def get_route53_records(self):
634 | ''' Get and store the map of resource records to domain names that
635 | point to them. '''
636 |
637 | r53_conn = route53.Route53Connection()
638 | all_zones = r53_conn.get_zones()
639 |
640 | route53_zones = [ zone for zone in all_zones if zone.name[:-1]
641 | not in self.route53_excluded_zones ]
642 |
643 | self.route53_records = {}
644 |
645 | for zone in route53_zones:
646 | rrsets = r53_conn.get_all_rrsets(zone.id)
647 |
648 | for record_set in rrsets:
649 | record_name = record_set.name
650 |
651 | if record_name.endswith('.'):
652 | record_name = record_name[:-1]
653 |
654 | for resource in record_set.resource_records:
655 | self.route53_records.setdefault(resource, set())
656 | self.route53_records[resource].add(record_name)
657 |
658 |
659 | def get_instance_route53_names(self, instance):
660 | ''' Check if an instance is referenced in the records we have from
661 | Route53. If it is, return the list of domain names pointing to said
662 | instance. If nothing points to it, return an empty list. '''
663 |
664 | instance_attributes = [ 'public_dns_name', 'private_dns_name',
665 | 'ip_address', 'private_ip_address' ]
666 |
667 | name_list = set()
668 |
669 | for attrib in instance_attributes:
670 | try:
671 | value = getattr(instance, attrib)
672 | except AttributeError:
673 | continue
674 |
675 | if value in self.route53_records:
676 | name_list.update(self.route53_records[value])
677 |
678 | return list(name_list)
679 |
680 |
681 | def get_host_info_dict_from_instance(self, instance):
682 | instance_vars = {}
683 | for key in vars(instance):
684 | value = getattr(instance, key)
685 | key = self.to_safe('ec2_' + key)
686 |
687 | # Handle complex types
688 | # state/previous_state changed to properties in boto in https://github.com/boto/boto/commit/a23c379837f698212252720d2af8dec0325c9518
689 | if key == 'ec2__state':
690 | instance_vars['ec2_state'] = instance.state or ''
691 | instance_vars['ec2_state_code'] = instance.state_code
692 | elif key == 'ec2__previous_state':
693 | instance_vars['ec2_previous_state'] = instance.previous_state or ''
694 | instance_vars['ec2_previous_state_code'] = instance.previous_state_code
695 | elif type(value) in [int, bool]:
696 | instance_vars[key] = value
697 | elif isinstance(value, six.string_types):
698 | instance_vars[key] = value.strip()
699 | elif type(value) == type(None):
700 | instance_vars[key] = ''
701 | elif key == 'ec2_region':
702 | instance_vars[key] = value.name
703 | elif key == 'ec2__placement':
704 | instance_vars['ec2_placement'] = value.zone
705 | elif key == 'ec2_tags':
706 | for k, v in value.items():
707 | key = self.to_safe('ec2_tag_' + k)
708 | instance_vars[key] = v
709 | elif key == 'ec2_groups':
710 | group_ids = []
711 | group_names = []
712 | for group in value:
713 | group_ids.append(group.id)
714 | group_names.append(group.name)
715 | instance_vars["ec2_security_group_ids"] = ','.join([str(i) for i in group_ids])
716 | instance_vars["ec2_security_group_names"] = ','.join([str(i) for i in group_names])
717 | else:
718 | pass
719 | # TODO Product codes if someone finds them useful
720 | #print key
721 | #print type(value)
722 | #print value
723 |
724 | return instance_vars
725 |
726 | def get_host_info(self):
727 | ''' Get variables about a specific host '''
728 |
729 | if len(self.index) == 0:
730 | # Need to load index from cache
731 | self.load_index_from_cache()
732 |
733 | if not self.args.host in self.index:
734 | # try updating the cache
735 | self.do_api_calls_update_cache()
736 | if not self.args.host in self.index:
737 | # host might not exist anymore
738 | return self.json_format_dict({}, True)
739 |
740 | (region, instance_id) = self.index[self.args.host]
741 |
742 | instance = self.get_instance(region, instance_id)
743 | return self.json_format_dict(self.get_host_info_dict_from_instance(instance), True)
744 |
745 | def push(self, my_dict, key, element):
746 | ''' Push an element onto an array that may not have been defined in
747 | the dict '''
748 | group_info = my_dict.setdefault(key, [])
749 | if isinstance(group_info, dict):
750 | host_list = group_info.setdefault('hosts', [])
751 | host_list.append(element)
752 | else:
753 | group_info.append(element)
754 |
755 | def push_group(self, my_dict, key, element):
756 | ''' Push a group as a child of another group. '''
757 | parent_group = my_dict.setdefault(key, {})
758 | if not isinstance(parent_group, dict):
759 | parent_group = my_dict[key] = {'hosts': parent_group}
760 | child_groups = parent_group.setdefault('children', [])
761 | if element not in child_groups:
762 | child_groups.append(element)
763 |
764 | def get_inventory_from_cache(self):
765 | ''' Reads the inventory from the cache file and returns it as a JSON
766 | object '''
767 |
768 | cache = open(self.cache_path_cache, 'r')
769 | json_inventory = cache.read()
770 | return json_inventory
771 |
772 |
773 | def load_index_from_cache(self):
774 | ''' Reads the index from the cache file sets self.index '''
775 |
776 | cache = open(self.cache_path_index, 'r')
777 | json_index = cache.read()
778 | self.index = json.loads(json_index)
779 |
780 |
781 | def write_to_cache(self, data, filename):
782 | ''' Writes data in JSON format to a file '''
783 |
784 | json_data = self.json_format_dict(data, True)
785 | cache = open(filename, 'w')
786 | cache.write(json_data)
787 | cache.close()
788 |
789 |
790 | def to_safe(self, word):
791 | ''' Converts 'bad' characters in a string to underscores so they can be
792 | used as Ansible groups '''
793 |
794 | return re.sub("[^A-Za-z0-9\_]", "_", word)
795 |
796 |
797 | def json_format_dict(self, data, pretty=False):
798 | ''' Converts a dict to a JSON object and dumps it as a formatted
799 | string '''
800 |
801 | if pretty:
802 | return json.dumps(data, sort_keys=True, indent=2)
803 | else:
804 | return json.dumps(data)
805 |
806 |
807 | # Run the script
808 | Ec2Inventory()
809 |
--------------------------------------------------------------------------------
/images/VPC.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/HighOps/ansible_ec2_vpc_nat_asg/b175fc478a58ba3a1d5529d08a0f69ffc31e7ccf/images/VPC.png
--------------------------------------------------------------------------------
/plays/operation/README.md:
--------------------------------------------------------------------------------
1 | # Operational playbooks
2 |
3 | WARNING: these playbooks should be run with extreme caution, as they can impact live services!
4 |
--------------------------------------------------------------------------------
/plays/operation/bootstrap_vpc.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: bootstrap the vpc
3 | hosts: localhost
4 | connection: local
5 | gather_facts: False
6 | tasks:
7 |
8 | - name: check that env was passed in extra-vars
9 | assert:
10 | that: 'env is defined'
11 |
12 | - include_vars: "{{ inventory_dir }}/vars/{{ env }}.yml"
13 |
14 | - name: create the VPC
15 | ec2_vpc_net:
16 | cidr_block: "{{ vpc.cidr_block }}"
17 | name: "{{ env }}"
18 | region: "{{ region }}"
19 | state: present
20 | tags: "{{ vpc.tags }}"
21 | register: ec2_vpc_net_out
22 |
23 | - name: create the VPC subnets
24 | ec2_vpc_subnet:
25 | az: "{{ item.az }}"
26 | cidr: "{{ item.cidr }}"
27 | region: "{{ region }}"
28 | resource_tags: "{{ item.resource_tags }}"
29 | state: present
30 | vpc_id: "{{ ec2_vpc_net_out.vpc.id }}"
31 |     with_items: "{{ vpc.subnets }}"
32 | register: ec2_vpc_subnet_out
33 |
34 | - name: create the VPC internet gateway
35 | ec2_vpc_igw:
36 | region: "{{ region }}"
37 | state: present
38 | vpc_id: "{{ ec2_vpc_net_out.vpc.id }}"
39 | register: ec2_vpc_igw_out
40 |
41 | - name: create the igw route table
42 | ec2_vpc_route_table:
43 | region: "{{ region }}"
44 | resource_tags: "{{ item.resource_tags }}"
45 | routes: "{{ item.routes }}"
46 | subnets: "{{ item.subnets }}"
47 | vpc_id: "{{ ec2_vpc_net_out.vpc.id }}"
48 |     with_items: "{{ vpc.route_tables.igw }}"
49 | register: ec2_vpc_route_table_igw_out
50 |
51 | - name: create the private route tables
52 | ec2_vpc_route_table:
53 | region: "{{ region }}"
54 | resource_tags: "{{ item.resource_tags }}"
55 | routes: "{{ item.routes }}"
56 | subnets: "{{ item.subnets }}"
57 | vpc_id: "{{ ec2_vpc_net_out.vpc.id }}"
58 |     with_items: "{{ vpc.route_tables.private }}"
59 | register: ec2_vpc_route_table_private_out
60 | ignore_errors: yes
61 |
62 | - name: process security groups
63 | ec2_group:
64 | description: "{{ item.description }}"
65 | name: "{{ item.name }}"
66 | region: "{{ region }}"
67 | rules: "{{ item.rules }}"
68 | rules_egress: "{{ item.rules_egress }}"
69 | state: present
70 | vpc_id: "{{ ec2_vpc_net_out.vpc.id }}"
71 |     with_items: "{{ sg_list }}"
72 | register: ec2_group_out
73 |
74 |   - name: build a list of public subnet-id to route-table-id maps
75 |     set_fact:
76 |       subnet_route_map: "{{ ec2_vpc_subnet_out.results | get_subnet_route_map(ec2_vpc_route_table_private_out.results) }}"
77 |   - name: merge the allocated EIP list with the subnet-id to route-table-id maps
78 | set_fact:
79 | subnet_route_map: "{{ nat_eipalloc_list | get_zip(subnet_route_map) }}"
80 |
81 | - name: create the nat auto scaling group launch configuration
82 | ec2_lc:
83 | region: "{{ region }}"
84 | name: "{{ nat_asg_lc.name }}"
85 | image_id: "{{ nat_asg_lc.image_id }}"
86 | security_groups: "{{ ec2_group_out.results | get_security_groups('name', nat_asg_lc.security_group) }}"
87 | instance_type: "{{ nat_asg_lc.instance_type }}"
88 | user_data: |
89 | {{ lookup('template', './bootstrap_vpc_user_data.j2') }}
90 | key_name: "{{ nat_asg_lc.key_name }}"
91 | instance_profile_name: "{{ nat_asg_lc.instance_profile_name }}"
92 | assign_public_ip: yes
93 | register: ec2_lc_out
94 | when: nat_asg_lc is defined
95 |
96 | - debug: var=ec2_lc_out
97 |
98 | - name: launch the nat auto scaling group
99 | ec2_asg:
100 | region: "{{ region }}"
101 | name: "{{ nat_asg.name }}"
102 | launch_config_name: "{{ nat_asg.launch_config_name }}"
103 | replace_all_instances: "{{ nat_asg.replace_all_instances }}"
104 | desired_capacity: "{{ nat_asg.desired_capacity }}"
105 | min_size: "{{ nat_asg.min_size }}"
106 | max_size: "{{ nat_asg.max_size }}"
107 | vpc_zone_identifier: "{{ ec2_vpc_subnet_out.results | get_subnets('Type', 'public') }}"
108 | availability_zones: "{{ nat_asg.availability_zones }}"
109 | wait_for_instances: yes
110 | tags: "{{ nat_asg.tags }}"
111 | register: ec2_asg_out
112 | when: nat_asg is defined and ec2_lc_out is defined
113 |
114 | - name: launch a bastion box
115 | ec2:
116 | region: "{{ region }}"
117 | key_name: "{{ key_name }}"
118 | instance_type: "{{ bastion_instance_type }}"
119 | image: "{{ bastion_ami }}"
120 | wait: yes
121 | group: "{{ env + '_bastion' }}"
122 | instance_tags:
123 | Name: "{{ env + '_bastion' }}"
124 | Environment: "{{ env }}"
125 | exact_count: 1
126 | count_tag:
127 | Name: "{{ env + '_bastion' }}"
128 | Environment: "{{ env }}"
129 | vpc_subnet_id: "{{ ec2_vpc_subnet_out.results | get_subnets('Type', 'public') | first }}"
130 | assign_public_ip: yes
131 |
--------------------------------------------------------------------------------
/plays/operation/bootstrap_vpc_user_data.j2:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # config consumed by nat_monitor.py (see below); the heredoc body is assumed
4 | # to be the subnet/route/EIP map rendered by the bootstrap playbook
5 | cat > /root/nat_configs.txt <<EOF
6 | {{ subnet_route_map }}
7 | EOF
8 |
9 | # enable IP forwarding so this instance can NAT traffic
10 | echo 1 > /proc/sys/net/ipv4/ip_forward
13 |
14 | curl -sL https://raw.githubusercontent.com/HighOps/ec2-nat-failover/master/nat_monitor.py > /root/nat_monitor.py
15 | python -u /root/nat_monitor.py < /root/nat_configs.txt | logger -t nat_monitor
16 |
17 |
--------------------------------------------------------------------------------
/vars/prod.yml:
--------------------------------------------------------------------------------
1 | ---
2 | ###############################################################################
3 | # Edit the following variables to suit
4 |
5 | env: prod
6 |
7 | region: us-east-1
8 | az_1: "us-east-1d"
9 | az_2: "us-east-1e"
10 |
11 | key_name: "prod_key"
12 |
13 | nat_ami: ami-303b1458
14 | nat_eipalloc_list:
15 | - eipalloc-e35b0486
16 | - eipalloc-e05b0485
17 | nat_instance_type: t2.small
18 |
19 | bastion_ami: ami-1ecae776
20 | bastion_instance_type: t2.micro
21 |
22 | ###############################################################################
23 | # Be careful editing anything below
24 |
25 | vpc:
26 | name: "{{ env }}"
27 | cidr_block: 10.30.0.0/16
28 | tags:
29 | Environment: "{{ env }}"
30 | subnets:
31 | - cidr: 10.30.0.0/23
32 | az: "{{ az_1 }}"
33 | resource_tags:
34 | Name: "{{ env + '_public_0' }}"
35 | Environment: "{{ env }}"
36 | Type: "public"
37 | - cidr: 10.30.2.0/23
38 | az: "{{ az_2 }}"
39 | resource_tags:
40 | Name: "{{ env + '_public_1' }}"
41 | Environment: "{{ env }}"
42 | Type: "public"
43 | - cidr: 10.30.100.0/23
44 | az: "{{ az_1 }}"
45 | resource_tags:
46 | Name: "{{ env + '_private_0' }}"
47 | Environment: "{{ env }}"
48 | Type: "private"
49 | - cidr: 10.30.102.0/23
50 | az: "{{ az_2 }}"
51 | resource_tags:
52 | Name: "{{ env + '_private_1' }}"
53 | Environment: "{{ env }}"
54 | Type: "private"
55 | route_tables:
56 | igw:
57 | - resource_tags:
58 | Name: "{{ env + '_public' }}"
59 | Environment: "{{ env }}"
60 | subnets:
61 | - "10.30.0.0/23"
62 | - "10.30.2.0/23"
63 | routes:
64 | - dest: 0.0.0.0/0
65 | gateway_id: igw
66 | private:
67 | - resource_tags:
68 | Name: "{{ env + '_private_0' }}"
69 | Environment: "{{ env }}"
70 | subnets:
71 | - "{{ env + '_private_0' }}"
72 | routes:
73 | - dest: 0.0.0.0/0
74 | gateway_id: igw
75 | - resource_tags:
76 | Name: "{{ env + '_private_1' }}"
77 | Environment: "{{ env }}"
78 | subnets:
79 | - "{{ env + '_private_1' }}"
80 | routes:
81 | - dest: 0.0.0.0/0
82 | gateway_id: igw
83 |
84 | sg_list:
85 | - name: "{{ env + '_nat' }}"
86 | description: allow outbound nat
87 | rules:
88 | - proto: all
89 | cidr_ip: 10.30.0.0/16
90 | rules_egress:
91 | - proto: all
92 | cidr_ip: 0.0.0.0/0
93 | - name: "{{ env + '_bastion' }}"
94 | description: access bastion, allow outbound nat
95 | rules:
96 | - proto: tcp
97 | from_port: 22
98 | to_port: 22
99 | cidr_ip: 0.0.0.0/0
100 | - proto: icmp
101 | from_port: -1
102 | to_port: -1
103 | cidr_ip: 0.0.0.0/0
104 | rules_egress:
105 | - proto: all
106 | cidr_ip: 0.0.0.0/0
107 |
108 | nat_asg_lc:
109 | name: "{{ env + '_nat_asg_lc' }}"
110 | image_id: "{{ nat_ami }}"
111 | security_group: "{{ env + '_nat' }}"
112 | instance_type: "{{ nat_instance_type }}"
113 | key_name: "{{ key_name }}"
114 | instance_profile_name: "{{ env + 'NATMonitor' }}"
115 |
116 | nat_asg:
117 | name: "{{ env + '_nat_asg' }}"
118 | launch_config_name: "{{ env + '_nat_asg_lc' }}"
119 | replace_all_instances: yes
120 | desired_capacity: 2
121 | min_size: 2
122 | max_size: 2
123 | availability_zones:
124 | - "{{ az_1 }}"
125 | - "{{ az_2 }}"
126 | tags:
127 | - Name: "{{ env + '_nat' }}"
128 | - Environment: "{{ env }}"
129 |
130 |
--------------------------------------------------------------------------------