├── .gitattributes
├── ansiblecontrolhostvm
├── .gitignore
├── README.md
├── Vagrantfile
└── bootstrap.sh
├── apache-spark-standalone
├── .gitignore
├── README.md
├── Vagrantfile
├── bootstrap.sh
└── simpleapp.py
├── aws-full-stack
├── .gitignore
├── README.md
├── ansible.cfg
├── build.yml
├── deploy.yml
├── example_vars.yml
├── files
│ ├── index.j2
│ ├── info.php
│ └── setup_db.sql
├── inventory
│ ├── ec2.ini
│ └── ec2.py
└── setup.sh
├── mysql-db
├── .gitignore
├── README.md
├── VagrantFile
└── bootstrap.sh
├── simple-web-server-nginx
├── .gitignore
├── Vagrantfile
├── bootstrap.sh
├── nginx_conf
└── www
│ └── index.html
└── wordpress_dev
├── .gitignore
├── README.md
├── Vagrantfile
├── bootstrap.sh
└── src
└── theme
├── index.php
└── style.css
/.gitattributes:
--------------------------------------------------------------------------------
1 | # Auto detect text files and perform LF normalization
2 | * text=auto
3 |
4 | # Custom for Visual Studio
5 | *.cs diff=csharp
6 |
7 | # Standard to msysgit
8 | *.doc diff=astextplain
9 | *.DOC diff=astextplain
10 | *.docx diff=astextplain
11 | *.DOCX diff=astextplain
12 | *.dot diff=astextplain
13 | *.DOT diff=astextplain
14 | *.pdf diff=astextplain
15 | *.PDF diff=astextplain
16 | *.rtf diff=astextplain
17 | *.RTF diff=astextplain
18 |
--------------------------------------------------------------------------------
/ansiblecontrolhostvm/.gitignore:
--------------------------------------------------------------------------------
1 | .vagrant
--------------------------------------------------------------------------------
/ansiblecontrolhostvm/README.md:
--------------------------------------------------------------------------------
1 | Simple Vagrant config to set up an Ubuntu VM to act as an Ansible host/control machine.
2 | Useful if you are running under Windows.
--------------------------------------------------------------------------------
/ansiblecontrolhostvm/Vagrantfile:
--------------------------------------------------------------------------------
1 | # -*- mode: ruby -*-
2 | # vi: set ft=ruby :
3 |
4 | Vagrant.configure(2) do |config|
5 |
6 | config.vm.box = "ubuntu/trusty32"
7 | config.vm.hostname = "ansiblecontrolhost"
8 |
9 | config.vm.provider "virtualbox" do |vb|
10 | vb.memory = "512"
11 | end
12 |
13 | config.vm.provision "shell", path: "bootstrap.sh"
14 |
15 | end
16 |
--------------------------------------------------------------------------------
/ansiblecontrolhostvm/bootstrap.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 |
3 | # add ansible repo
4 | apt-get install software-properties-common
5 | apt-add-repository ppa:ansible/ansible
6 |
7 | # update package lists
8 | apt-get update
9 |
10 | # install ansible + source ctrl tools
11 | apt-get -y install ansible git subversion
12 |
13 | echo "versions installed:"
14 | ansible --version | head -1
15 | git --version | head -1
16 | svn --version | head -1
17 |
18 | echo "Done."
--------------------------------------------------------------------------------
/apache-spark-standalone/.gitignore:
--------------------------------------------------------------------------------
1 | .vagrant
--------------------------------------------------------------------------------
/apache-spark-standalone/README.md:
--------------------------------------------------------------------------------
1 | # Apache Spark Standalone
2 | This vagrant script will set up Apache Spark in standalone mode. Useful for experimentation or "little data" processing.
3 |
4 | Execute
5 | ```
6 | vagrant up
7 | ```
8 |
9 | to start the VM, ```vagrant ssh``` into it and then try:
10 |
11 | ```
12 | cd ~/spark-1.5.0-bin-hadoop2.6
13 | ./bin/pyspark
14 | testFile = sc.textFile("README.md")
15 | testFile.count()
16 | ```
17 |
18 | Press ```CTRL+D``` to exit the pyspark shell
19 |
20 | or
21 |
22 | ```
23 | cd ~/spark-1.5.0-bin-hadoop2.6
24 | ./bin/spark-submit --master local /vagrant/simpleapp.py
25 | ```
26 |
27 |
28 |
--------------------------------------------------------------------------------
/apache-spark-standalone/Vagrantfile:
--------------------------------------------------------------------------------
1 | # -*- mode: ruby -*-
2 | # vi: set ft=ruby :
3 |
4 | Vagrant.configure(2) do |config|
5 |
6 | config.vm.box = "ubuntu/trusty64"
7 | config.vm.hostname = "sparkstandalone"
8 |
9 | config.vm.network :forwarded_port, guest: 4040, host: 4040
10 |
11 | config.vm.provider "virtualbox" do |vb|
12 | vb.memory = "2048"
13 | end
14 |
15 | config.vm.provision "shell", path: "bootstrap.sh"
16 |
17 | end
18 |
--------------------------------------------------------------------------------
/apache-spark-standalone/bootstrap.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | apt-get purge openjdk*
4 | add-apt-repository ppa:webupd8team/java
5 | apt-get update
6 | apt-get -y install software-properties-common
7 | echo oracle-java8-installer shared/accepted-oracle-license-v1-1 select true | sudo /usr/bin/debconf-set-selections
8 | echo oracle-java7-installer shared/accepted-oracle-license-v1-1 select true | sudo /usr/bin/debconf-set-selections
9 | apt-get -y install oracle-java8-installer
10 |
11 | cd /tmp
12 | wget http://d3kbcqa49mib13.cloudfront.net/spark-1.5.0-bin-hadoop2.6.tgz
13 | cd /home/vagrant
14 | tar -zxf /tmp/spark-1.5.0-bin-hadoop2.6.tgz
15 |
16 | echo "Done. Try: "
17 | echo " vagrant ssh"
18 | echo " cd spark-1.5.0-bin-hadoop2.6"
19 | echo " ./bin/pyspark"
20 | echo ' testFile = sc.textFile("README.md")'
21 | echo ' testFile.count()'
22 | echo " CTRL+D to exit"
--------------------------------------------------------------------------------
/apache-spark-standalone/simpleapp.py:
--------------------------------------------------------------------------------
1 | """simpleapp.py"""
2 | from pyspark import SparkContext
3 |
4 | logFile = "/home/vagrant/spark-1.5.0-bin-hadoop2.6/README.md"
5 | sc = SparkContext(appName="Simple App")
6 | logData = sc.textFile(logFile).cache()
7 |
8 | numAs = logData.filter(lambda s: 'a' in s).count()
9 | numBs = logData.filter(lambda s: 'b' in s).count()
10 |
11 | print("Lines with a: %i, lines with b: %i" % (numAs, numBs))
--------------------------------------------------------------------------------
/aws-full-stack/.gitignore:
--------------------------------------------------------------------------------
1 | env.sh
2 | *.pem
3 |
--------------------------------------------------------------------------------
/aws-full-stack/README.md:
--------------------------------------------------------------------------------
1 | Example Ansible scripts for building a full AWS stack
2 | =============================================
3 | These scripts build a multi-AZ set of servers including a VPC, load balancer and RDS database and deploy a simple PHP example application. Can be used as a base for automating your own deployments.
4 |
5 | Basic usage
6 | ===========
7 | 1. Do initial set up (see below)
8 | 2. Create an environment-specific .yml file e.g. example_vars.yml (you can create as many environments as you want/need, see below for more detail)
9 | 3. Build the AWS infrastructure using build playbook, passing in name of environment:
10 |
11 | ```
12 | ansible-playbook -i inventory/ec2.py build.yml -e "env=example"
13 | ```
14 | 4. Deploy the app to the infrastructure using deploy playbook:
15 |
16 | ```
17 | ansible-playbook -i inventory/ec2.py deploy.yml -e "env=example" --private-key keys/example_key.pem
18 | ```
19 | Note you may need to wait 1 min for the inventory cache to clear or you can run ```python inventory/ec2.py --refresh-cache``` first
20 | 5. Use the AWS console to get the DNS name of the load balancer and open it in your browser
21 |
22 |
23 | Initial set up
24 | ==============
25 | 1. Download source to a folder
26 | 2. Run setup script to ensure all required libraries installed:
27 |
28 | ```
29 | ./setup.sh
30 | ```
31 | 3. Create IAM user using AWS console. Give them "Power User" role and download AWS access key and access secret.
32 | 4. Create env.sh containing access key, access secret and your region:
33 |
34 | ```
35 | export AWS_ACCESS_KEY_ID=Axxxxxxxxxxxxxxxxxxxxxxxxxx
36 | export AWS_SECRET_ACCESS_KEY=0xxxxxxxxxxxxxxxxxxxxxxxxxxxxx
37 | export AWS_REGION=ap-southeast-2
38 | ```
39 | 5. Run env.sh to set environment variables (NOTE: YOU WILL NEED TO DO THIS ONCE EVERY SESSION):
40 |
41 | ```
42 | source env.sh
43 | ```
44 | 6. Test connectivity + set up (this can take a moment and should return some JSON if everything worked)
45 |
46 | ```
47 | python inventory/ec2.py
48 | ```
49 |
50 | Environment .yml file
51 | =====================
52 | The environment .yml file (xxxx_vars.yml) holds the specific variables for the environment you want to build:
53 |
54 | ```
55 | ---
56 | control_ip: 175.45.102.163/32
57 |
58 | region: ap-southeast-2
59 | zone_1: ap-southeast-2a
60 | zone_2: ap-southeast-2b
61 |
62 |
63 | server:
64 | instance_type: t2.micro
65 | zone_1_count: 1
66 | zone_2_count: 1
67 |
68 |
69 | db:
70 | instance_type: db.t2.micro
71 | size: 5
72 | name: rockbands
73 | user: rockbands
74 | password: password123
75 | ```
76 |
77 | * control_ip - The IP address(es) that are allowed to SSH to the servers. You must set this to suit where you are running ansible from
78 | * region - The region you wish to deploy to
79 | * zone_1 - The first availability zone to deploy to
80 | * zone_2 - The second availability zone to deploy to
81 | * server.instance_type - The size of the servers to deploy
82 | * zone_1_count - The number of servers to deploy to zone 1. If you edit this number and rerun build.yml, ansible will increase or decrease the number of servers in this zone. Can be set to 0.
83 | * zone_2_count - The number of servers to deploy to zone 2. If you edit this number and rerun build.yml, ansible will increase or decrease the number of servers in this zone. Can be set to 0.
84 | * db.instance_type - The size of the DB server to deploy
85 | * db.size - The size of the database (5GB minimum)
86 | * db.name - The name of the database
87 | * db.user - The user for the database
88 | * db.password - The password for the database
89 |
90 |
91 | TODOs
92 | =====
93 | * Improve the handlers in deploy playbook so things are not restarted AFTER instance is added back into load balancer
94 | * Figure out how to give each server a unique sequence number in its name
95 | * Add tags to ELB (not supported by Ansible yet)
96 |
--------------------------------------------------------------------------------
/aws-full-stack/ansible.cfg:
--------------------------------------------------------------------------------
1 | [defaults]
2 | control_path = %(directory)s/%%h-%%r
3 | host_key_checking = False
4 |
--------------------------------------------------------------------------------
/aws-full-stack/build.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Create AWS infrastructure
3 | hosts: localhost
4 | vars_files:
5 | - "{{ env }}_vars.yml"
6 |
7 | tasks:
8 | - name: Create the VPC
9 | ec2_vpc:
10 | region: "{{ region }}"
11 | internet_gateway: True
12 | resource_tags: { Name: "{{ env}}-vpc", env: "{{ env }}" }
13 | cidr_block: 10.0.0.0/16
14 | subnets:
15 | - cidr: 10.0.0.0/24
16 | az: "{{ zone_1 }}"
17 | resource_tags: {env: "{{ env }}", tier: web, az: 1, Name: "{{env}}_sn_web_1" }
18 | - cidr: 10.0.1.0/24
19 | az: "{{ zone_2 }}"
20 | resource_tags: {env: "{{ env }}", tier: web, az: 2, Name: "{{env}}_sn_web_2" }
21 | - cidr: 10.0.5.0/24
22 | az: "{{ zone_1 }}"
23 | resource_tags: {env: "{{ env }}", tier: db, az: 1, Name: "{{env}}_sn_db_1" }
24 | - cidr: 10.0.6.0/24
25 | az: "{{ zone_2 }}"
26 | resource_tags: {env: "{{ env }}", tier: db, az: 2, Name: "{{env}}_sn_db_2" }
27 | route_tables:
28 | - subnets:
29 | - 10.0.0.0/24
30 | - 10.0.1.0/24
31 | routes:
32 | - dest: 0.0.0.0/0
33 | gw: igw
34 | register: vpc
35 |
36 | - set_fact: vpc_id="{{ vpc.vpc_id }}"
37 | - set_fact: web_tier_subnet_1="{{ vpc.subnets[0].id}}"
38 | - set_fact: web_tier_subnet_2="{{ vpc.subnets[1].id}}"
39 | - set_fact: db_tier_subnet_1="{{ vpc.subnets[2].id}}"
40 | - set_fact: db_tier_subnet_2="{{ vpc.subnets[3].id}}"
41 |
42 | - name: Create key pair
43 | ec2_key: name="{{ env }}_key" region="{{ region }}"
44 | register: keypair
45 |
46 | - name: Write the key to a file
47 | copy:
48 | dest: "keys/{{ env }}_key.pem"
49 | content: "{{ keypair.key.private_key }}"
50 | mode: 0600
51 | when: keypair.changed
52 |
53 | - name: Create Web security group
54 | ec2_group:
55 | name: "{{ env }}-sg-web"
56 | region: "{{ region }}"
57 | description: allow web access from everywhere
58 | vpc_id: "{{ vpc_id }}"
59 | rules:
60 | - proto: tcp
61 | from_port: 80
62 | to_port: 80
63 | cidr_ip: 0.0.0.0/0
64 | register: web_sec_group
65 |
66 | - name: Create Internal web security group
67 | ec2_group:
68 | name: "{{ env }}-vpc-web-internal"
69 | region: "{{ region }}"
70 | description: allow web access from internal network only
71 | vpc_id: "{{ vpc_id }}"
72 | rules:
73 | - proto: tcp
74 | from_port: 80
75 | to_port: 80
76 | cidr_ip: 10.0.0.0/24
77 | - proto: tcp
78 | from_port: 80
79 | to_port: 80
80 | cidr_ip: 10.0.1.0/24
81 |
82 | - name: Create DB security group
83 | ec2_group:
84 | name: "{{ env }}-sg-db"
85 | region: "{{ region }}"
86 | description: allow access to db from web tier
87 | vpc_id: "{{ vpc_id }}"
88 | rules:
89 | - proto: tcp
90 | from_port: 3306
91 | to_port: 3306
92 | cidr_ip: 10.0.0.0/24
93 | - proto: tcp
94 | from_port: 3306
95 | to_port: 3306
96 | cidr_ip: 10.0.1.0/24
97 | register: db_sec_group
98 |
99 |
100 | - name: Create SSH security group
101 | ec2_group:
102 | name: "{{ env }}-vpc-ssh"
103 | region: "{{ region }}"
104 | description: allow ssh access
105 | vpc_id: "{{ vpc_id }}"
106 | rules:
107 | - proto: tcp
108 | from_port: 22
109 | to_port: 22
110 | cidr_ip: "{{control_ip}}"
111 |
112 | - name: Create outbound security group
113 | ec2_group:
114 | name: "{{ env }}-vpc-outbound"
115 | description: allow outbound connections to the internet
116 | region: "{{ region }}"
117 | vpc_id: "{{ vpc_id }}"
118 | rules_egress:
119 | - proto: all
120 | cidr_ip: 0.0.0.0/0
121 |
122 | - name: Get the ubuntu trusty AMI
123 | ec2_ami_search: distro=ubuntu release=trusty virt=hvm region={{ region }}
124 | register: ubuntu_image
125 |
126 | - name: Start the zone 1 server instances
127 | ec2:
128 | image: "{{ ubuntu_image.ami }}"
129 | region: "{{ region }}"
130 | instance_type: "{{ server.instance_type }}"
131 | assign_public_ip: True
132 | key_name: "{{ env }}_key"
133 | group: ["{{ env}}-vpc-web-internal", "{{ env}}-vpc-ssh", "{{ env}}-vpc-outbound"]
134 | instance_tags: { Name: "{{ env }}-z1-web", type: webserver, env: "{{ env }}", az: 1 }
135 | exact_count: "{{ server.zone_1_count }}"
136 | count_tag: { type: webserver, env: "{{ env }}", az: 1 }
137 | vpc_subnet_id: "{{ web_tier_subnet_1 }}"
138 | zone: "{{ zone_1 }}"
139 | wait: yes
140 | register: ec2
141 |
142 | - name: Wait for zone 1 servers to be reachable via SSH
143 | wait_for: host={{ item.public_dns_name }} port=22 search_regex=OpenSSH
144 | with_items: ec2.tagged_instances
145 | when: item.public_dns_name is defined
146 |
147 | - name: Start the zone 2 server instances
148 | ec2:
149 | image: "{{ ubuntu_image.ami }}"
150 | region: "{{ region }}"
151 | instance_type: "{{ server.instance_type }}"
152 | assign_public_ip: True
153 | key_name: "{{ env }}_key"
154 | group: ["{{ env}}-vpc-web-internal", "{{ env}}-vpc-ssh", "{{ env}}-vpc-outbound"]
155 | instance_tags: { Name: "{{ env }}-z2-web", type: webserver, env: "{{ env }}", az: 2 }
156 | exact_count: "{{ server.zone_2_count }}"
157 | count_tag: { type: webserver, env: "{{ env }}", az: 2 }
158 | vpc_subnet_id: "{{ web_tier_subnet_2 }}"
159 | zone: "{{ zone_2 }}"
160 | wait: yes
161 | register: ec2
162 |
163 | - name: Wait for zone 2 servers to be reachable via SSH
164 | wait_for: host={{ item.public_dns_name }} port=22 search_regex=OpenSSH
165 | with_items: ec2.tagged_instances
166 | when: item.public_dns_name is defined
167 |
168 | - name: Set up load balancer
169 | ec2_elb_lb:
170 | name: "{{ env }}-lb"
171 | region: "{{ region }}"
172 | state: present
173 | subnets:
174 | - "{{ web_tier_subnet_1 }}"
175 | - "{{ web_tier_subnet_2 }}"
176 | security_group_ids: ["{{ web_sec_group.group_id }}"]
177 | listeners:
178 | - protocol: http
179 | load_balancer_port: 80
180 | instance_port: 80
181 | # cannot tag ELB via ec2_elb_lb module yet
182 | #tags:
183 | # - env: "{{ env }}"
184 |
185 | - name: Create RDS subnet
186 | rds_subnet_group:
187 | name: "{{ env }}-db-sng"
188 | region: "{{ region }}"
189 | state: present
190 | description: "RDS subnet group for {{ env }}"
191 | subnets:
192 | - "{{ db_tier_subnet_1 }}"
193 | - "{{ db_tier_subnet_2 }}"
194 |
195 | - name: Create RDS database (this can take a while)
196 | rds:
197 | command: create
198 | instance_name: "{{ env }}-db"
199 | db_engine: MySQL
200 | size: "{{ db.size }}"
201 | instance_type: "{{ db.instance_type }}"
202 | multi_zone: yes
203 | username: "{{ db.user }}"
204 | password: "{{ db.password }}"
205 | tags: { Name: "{{ env }}-db", type: db, env: "{{ env }}" }
206 | subnet: "{{ env }}-db-sng"
207 | vpc_security_groups: ["{{ db_sec_group.group_id }}"]
208 | wait: yes
209 | wait_timeout: 1200
210 |
--------------------------------------------------------------------------------
/aws-full-stack/deploy.yml:
--------------------------------------------------------------------------------
1 |
2 | - name: Configure Web servers
3 | hosts: tag_type_webserver:&tag_env_{{ env }}
4 | user: ubuntu
5 | sudo: True
6 | serial: "25%"
7 | vars_files:
8 | - "{{ env }}_vars.yml"
9 |
10 | tasks:
11 | - name: Get DB end point
12 | set_fact: db_host_name="{{ groups[env+"-db"][0] }}"
13 |
14 | - name: Check that req libraries are loaded for DB commands
15 | apt: pkg={{item}} state=installed
16 | with_items:
17 | - python-mysqldb
18 | - mysql-client
19 |
20 | - name: Ensure that DB has been created (only runs once)
21 | mysql_db:
22 | name: "{{ db.name }}"
23 | state: present
24 | login_host: "{{ db_host_name }}"
25 | login_user: "{{ db.user }}"
26 | login_password: "{{db.password }}"
27 | run_once: true
28 | notify:
29 | - Copy db script
30 | - Create database
31 |
32 | - name: Now processing instance
33 | debug: msg="{{ ec2_id }}"
34 |
35 | - name: Pull instance from elb
36 | local_action:
37 | module: ec2_elb
38 | region: "{{ region }}"
39 | ec2_elbs:
40 | - "{{ env }}-lb"
41 | instance_id: "{{ ec2_id }}"
42 | state: absent
43 | sudo: false
44 |
45 | - name: Install Apache+PHP
46 | apt: pkg={{item}} state=installed
47 | with_items:
48 | - apache2
49 | - php5
50 | - libapache2-mod-php5
51 | - php5-mysqlnd
52 | notify:
53 | - Restart apache
54 |
55 | - name: Enable PHP
56 | apache2_module: name=php5 state=present
57 |
58 | - name: Delete old index.html
59 | file: path=/var/www/html/index.html state=absent
60 |
61 | - name: Create index.php (with DB connection info burned in)
62 | template: src=files/index.j2 dest=/var/www/html/index.php
63 |
64 | - name: Copy info.php
65 | copy: src=files/info.php dest=/var/www/html/info.php
66 |
67 | - name: Check that Apache is running
68 | service: name=apache2 state=started
69 |
70 | - name: Add instance back into elb
71 | local_action:
72 | module: ec2_elb
73 | region: "{{ region }}"
74 | ec2_elbs: "{{ ec2_elbs }}"
75 | instance_id: "{{ ec2_id }}"
76 | state: present
77 | sudo: false
78 |
79 | handlers:
80 | - name: Copy db script
81 | copy: src=files/setup_db.sql dest=/tmp/setup_db.sql
82 |
83 | - name: Create database
84 | mysql_db:
85 | name: "{{ db.name }}"
86 | state: import
87 | login_host: "{{ db_host_name }}"
88 | login_user: "{{ db.user }}"
89 | login_password: "{{db.password }}"
90 | target: /tmp/setup_db.sql
91 |
92 | - name: Restart apache
93 | service: name=apache2 state=restarted
94 |
--------------------------------------------------------------------------------
/aws-full-stack/example_vars.yml:
--------------------------------------------------------------------------------
1 | ---
2 | control_ip: 175.45.102.163/32
3 |
4 | region: ap-southeast-2
5 | zone_1: ap-southeast-2a
6 | zone_2: ap-southeast-2b
7 |
8 |
9 | server:
10 | instance_type: t2.micro
11 | zone_1_count: 1
12 | zone_2_count: 1
13 |
14 |
15 | db:
16 | instance_type: db.t2.micro
17 | size: 5
18 | name: rockbands
19 | user: rockbands
20 | password: password123
21 |
--------------------------------------------------------------------------------
/aws-full-stack/files/index.j2:
--------------------------------------------------------------------------------
1 | <html>
2 | <head>
3 | 
4 | <title>80's band</title>
5 | </head>
6 | <body>
7 | <h1>And now for some 80's bands</h1>
8 | 
9 | 
10 | <?php
11 | $servername = "{{ db_host_name }}";
12 | $username = "{{ db.user }}";
13 | $password = "{{ db.password }}";
14 | $dbname = "{{ db.name }}";
15 | 
16 | // Create connection
17 | $conn = new mysqli($servername, $username, $password, $dbname);
18 | if ($conn->connect_error){
19 | die("Unable to connect to DB: " . $conn->connect_error);
20 | }
21 | 
22 | $sql = "SELECT * from names";
23 | $result = $conn->query($sql);
24 | 
25 | if ($result->num_rows > 0){
26 | while ($row = $result->fetch_assoc()){
27 | echo $row["name"] . "<br>";
28 | }
29 | } else {
30 | echo "No names in database!";
31 | }
32 | 
33 | ?>
34 | 
35 | Served by <?= gethostname() ?>
36 | 
37 | </body>
38 | </html>
--------------------------------------------------------------------------------
/aws-full-stack/files/info.php:
--------------------------------------------------------------------------------
1 | <?php
2 | 
3 | phpinfo();
4 | ?>
5 | 
--------------------------------------------------------------------------------
/aws-full-stack/files/setup_db.sql:
--------------------------------------------------------------------------------
1 | CREATE TABLE names(
2 | id INT(6) UNSIGNED AUTO_INCREMENT PRIMARY KEY,
3 | name VARCHAR(100) NOT NULL
4 | );
5 |
6 | INSERT INTO names(name) values('Bon Jovi');
7 | INSERT INTO names(name) values('Queen');
8 | INSERT INTO names(name) values('Duran Duran');
9 | INSERT INTO names(name) values('U2');
10 |
11 |
--------------------------------------------------------------------------------
/aws-full-stack/inventory/ec2.ini:
--------------------------------------------------------------------------------
1 | # Ansible EC2 external inventory script settings
2 | #
3 |
4 | [ec2]
5 |
6 | # to talk to a private eucalyptus instance uncomment these lines
7 | # and edit eucalyptus_host to be the host name of your cloud controller
8 | #eucalyptus = True
9 | #eucalyptus_host = clc.cloud.domain.org
10 |
11 | # AWS regions to make calls to. Set this to 'all' to make request to all regions
12 | # in AWS and merge the results together. Alternatively, set this to a comma
13 | # separated list of regions. E.g. 'us-east-1,us-west-1,us-west-2'
14 | regions = all
15 | regions_exclude = us-gov-west-1,cn-north-1
16 |
17 | # When generating inventory, Ansible needs to know how to address a server.
18 | # Each EC2 instance has a lot of variables associated with it. Here is the list:
19 | # http://docs.pythonboto.org/en/latest/ref/ec2.html#module-boto.ec2.instance
20 | # Below are 2 variables that are used as the address of a server:
21 | # - destination_variable
22 | # - vpc_destination_variable
23 |
24 | # This is the normal destination variable to use. If you are running Ansible
25 | # from outside EC2, then 'public_dns_name' makes the most sense. If you are
26 | # running Ansible from within EC2, then perhaps you want to use the internal
27 | # address, and should set this to 'private_dns_name'. The key of an EC2 tag
28 | # may optionally be used; however the boto instance variables hold precedence
29 | # in the event of a collision.
30 | destination_variable = public_dns_name
31 |
32 | # For servers inside a VPC, using DNS names may not make sense. When an instance
33 | # has 'subnet_id' set, this variable is used. If the subnet is public, setting
34 | # this to 'ip_address' will return the public IP address. For instances in a
35 | # private subnet, this should be set to 'private_ip_address', and Ansible must
36 | # be run from within EC2. The key of an EC2 tag may optionally be used; however
37 | # the boto instance variables hold precedence in the event of a collision.
38 | # WARNING: - instances that are in the private vpc, _without_ public ip address
39 | # will not be listed in the inventory until You set:
40 | # vpc_destination_variable = 'private_ip_address'
41 | vpc_destination_variable = ip_address
42 |
43 | # To tag instances on EC2 with the resource records that point to them from
44 | # Route53, uncomment and set 'route53' to True.
45 | route53 = False
46 |
47 | # To exclude RDS instances from the inventory, uncomment and set to False.
48 | #rds = False
49 |
50 | # To exclude ElastiCache instances from the inventory, uncomment and set to False.
51 | #elasticache = False
52 |
53 | # Additionally, you can specify the list of zones to exclude looking up in
54 | # 'route53_excluded_zones' as a comma-separated list.
55 | # route53_excluded_zones = samplezone1.com, samplezone2.com
56 |
57 | # By default, only EC2 instances in the 'running' state are returned. Set
58 | # 'all_instances' to True to return all instances regardless of state.
59 | all_instances = False
60 |
61 | # By default, only RDS instances in the 'available' state are returned. Set
62 | # 'all_rds_instances' to True to return all RDS instances regardless of state.
63 | all_rds_instances = False
64 |
65 | # By default, only ElastiCache clusters and nodes in the 'available' state
66 | # are returned. Set 'all_elasticache_clusters' and/or 'all_elastic_nodes'
67 | # to True to return all ElastiCache clusters and nodes, regardless of state.
68 | #
69 | # Note that all_elasticache_nodes only applies to listed clusters. That means
70 | # if you set all_elasticache_clusters to false, no node will be returned from
71 | # unavailable clusters, regardless of the state and to what you set for
72 | # all_elasticache_nodes.
73 | all_elasticache_replication_groups = False
74 | all_elasticache_clusters = False
75 | all_elasticache_nodes = False
76 |
77 | # API calls to EC2 are slow. For this reason, we cache the results of an API
78 | # call. Set this to the path you want cache files to be written to. Two files
79 | # will be written to this directory:
80 | # - ansible-ec2.cache
81 | # - ansible-ec2.index
82 | cache_path = ~/.ansible/tmp
83 |
84 | # The number of seconds a cache file is considered valid. After this many
85 | # seconds, a new API call will be made, and the cache file will be updated.
86 | # To disable the cache, set this value to 0
87 | cache_max_age = 60
88 |
89 | # Organize groups into a nested/hierarchy instead of a flat namespace.
90 | nested_groups = False
91 |
92 | # The EC2 inventory output can become very large. To manage its size,
93 | # configure which groups should be created.
94 | group_by_instance_id = True
95 | group_by_region = True
96 | group_by_availability_zone = True
97 | group_by_ami_id = True
98 | group_by_instance_type = True
99 | group_by_key_pair = True
100 | group_by_vpc_id = True
101 | group_by_security_group = True
102 | group_by_tag_keys = True
103 | group_by_tag_none = True
104 | group_by_route53_names = True
105 | group_by_rds_engine = True
106 | group_by_rds_parameter_group = True
107 | group_by_elasticache_engine = True
108 | group_by_elasticache_cluster = True
109 | group_by_elasticache_parameter_group = True
110 | group_by_elasticache_replication_group = True
111 |
112 | # If you only want to include hosts that match a certain regular expression
113 | # pattern_include = staging-*
114 |
115 | # If you want to exclude any hosts that match a certain regular expression
116 | # pattern_exclude = staging-*
117 |
118 | # Instance filters can be used to control which instances are retrieved for
119 | # inventory. For the full list of possible filters, please read the EC2 API
120 | # docs: http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-DescribeInstances.html#query-DescribeInstances-filters
121 | # Filters are key/value pairs separated by '=', to list multiple filters use
122 | # a list separated by commas. See examples below.
123 |
124 | # Retrieve only instances with (key=value) env=staging tag
125 | # instance_filters = tag:env=staging
126 |
127 | # Retrieve only instances with role=webservers OR role=dbservers tag
128 | # instance_filters = tag:role=webservers,tag:role=dbservers
129 |
130 | # Retrieve only t1.micro instances OR instances with tag env=staging
131 | # instance_filters = instance-type=t1.micro,tag:env=staging
132 |
133 | # You can use wildcards in filter values also. Below will list instances which
134 | # tag Name value matches webservers1*
135 | # (ex. webservers15, webservers1a, webservers123 etc)
136 | # instance_filters = tag:Name=webservers1*
137 |
--------------------------------------------------------------------------------
/aws-full-stack/inventory/ec2.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 |
3 | '''
4 | EC2 external inventory script
5 | =================================
6 |
7 | Generates inventory that Ansible can understand by making API request to
8 | AWS EC2 using the Boto library.
9 |
10 | NOTE: This script assumes Ansible is being executed where the environment
11 | variables needed for Boto have already been set:
12 | export AWS_ACCESS_KEY_ID='AK123'
13 | export AWS_SECRET_ACCESS_KEY='abc123'
14 |
15 | This script also assumes there is an ec2.ini file alongside it. To specify a
16 | different path to ec2.ini, define the EC2_INI_PATH environment variable:
17 |
18 | export EC2_INI_PATH=/path/to/my_ec2.ini
19 |
20 | If you're using eucalyptus you need to set the above variables and
21 | you need to define:
22 |
23 | export EC2_URL=http://hostname_of_your_cc:port/services/Eucalyptus
24 |
25 | For more details, see: http://docs.pythonboto.org/en/latest/boto_config_tut.html
26 |
27 | When run against a specific host, this script returns the following variables:
28 | - ec2_ami_launch_index
29 | - ec2_architecture
30 | - ec2_association
31 | - ec2_attachTime
32 | - ec2_attachment
33 | - ec2_attachmentId
34 | - ec2_client_token
35 | - ec2_deleteOnTermination
36 | - ec2_description
37 | - ec2_deviceIndex
38 | - ec2_dns_name
39 | - ec2_eventsSet
40 | - ec2_group_name
41 | - ec2_hypervisor
42 | - ec2_id
43 | - ec2_image_id
44 | - ec2_instanceState
45 | - ec2_instance_type
46 | - ec2_ipOwnerId
47 | - ec2_ip_address
48 | - ec2_item
49 | - ec2_kernel
50 | - ec2_key_name
51 | - ec2_launch_time
52 | - ec2_monitored
53 | - ec2_monitoring
54 | - ec2_networkInterfaceId
55 | - ec2_ownerId
56 | - ec2_persistent
57 | - ec2_placement
58 | - ec2_platform
59 | - ec2_previous_state
60 | - ec2_private_dns_name
61 | - ec2_private_ip_address
62 | - ec2_publicIp
63 | - ec2_public_dns_name
64 | - ec2_ramdisk
65 | - ec2_reason
66 | - ec2_region
67 | - ec2_requester_id
68 | - ec2_root_device_name
69 | - ec2_root_device_type
70 | - ec2_security_group_ids
71 | - ec2_security_group_names
72 | - ec2_shutdown_state
73 | - ec2_sourceDestCheck
74 | - ec2_spot_instance_request_id
75 | - ec2_state
76 | - ec2_state_code
77 | - ec2_state_reason
78 | - ec2_status
79 | - ec2_subnet_id
80 | - ec2_tenancy
81 | - ec2_virtualization_type
82 | - ec2_vpc_id
83 |
84 | These variables are pulled out of a boto.ec2.instance object. There is a lack of
85 | consistency with variable spellings (camelCase and underscores) since this
86 | just loops through all variables the object exposes. It is preferred to use the
87 | ones with underscores when multiple exist.
88 |
89 | In addition, if an instance has AWS Tags associated with it, each tag is a new
90 | variable named:
91 | - ec2_tag_[Key] = [Value]
92 |
93 | Security groups are comma-separated in 'ec2_security_group_ids' and
94 | 'ec2_security_group_names'.
95 | '''
96 |
97 | # (c) 2012, Peter Sankauskas
98 | #
99 | # This file is part of Ansible,
100 | #
101 | # Ansible is free software: you can redistribute it and/or modify
102 | # it under the terms of the GNU General Public License as published by
103 | # the Free Software Foundation, either version 3 of the License, or
104 | # (at your option) any later version.
105 | #
106 | # Ansible is distributed in the hope that it will be useful,
107 | # but WITHOUT ANY WARRANTY; without even the implied warranty of
108 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
109 | # GNU General Public License for more details.
110 | #
111 | # You should have received a copy of the GNU General Public License
112 | # along with Ansible. If not, see .
113 |
114 | ######################################################################
115 |
116 | import sys
117 | import os
118 | import argparse
119 | import re
120 | from time import time
121 | import boto
122 | from boto import ec2
123 | from boto import rds
124 | from boto import elasticache
125 | from boto import route53
126 | import six
127 |
128 | from six.moves import configparser
129 | from collections import defaultdict
130 |
131 | try:
132 | import json
133 | except ImportError:
134 | import simplejson as json
135 |
136 |
137 | class Ec2Inventory(object):
138 | def _empty_inventory(self):
139 | return {"_meta" : {"hostvars" : {}}}
140 |
141 | def __init__(self):
142 | ''' Main execution path '''
143 |
144 | # Inventory grouped by instance IDs, tags, security groups, regions,
145 | # and availability zones
146 | self.inventory = self._empty_inventory()
147 |
148 | # Index of hostname (address) to instance ID
149 | self.index = {}
150 |
151 | # Read settings and parse CLI arguments
152 | self.read_settings()
153 | self.parse_cli_args()
154 |
155 | # Cache
156 | if self.args.refresh_cache:
157 | self.do_api_calls_update_cache()
158 | elif not self.is_cache_valid():
159 | self.do_api_calls_update_cache()
160 |
161 | # Data to print
162 | if self.args.host:
163 | data_to_print = self.get_host_info()
164 |
165 | elif self.args.list:
166 | # Display list of instances for inventory
167 | if self.inventory == self._empty_inventory():
168 | data_to_print = self.get_inventory_from_cache()
169 | else:
170 | data_to_print = self.json_format_dict(self.inventory, True)
171 |
172 | print(data_to_print)
173 |
174 |
175 | def is_cache_valid(self):
176 | ''' Determines if the cache files have expired, or if it is still valid '''
177 |
178 | if os.path.isfile(self.cache_path_cache):
179 | mod_time = os.path.getmtime(self.cache_path_cache)
180 | current_time = time()
181 | if (mod_time + self.cache_max_age) > current_time:
182 | if os.path.isfile(self.cache_path_index):
183 | return True
184 |
185 | return False
186 |
187 |
188 | def read_settings(self):
189 | ''' Reads the settings from the ec2.ini file '''
190 | if six.PY2:
191 | config = configparser.SafeConfigParser()
192 | else:
193 | config = configparser.ConfigParser()
194 | ec2_default_ini_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'ec2.ini')
195 | ec2_ini_path = os.environ.get('EC2_INI_PATH', ec2_default_ini_path)
196 | config.read(ec2_ini_path)
197 |
198 | # is eucalyptus?
199 | self.eucalyptus_host = None
200 | self.eucalyptus = False
201 | if config.has_option('ec2', 'eucalyptus'):
202 | self.eucalyptus = config.getboolean('ec2', 'eucalyptus')
203 | if self.eucalyptus and config.has_option('ec2', 'eucalyptus_host'):
204 | self.eucalyptus_host = config.get('ec2', 'eucalyptus_host')
205 |
206 | # Regions
207 | self.regions = []
208 | configRegions = config.get('ec2', 'regions')
209 | configRegions_exclude = config.get('ec2', 'regions_exclude')
210 | if (configRegions == 'all'):
211 | if self.eucalyptus_host:
212 | self.regions.append(boto.connect_euca(host=self.eucalyptus_host).region.name)
213 | else:
214 | for regionInfo in ec2.regions():
215 | if regionInfo.name not in configRegions_exclude:
216 | self.regions.append(regionInfo.name)
217 | else:
218 | self.regions = configRegions.split(",")
219 |
220 | # Destination addresses
221 | self.destination_variable = config.get('ec2', 'destination_variable')
222 | self.vpc_destination_variable = config.get('ec2', 'vpc_destination_variable')
223 |
224 | # Route53
225 | self.route53_enabled = config.getboolean('ec2', 'route53')
226 | self.route53_excluded_zones = []
227 | if config.has_option('ec2', 'route53_excluded_zones'):
228 | self.route53_excluded_zones.extend(
229 | config.get('ec2', 'route53_excluded_zones', '').split(','))
230 |
231 | # Include RDS instances?
232 | self.rds_enabled = True
233 | if config.has_option('ec2', 'rds'):
234 | self.rds_enabled = config.getboolean('ec2', 'rds')
235 |
236 | # Include ElastiCache instances?
237 | self.elasticache_enabled = True
238 | if config.has_option('ec2', 'elasticache'):
239 | self.elasticache_enabled = config.getboolean('ec2', 'elasticache')
240 |
241 | # Return all EC2 instances?
242 | if config.has_option('ec2', 'all_instances'):
243 | self.all_instances = config.getboolean('ec2', 'all_instances')
244 | else:
245 | self.all_instances = False
246 |
247 | # Return all RDS instances? (if RDS is enabled)
248 | if config.has_option('ec2', 'all_rds_instances') and self.rds_enabled:
249 | self.all_rds_instances = config.getboolean('ec2', 'all_rds_instances')
250 | else:
251 | self.all_rds_instances = False
252 |
253 | # Return all ElastiCache replication groups? (if ElastiCache is enabled)
254 | if config.has_option('ec2', 'all_elasticache_replication_groups') and self.elasticache_enabled:
255 | self.all_elasticache_replication_groups = config.getboolean('ec2', 'all_elasticache_replication_groups')
256 | else:
257 | self.all_elasticache_replication_groups = False
258 |
259 | # Return all ElastiCache clusters? (if ElastiCache is enabled)
260 | if config.has_option('ec2', 'all_elasticache_clusters') and self.elasticache_enabled:
261 | self.all_elasticache_clusters = config.getboolean('ec2', 'all_elasticache_clusters')
262 | else:
263 | self.all_elasticache_clusters = False
264 |
265 | # Return all ElastiCache nodes? (if ElastiCache is enabled)
266 | if config.has_option('ec2', 'all_elasticache_nodes') and self.elasticache_enabled:
267 | self.all_elasticache_nodes = config.getboolean('ec2', 'all_elasticache_nodes')
268 | else:
269 | self.all_elasticache_nodes = False
270 |
271 | # Cache related
272 | cache_dir = os.path.expanduser(config.get('ec2', 'cache_path'))
273 | if not os.path.exists(cache_dir):
274 | os.makedirs(cache_dir)
275 |
276 | self.cache_path_cache = cache_dir + "/ansible-ec2.cache"
277 | self.cache_path_index = cache_dir + "/ansible-ec2.index"
278 | self.cache_max_age = config.getint('ec2', 'cache_max_age')
279 |
280 | # Configure nested groups instead of flat namespace.
281 | if config.has_option('ec2', 'nested_groups'):
282 | self.nested_groups = config.getboolean('ec2', 'nested_groups')
283 | else:
284 | self.nested_groups = False
285 |
286 | # Configure which groups should be created.
287 | group_by_options = [
288 | 'group_by_instance_id',
289 | 'group_by_region',
290 | 'group_by_availability_zone',
291 | 'group_by_ami_id',
292 | 'group_by_instance_type',
293 | 'group_by_key_pair',
294 | 'group_by_vpc_id',
295 | 'group_by_security_group',
296 | 'group_by_tag_keys',
297 | 'group_by_tag_none',
298 | 'group_by_route53_names',
299 | 'group_by_rds_engine',
300 | 'group_by_rds_parameter_group',
301 | 'group_by_elasticache_engine',
302 | 'group_by_elasticache_cluster',
303 | 'group_by_elasticache_parameter_group',
304 | 'group_by_elasticache_replication_group',
305 | ]
306 | for option in group_by_options:
307 | if config.has_option('ec2', option):
308 | setattr(self, option, config.getboolean('ec2', option))
309 | else:
310 | setattr(self, option, True)
311 |
312 | # Do we need to just include hosts that match a pattern?
313 | try:
314 | pattern_include = config.get('ec2', 'pattern_include')
315 | if pattern_include and len(pattern_include) > 0:
316 | self.pattern_include = re.compile(pattern_include)
317 | else:
318 | self.pattern_include = None
319 | except configparser.NoOptionError as e:
320 | self.pattern_include = None
321 |
322 | # Do we need to exclude hosts that match a pattern?
323 | try:
324 | pattern_exclude = config.get('ec2', 'pattern_exclude');
325 | if pattern_exclude and len(pattern_exclude) > 0:
326 | self.pattern_exclude = re.compile(pattern_exclude)
327 | else:
328 | self.pattern_exclude = None
329 | except configparser.NoOptionError as e:
330 | self.pattern_exclude = None
331 |
332 | # Instance filters (see boto and EC2 API docs). Ignore invalid filters.
333 | self.ec2_instance_filters = defaultdict(list)
334 | if config.has_option('ec2', 'instance_filters'):
335 | for instance_filter in config.get('ec2', 'instance_filters', '').split(','):
336 | instance_filter = instance_filter.strip()
337 | if not instance_filter or '=' not in instance_filter:
338 | continue
339 | filter_key, filter_value = [x.strip() for x in instance_filter.split('=', 1)]
340 | if not filter_key:
341 | continue
342 | self.ec2_instance_filters[filter_key].append(filter_value)
343 |
344 | def parse_cli_args(self):
345 | ''' Command line argument processing '''
346 |
347 | parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on EC2')
348 | parser.add_argument('--list', action='store_true', default=True,
349 | help='List instances (default: True)')
350 | parser.add_argument('--host', action='store',
351 | help='Get all the variables about a specific instance')
352 | parser.add_argument('--refresh-cache', action='store_true', default=False,
353 | help='Force refresh of cache by making API requests to EC2 (default: False - use cache files)')
354 | self.args = parser.parse_args()
355 |
356 |
    def do_api_calls_update_cache(self):
        ''' Do API calls to each region, and save data in cache files.

        Route53 records (zone-wide, not per-region) are fetched first when
        enabled; then each configured region is queried for EC2 instances
        and, when enabled, RDS instances and ElastiCache clusters plus
        replication groups.  The accumulated inventory and host index are
        written to the cache files afterwards.
        '''

        if self.route53_enabled:
            self.get_route53_records()

        for region in self.regions:
            self.get_instances_by_region(region)
            if self.rds_enabled:
                self.get_rds_instances_by_region(region)
            if self.elasticache_enabled:
                self.get_elasticache_clusters_by_region(region)
                self.get_elasticache_replication_groups_by_region(region)

        # Persist results so later runs within cache_max_age can skip the API
        self.write_to_cache(self.inventory, self.cache_path_cache)
        self.write_to_cache(self.index, self.cache_path_index)
373 |
374 | def connect(self, region):
375 | ''' create connection to api server'''
376 | if self.eucalyptus:
377 | conn = boto.connect_euca(host=self.eucalyptus_host)
378 | conn.APIVersion = '2010-08-31'
379 | else:
380 | conn = ec2.connect_to_region(region)
381 | # connect_to_region will fail "silently" by returning None if the region name is wrong or not supported
382 | if conn is None:
383 | self.fail_with_error("region name: %s likely not supported, or AWS is down. connection to region failed." % region)
384 | return conn
385 |
386 | def get_instances_by_region(self, region):
387 | ''' Makes an AWS EC2 API call to the list of instances in a particular
388 | region '''
389 |
390 | try:
391 | conn = self.connect(region)
392 | reservations = []
393 | if self.ec2_instance_filters:
394 | for filter_key, filter_values in self.ec2_instance_filters.items():
395 | reservations.extend(conn.get_all_instances(filters = { filter_key : filter_values }))
396 | else:
397 | reservations = conn.get_all_instances()
398 |
399 | for reservation in reservations:
400 | for instance in reservation.instances:
401 | self.add_instance(instance, region)
402 |
403 | except boto.exception.BotoServerError as e:
404 | if e.error_code == 'AuthFailure':
405 | error = self.get_auth_error_message()
406 | else:
407 | backend = 'Eucalyptus' if self.eucalyptus else 'AWS'
408 | error = "Error connecting to %s backend.\n%s" % (backend, e.message)
409 | self.fail_with_error(error)
410 |
411 | def get_rds_instances_by_region(self, region):
412 | ''' Makes an AWS API call to the list of RDS instances in a particular
413 | region '''
414 |
415 | try:
416 | conn = rds.connect_to_region(region)
417 | if conn:
418 | instances = conn.get_all_dbinstances()
419 | for instance in instances:
420 | self.add_rds_instance(instance, region)
421 | except boto.exception.BotoServerError as e:
422 | error = e.reason
423 |
424 | if e.error_code == 'AuthFailure':
425 | error = self.get_auth_error_message()
426 | if not e.reason == "Forbidden":
427 | error = "Looks like AWS RDS is down:\n%s" % e.message
428 | self.fail_with_error(error)
429 |
430 | def get_elasticache_clusters_by_region(self, region):
431 | ''' Makes an AWS API call to the list of ElastiCache clusters (with
432 | nodes' info) in a particular region.'''
433 |
434 | # ElastiCache boto module doesn't provide a get_all_intances method,
435 | # that's why we need to call describe directly (it would be called by
436 | # the shorthand method anyway...)
437 | try:
438 | conn = elasticache.connect_to_region(region)
439 | if conn:
440 | # show_cache_node_info = True
441 | # because we also want nodes' information
442 | response = conn.describe_cache_clusters(None, None, None, True)
443 |
444 | except boto.exception.BotoServerError as e:
445 | error = e.reason
446 |
447 | if e.error_code == 'AuthFailure':
448 | error = self.get_auth_error_message()
449 | if not e.reason == "Forbidden":
450 | error = "Looks like AWS ElastiCache is down:\n%s" % e.message
451 | self.fail_with_error(error)
452 |
453 | try:
454 | # Boto also doesn't provide wrapper classes to CacheClusters or
455 | # CacheNodes. Because of that wo can't make use of the get_list
456 | # method in the AWSQueryConnection. Let's do the work manually
457 | clusters = response['DescribeCacheClustersResponse']['DescribeCacheClustersResult']['CacheClusters']
458 |
459 | except KeyError as e:
460 | error = "ElastiCache query to AWS failed (unexpected format)."
461 | self.fail_with_error(error)
462 |
463 | for cluster in clusters:
464 | self.add_elasticache_cluster(cluster, region)
465 |
466 | def get_elasticache_replication_groups_by_region(self, region):
467 | ''' Makes an AWS API call to the list of ElastiCache replication groups
468 | in a particular region.'''
469 |
470 | # ElastiCache boto module doesn't provide a get_all_intances method,
471 | # that's why we need to call describe directly (it would be called by
472 | # the shorthand method anyway...)
473 | try:
474 | conn = elasticache.connect_to_region(region)
475 | if conn:
476 | response = conn.describe_replication_groups()
477 |
478 | except boto.exception.BotoServerError as e:
479 | error = e.reason
480 |
481 | if e.error_code == 'AuthFailure':
482 | error = self.get_auth_error_message()
483 | if not e.reason == "Forbidden":
484 | error = "Looks like AWS ElastiCache [Replication Groups] is down:\n%s" % e.message
485 | self.fail_with_error(error)
486 |
487 | try:
488 | # Boto also doesn't provide wrapper classes to ReplicationGroups
489 | # Because of that wo can't make use of the get_list method in the
490 | # AWSQueryConnection. Let's do the work manually
491 | replication_groups = response['DescribeReplicationGroupsResponse']['DescribeReplicationGroupsResult']['ReplicationGroups']
492 |
493 | except KeyError as e:
494 | error = "ElastiCache [Replication Groups] query to AWS failed (unexpected format)."
495 | self.fail_with_error(error)
496 |
497 | for replication_group in replication_groups:
498 | self.add_elasticache_replication_group(replication_group, region)
499 |
500 | def get_auth_error_message(self):
501 | ''' create an informative error message if there is an issue authenticating'''
502 | errors = ["Authentication error retrieving ec2 inventory."]
503 | if None in [os.environ.get('AWS_ACCESS_KEY_ID'), os.environ.get('AWS_SECRET_ACCESS_KEY')]:
504 | errors.append(' - No AWS_ACCESS_KEY_ID or AWS_SECRET_ACCESS_KEY environment vars found')
505 | else:
506 | errors.append(' - AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment vars found but may not be correct')
507 |
508 | boto_paths = ['/etc/boto.cfg', '~/.boto', '~/.aws/credentials']
509 | boto_config_found = list(p for p in boto_paths if os.path.isfile(os.path.expanduser(p)))
510 | if len(boto_config_found) > 0:
511 | errors.append(" - Boto configs found at '%s', but the credentials contained may not be correct" % ', '.join(boto_config_found))
512 | else:
513 | errors.append(" - No Boto config found at any expected location '%s'" % ', '.join(boto_paths))
514 |
515 | return '\n'.join(errors)
516 |
517 | def fail_with_error(self, err_msg):
518 | '''log an error to std err for ansible-playbook to consume and exit'''
519 | sys.stderr.write(err_msg)
520 | sys.exit(1)
521 |
522 | def get_instance(self, region, instance_id):
523 | conn = self.connect(region)
524 |
525 | reservations = conn.get_all_instances([instance_id])
526 | for reservation in reservations:
527 | for instance in reservation.instances:
528 | return instance
529 |
    def add_instance(self, instance, region):
        ''' Adds an instance to the inventory and index, as long as it is
        addressable.

        The destination address comes from the configured
        destination_variable attribute (vpc_destination_variable for VPC
        instances), falling back to an instance tag of the same name.  The
        instance is then placed into every enabled group_by_* group and its
        variables are stored under _meta.hostvars keyed by that address.
        '''

        # Only want running instances unless all_instances is True
        if not self.all_instances and instance.state != 'running':
            return

        # Select the best destination address
        if instance.subnet_id:
            # VPC instance: configured attribute first, then a tag whose name
            # matches the configured variable
            dest = getattr(instance, self.vpc_destination_variable, None)
            if dest is None:
                dest = getattr(instance, 'tags').get(self.vpc_destination_variable, None)
        else:
            dest = getattr(instance, self.destination_variable, None)
            if dest is None:
                dest = getattr(instance, 'tags').get(self.destination_variable, None)

        if not dest:
            # Skip instances we cannot address (e.g. private VPC subnet)
            return

        # if we only want to include hosts that match a pattern, skip those that don't
        if self.pattern_include and not self.pattern_include.match(dest):
            return

        # if we need to exclude hosts that match a pattern, skip those
        if self.pattern_exclude and self.pattern_exclude.match(dest):
            return

        # Add to index
        self.index[dest] = [region, instance.id]

        # Inventory: Group by instance ID (always a group of 1)
        if self.group_by_instance_id:
            self.inventory[instance.id] = [dest]
            if self.nested_groups:
                self.push_group(self.inventory, 'instances', instance.id)

        # Inventory: Group by region
        if self.group_by_region:
            self.push(self.inventory, region, dest)
            if self.nested_groups:
                self.push_group(self.inventory, 'regions', region)

        # Inventory: Group by availability zone
        if self.group_by_availability_zone:
            self.push(self.inventory, instance.placement, dest)
            if self.nested_groups:
                if self.group_by_region:
                    self.push_group(self.inventory, region, instance.placement)
                self.push_group(self.inventory, 'zones', instance.placement)

        # Inventory: Group by Amazon Machine Image (AMI) ID
        if self.group_by_ami_id:
            ami_id = self.to_safe(instance.image_id)
            self.push(self.inventory, ami_id, dest)
            if self.nested_groups:
                self.push_group(self.inventory, 'images', ami_id)

        # Inventory: Group by instance type
        if self.group_by_instance_type:
            type_name = self.to_safe('type_' + instance.instance_type)
            self.push(self.inventory, type_name, dest)
            if self.nested_groups:
                self.push_group(self.inventory, 'types', type_name)

        # Inventory: Group by key pair
        if self.group_by_key_pair and instance.key_name:
            key_name = self.to_safe('key_' + instance.key_name)
            self.push(self.inventory, key_name, dest)
            if self.nested_groups:
                self.push_group(self.inventory, 'keys', key_name)

        # Inventory: Group by VPC
        if self.group_by_vpc_id and instance.vpc_id:
            vpc_id_name = self.to_safe('vpc_id_' + instance.vpc_id)
            self.push(self.inventory, vpc_id_name, dest)
            if self.nested_groups:
                self.push_group(self.inventory, 'vpcs', vpc_id_name)

        # Inventory: Group by security group
        if self.group_by_security_group:
            try:
                # instance.groups requires boto >= 2.3.0 (hence the
                # AttributeError fallback below)
                for group in instance.groups:
                    key = self.to_safe("security_group_" + group.name)
                    self.push(self.inventory, key, dest)
                    if self.nested_groups:
                        self.push_group(self.inventory, 'security_groups', key)
            except AttributeError:
                self.fail_with_error('\n'.join(['Package boto seems a bit older.',
                                                'Please upgrade boto >= 2.3.0.']))

        # Inventory: Group by tag keys
        if self.group_by_tag_keys:
            for k, v in instance.tags.items():
                # Tags with a value become tag_key=value groups; empty-valued
                # tags become tag_key groups
                if v:
                    key = self.to_safe("tag_" + k + "=" + v)
                else:
                    key = self.to_safe("tag_" + k)
                self.push(self.inventory, key, dest)
                if self.nested_groups:
                    self.push_group(self.inventory, 'tags', self.to_safe("tag_" + k))
                    self.push_group(self.inventory, self.to_safe("tag_" + k), key)

        # Inventory: Group by Route53 domain names if enabled
        if self.route53_enabled and self.group_by_route53_names:
            route53_names = self.get_instance_route53_names(instance)
            for name in route53_names:
                self.push(self.inventory, name, dest)
                if self.nested_groups:
                    self.push_group(self.inventory, 'route53', name)

        # Global Tag: instances without tags
        if self.group_by_tag_none and len(instance.tags) == 0:
            self.push(self.inventory, 'tag_none', dest)
            if self.nested_groups:
                self.push_group(self.inventory, 'tags', 'tag_none')

        # Global Tag: tag all EC2 instances
        self.push(self.inventory, 'ec2', dest)

        self.inventory["_meta"]["hostvars"][dest] = self.get_host_info_dict_from_instance(instance)
653 |
654 |
    def add_rds_instance(self, instance, region):
        ''' Adds an RDS instance to the inventory and index, as long as it is
        addressable.

        The destination address is instance.endpoint[0] — presumably the
        hostname part of boto's (address, port) endpoint pair; the port is
        not recorded (TODO confirm against the boto RDS docs).  The instance
        is placed into the enabled group_by_* groups that apply to RDS.
        '''

        # Only want available instances unless all_rds_instances is True
        if not self.all_rds_instances and instance.status != 'available':
            return

        # Select the best destination address
        dest = instance.endpoint[0]

        if not dest:
            # Skip instances we cannot address (e.g. private VPC subnet)
            return

        # Add to index
        self.index[dest] = [region, instance.id]

        # Inventory: Group by instance ID (always a group of 1)
        if self.group_by_instance_id:
            self.inventory[instance.id] = [dest]
            if self.nested_groups:
                self.push_group(self.inventory, 'instances', instance.id)

        # Inventory: Group by region
        if self.group_by_region:
            self.push(self.inventory, region, dest)
            if self.nested_groups:
                self.push_group(self.inventory, 'regions', region)

        # Inventory: Group by availability zone
        if self.group_by_availability_zone:
            self.push(self.inventory, instance.availability_zone, dest)
            if self.nested_groups:
                if self.group_by_region:
                    self.push_group(self.inventory, region, instance.availability_zone)
                self.push_group(self.inventory, 'zones', instance.availability_zone)

        # Inventory: Group by instance type (RDS calls it the instance class)
        if self.group_by_instance_type:
            type_name = self.to_safe('type_' + instance.instance_class)
            self.push(self.inventory, type_name, dest)
            if self.nested_groups:
                self.push_group(self.inventory, 'types', type_name)

        # Inventory: Group by VPC
        if self.group_by_vpc_id and instance.subnet_group and instance.subnet_group.vpc_id:
            vpc_id_name = self.to_safe('vpc_id_' + instance.subnet_group.vpc_id)
            self.push(self.inventory, vpc_id_name, dest)
            if self.nested_groups:
                self.push_group(self.inventory, 'vpcs', vpc_id_name)

        # Inventory: Group by security group
        if self.group_by_security_group:
            try:
                # security_group support depends on boto version (hence the
                # AttributeError fallback below)
                if instance.security_group:
                    key = self.to_safe("security_group_" + instance.security_group.name)
                    self.push(self.inventory, key, dest)
                    if self.nested_groups:
                        self.push_group(self.inventory, 'security_groups', key)

            except AttributeError:
                self.fail_with_error('\n'.join(['Package boto seems a bit older.',
                                                'Please upgrade boto >= 2.3.0.']))


        # Inventory: Group by engine
        if self.group_by_rds_engine:
            self.push(self.inventory, self.to_safe("rds_" + instance.engine), dest)
            if self.nested_groups:
                self.push_group(self.inventory, 'rds_engines', self.to_safe("rds_" + instance.engine))

        # Inventory: Group by parameter group
        if self.group_by_rds_parameter_group:
            self.push(self.inventory, self.to_safe("rds_parameter_group_" + instance.parameter_group.name), dest)
            if self.nested_groups:
                self.push_group(self.inventory, 'rds_parameter_groups', self.to_safe("rds_parameter_group_" + instance.parameter_group.name))

        # Global Tag: all RDS instances
        self.push(self.inventory, 'rds', dest)

        self.inventory["_meta"]["hostvars"][dest] = self.get_host_info_dict_from_instance(instance)
737 |
    def add_elasticache_cluster(self, cluster, region):
        ''' Adds an ElastiCache cluster to the inventory and index, as long as
        its nodes are addressable.

        Memcached clusters use their configuration endpoint as the address;
        Redis clusters (single node) use the first cache node's endpoint, and
        most per-cluster grouping is skipped for them because the node-level
        add (add_elasticache_node) covers it.  Each cache node is added
        afterwards as well.
        '''

        # Only want available clusters unless all_elasticache_clusters is True
        if not self.all_elasticache_clusters and cluster['CacheClusterStatus'] != 'available':
            return

        # Select the best destination address
        if 'ConfigurationEndpoint' in cluster and cluster['ConfigurationEndpoint']:
            # Memcached cluster
            dest = cluster['ConfigurationEndpoint']['Address']
            is_redis = False
        else:
            # Redis single-node cluster
            # Because all Redis clusters are single nodes, we'll merge the
            # info from the cluster with info about the node
            dest = cluster['CacheNodes'][0]['Endpoint']['Address']
            is_redis = True

        if not dest:
            # Skip clusters we cannot address (e.g. private VPC subnet)
            return

        # Add to index
        self.index[dest] = [region, cluster['CacheClusterId']]

        # Inventory: Group by instance ID (always a group of 1)
        if self.group_by_instance_id:
            self.inventory[cluster['CacheClusterId']] = [dest]
            if self.nested_groups:
                self.push_group(self.inventory, 'instances', cluster['CacheClusterId'])

        # Inventory: Group by region
        if self.group_by_region and not is_redis:
            self.push(self.inventory, region, dest)
            if self.nested_groups:
                self.push_group(self.inventory, 'regions', region)

        # Inventory: Group by availability zone
        if self.group_by_availability_zone and not is_redis:
            self.push(self.inventory, cluster['PreferredAvailabilityZone'], dest)
            if self.nested_groups:
                if self.group_by_region:
                    self.push_group(self.inventory, region, cluster['PreferredAvailabilityZone'])
                self.push_group(self.inventory, 'zones', cluster['PreferredAvailabilityZone'])

        # Inventory: Group by node type
        if self.group_by_instance_type and not is_redis:
            type_name = self.to_safe('type_' + cluster['CacheNodeType'])
            self.push(self.inventory, type_name, dest)
            if self.nested_groups:
                self.push_group(self.inventory, 'types', type_name)

        # Inventory: Group by VPC (information not available in the current
        # AWS API version for ElastiCache)

        # Inventory: Group by security group
        if self.group_by_security_group and not is_redis:

            # Check for the existence of the 'SecurityGroups' key and also if
            # this key has some value. When the cluster is not placed in a SG
            # the query can return None here and cause an error.
            if 'SecurityGroups' in cluster and cluster['SecurityGroups'] is not None:
                for security_group in cluster['SecurityGroups']:
                    key = self.to_safe("security_group_" + security_group['SecurityGroupId'])
                    self.push(self.inventory, key, dest)
                    if self.nested_groups:
                        self.push_group(self.inventory, 'security_groups', key)

        # Inventory: Group by engine
        if self.group_by_elasticache_engine and not is_redis:
            self.push(self.inventory, self.to_safe("elasticache_" + cluster['Engine']), dest)
            if self.nested_groups:
                self.push_group(self.inventory, 'elasticache_engines', self.to_safe(cluster['Engine']))

        # Inventory: Group by parameter group
        if self.group_by_elasticache_parameter_group:
            self.push(self.inventory, self.to_safe("elasticache_parameter_group_" + cluster['CacheParameterGroup']['CacheParameterGroupName']), dest)
            if self.nested_groups:
                self.push_group(self.inventory, 'elasticache_parameter_groups', self.to_safe(cluster['CacheParameterGroup']['CacheParameterGroupName']))

        # Inventory: Group by replication group
        if self.group_by_elasticache_replication_group and 'ReplicationGroupId' in cluster and cluster['ReplicationGroupId']:
            self.push(self.inventory, self.to_safe("elasticache_replication_group_" + cluster['ReplicationGroupId']), dest)
            if self.nested_groups:
                self.push_group(self.inventory, 'elasticache_replication_groups', self.to_safe(cluster['ReplicationGroupId']))

        # Global Tag: all ElastiCache clusters
        # NOTE(review): this pushes the cluster ID rather than `dest`, unlike
        # every other global-tag push; members of 'elasticache_clusters' will
        # therefore not match the hostvars keys. Confirm whether intended.
        self.push(self.inventory, 'elasticache_clusters', cluster['CacheClusterId'])

        host_info = self.get_host_info_dict_from_describe_dict(cluster)

        self.inventory["_meta"]["hostvars"][dest] = host_info

        # Add the nodes
        for node in cluster['CacheNodes']:
            self.add_elasticache_node(node, cluster, region)
836 |
    def add_elasticache_node(self, node, cluster, region):
        ''' Adds an ElastiCache node to the inventory and index, as long as
        it is addressable.

        The node is keyed in the index as "<CacheClusterId>_<CacheNodeId>"
        (made safe by to_safe).  Node hostvars are merged into any existing
        hostvars for the same address (e.g. those added at cluster level),
        rather than replacing them.
        '''

        # Only want available nodes unless all_elasticache_nodes is True
        if not self.all_elasticache_nodes and node['CacheNodeStatus'] != 'available':
            return

        # Select the best destination address
        dest = node['Endpoint']['Address']

        if not dest:
            # Skip nodes we cannot address (e.g. private VPC subnet)
            return

        node_id = self.to_safe(cluster['CacheClusterId'] + '_' + node['CacheNodeId'])

        # Add to index
        self.index[dest] = [region, node_id]

        # Inventory: Group by node ID (always a group of 1)
        if self.group_by_instance_id:
            self.inventory[node_id] = [dest]
            if self.nested_groups:
                self.push_group(self.inventory, 'instances', node_id)

        # Inventory: Group by region
        if self.group_by_region:
            self.push(self.inventory, region, dest)
            if self.nested_groups:
                self.push_group(self.inventory, 'regions', region)

        # Inventory: Group by availability zone
        if self.group_by_availability_zone:
            self.push(self.inventory, cluster['PreferredAvailabilityZone'], dest)
            if self.nested_groups:
                if self.group_by_region:
                    self.push_group(self.inventory, region, cluster['PreferredAvailabilityZone'])
                self.push_group(self.inventory, 'zones', cluster['PreferredAvailabilityZone'])

        # Inventory: Group by node type (taken from the parent cluster)
        if self.group_by_instance_type:
            type_name = self.to_safe('type_' + cluster['CacheNodeType'])
            self.push(self.inventory, type_name, dest)
            if self.nested_groups:
                self.push_group(self.inventory, 'types', type_name)

        # Inventory: Group by VPC (information not available in the current
        # AWS API version for ElastiCache)

        # Inventory: Group by security group
        if self.group_by_security_group:

            # Check for the existence of the 'SecurityGroups' key and also if
            # this key has some value. When the cluster is not placed in a SG
            # the query can return None here and cause an error.
            if 'SecurityGroups' in cluster and cluster['SecurityGroups'] is not None:
                for security_group in cluster['SecurityGroups']:
                    key = self.to_safe("security_group_" + security_group['SecurityGroupId'])
                    self.push(self.inventory, key, dest)
                    if self.nested_groups:
                        self.push_group(self.inventory, 'security_groups', key)

        # Inventory: Group by engine (taken from the parent cluster)
        if self.group_by_elasticache_engine:
            self.push(self.inventory, self.to_safe("elasticache_" + cluster['Engine']), dest)
            if self.nested_groups:
                self.push_group(self.inventory, 'elasticache_engines', self.to_safe("elasticache_" + cluster['Engine']))

        # Inventory: Group by parameter group (done at cluster level)

        # Inventory: Group by replication group (done at cluster level)

        # Inventory: Group by ElastiCache Cluster
        if self.group_by_elasticache_cluster:
            self.push(self.inventory, self.to_safe("elasticache_cluster_" + cluster['CacheClusterId']), dest)

        # Global Tag: all ElastiCache nodes
        self.push(self.inventory, 'elasticache_nodes', dest)

        host_info = self.get_host_info_dict_from_describe_dict(node)

        # Merge node info into cluster-level hostvars when the address is
        # already known; otherwise start fresh
        if dest in self.inventory["_meta"]["hostvars"]:
            self.inventory["_meta"]["hostvars"][dest].update(host_info)
        else:
            self.inventory["_meta"]["hostvars"][dest] = host_info
923 |
    def add_elasticache_replication_group(self, replication_group, region):
        ''' Adds an ElastiCache replication group to the inventory and index.

        The group's primary endpoint address becomes the inventory hostname;
        groups without an addressable endpoint are skipped. '''

        # Only want available clusters unless all_elasticache_replication_groups is True
        if not self.all_elasticache_replication_groups and replication_group['Status'] != 'available':
            return

        # Select the best destination address (PrimaryEndpoint)
        # NOTE(review): only the first node group is inspected — presumably
        # fine for non-cluster-mode Redis; confirm for cluster-mode groups.
        dest = replication_group['NodeGroups'][0]['PrimaryEndpoint']['Address']

        if not dest:
            # Skip clusters we cannot address (e.g. private VPC subnet)
            return

        # Add to index
        self.index[dest] = [region, replication_group['ReplicationGroupId']]

        # Inventory: Group by ID (always a group of 1)
        if self.group_by_instance_id:
            self.inventory[replication_group['ReplicationGroupId']] = [dest]
            if self.nested_groups:
                self.push_group(self.inventory, 'instances', replication_group['ReplicationGroupId'])

        # Inventory: Group by region
        if self.group_by_region:
            self.push(self.inventory, region, dest)
            if self.nested_groups:
                self.push_group(self.inventory, 'regions', region)

        # Inventory: Group by availability zone (doesn't apply to replication groups)

        # Inventory: Group by node type (doesn't apply to replication groups)

        # Inventory: Group by VPC (information not available in the current
        # AWS API version for replication groups

        # Inventory: Group by security group (doesn't apply to replication groups)
        # Check this value in cluster level

        # Inventory: Group by engine (replication groups are always Redis)
        if self.group_by_elasticache_engine:
            self.push(self.inventory, 'elasticache_redis', dest)
            if self.nested_groups:
                # NOTE(review): the nested child is named 'redis' while the
                # flat group above is 'elasticache_redis' — confirm this
                # asymmetry is intentional.
                self.push_group(self.inventory, 'elasticache_engines', 'redis')

        # Global Tag: all ElastiCache clusters
        # NOTE(review): this pushes the replication group ID, not the host
        # address (dest), unlike the node-level grouping — confirm intentional.
        self.push(self.inventory, 'elasticache_replication_groups', replication_group['ReplicationGroupId'])

        host_info = self.get_host_info_dict_from_describe_dict(replication_group)

        self.inventory["_meta"]["hostvars"][dest] = host_info
975 |
976 | def get_route53_records(self):
977 | ''' Get and store the map of resource records to domain names that
978 | point to them. '''
979 |
980 | r53_conn = route53.Route53Connection()
981 | all_zones = r53_conn.get_zones()
982 |
983 | route53_zones = [ zone for zone in all_zones if zone.name[:-1]
984 | not in self.route53_excluded_zones ]
985 |
986 | self.route53_records = {}
987 |
988 | for zone in route53_zones:
989 | rrsets = r53_conn.get_all_rrsets(zone.id)
990 |
991 | for record_set in rrsets:
992 | record_name = record_set.name
993 |
994 | if record_name.endswith('.'):
995 | record_name = record_name[:-1]
996 |
997 | for resource in record_set.resource_records:
998 | self.route53_records.setdefault(resource, set())
999 | self.route53_records[resource].add(record_name)
1000 |
1001 |
1002 | def get_instance_route53_names(self, instance):
1003 | ''' Check if an instance is referenced in the records we have from
1004 | Route53. If it is, return the list of domain names pointing to said
1005 | instance. If nothing points to it, return an empty list. '''
1006 |
1007 | instance_attributes = [ 'public_dns_name', 'private_dns_name',
1008 | 'ip_address', 'private_ip_address' ]
1009 |
1010 | name_list = set()
1011 |
1012 | for attrib in instance_attributes:
1013 | try:
1014 | value = getattr(instance, attrib)
1015 | except AttributeError:
1016 | continue
1017 |
1018 | if value in self.route53_records:
1019 | name_list.update(self.route53_records[value])
1020 |
1021 | return list(name_list)
1022 |
    def get_host_info_dict_from_instance(self, instance):
        ''' Flatten a boto EC2 instance object into a dict of 'ec2_*'
        host variables for Ansible. '''
        instance_vars = {}
        for key in vars(instance):
            value = getattr(instance, key)
            key = self.to_safe('ec2_' + key)

            # Handle complex types
            # state/previous_state changed to properties in boto in https://github.com/boto/boto/commit/a23c379837f698212252720d2af8dec0325c9518
            if key == 'ec2__state':
                instance_vars['ec2_state'] = instance.state or ''
                instance_vars['ec2_state_code'] = instance.state_code
            elif key == 'ec2__previous_state':
                instance_vars['ec2_previous_state'] = instance.previous_state or ''
                instance_vars['ec2_previous_state_code'] = instance.previous_state_code
            elif type(value) in [int, bool]:
                # Preserve integers and booleans as-is
                instance_vars[key] = value
            elif isinstance(value, six.string_types):
                # Sanitize string values
                instance_vars[key] = value.strip()
            elif type(value) == type(None):
                # Replace None with an empty string
                instance_vars[key] = ''
            elif key == 'ec2_region':
                instance_vars[key] = value.name
            elif key == 'ec2__placement':
                instance_vars['ec2_placement'] = value.zone
            elif key == 'ec2_tags':
                # One 'ec2_tag_<name>' variable per EC2 tag
                # (note: this deliberately rebinds the loop's `key` variable)
                for k, v in value.items():
                    key = self.to_safe('ec2_tag_' + k)
                    instance_vars[key] = v
            elif key == 'ec2_groups':
                # Flatten security groups into comma-separated id/name lists
                group_ids = []
                group_names = []
                for group in value:
                    group_ids.append(group.id)
                    group_names.append(group.name)
                instance_vars["ec2_security_group_ids"] = ','.join([str(i) for i in group_ids])
                instance_vars["ec2_security_group_names"] = ','.join([str(i) for i in group_names])
            else:
                # Any other complex type is silently dropped
                pass
                # TODO Product codes if someone finds them useful
                #print key
                #print type(value)
                #print value

        return instance_vars
1067 |
    def get_host_info_dict_from_describe_dict(self, describe_dict):
        ''' Parses the dictionary returned by the API call into a flat list
        of parameters. This method should be used only when 'describe' is
        used directly because Boto doesn't provide specific classes. '''

        # I really don't agree with prefixing everything with 'ec2'
        # because EC2, RDS and ElastiCache are different services.
        # I'm just following the pattern used until now to not break any
        # compatibility.

        host_info = {}
        for key in describe_dict:
            value = describe_dict[key]
            # CamelCase API keys become snake_case 'ec2_*' variables
            key = self.to_safe('ec2_' + self.uncammelize(key))

            # Handle complex types

            # NOTE(review): the first three checks are standalone `if`s while
            # everything from 'ec2_member_clusters' down forms one elif-chain;
            # keys caught by the standalone `if`s still reach the chain and
            # land in the no-op `else`.  Correct, but order-sensitive — do not
            # reorder casually.

            # Target: Memcached Cache Clusters
            if key == 'ec2_configuration_endpoint' and value:
                host_info['ec2_configuration_endpoint_address'] = value['Address']
                host_info['ec2_configuration_endpoint_port'] = value['Port']

            # Target: Cache Nodes and Redis Cache Clusters (single node)
            if key == 'ec2_endpoint' and value:
                host_info['ec2_endpoint_address'] = value['Address']
                host_info['ec2_endpoint_port'] = value['Port']

            # Target: Redis Replication Groups
            if key == 'ec2_node_groups' and value:
                # Primary endpoint plus one numbered variable set per replica
                # ('ec2_replica_cluster_*_<n>')
                host_info['ec2_endpoint_address'] = value[0]['PrimaryEndpoint']['Address']
                host_info['ec2_endpoint_port'] = value[0]['PrimaryEndpoint']['Port']
                replica_count = 0
                for node in value[0]['NodeGroupMembers']:
                    if node['CurrentRole'] == 'primary':
                        host_info['ec2_primary_cluster_address'] = node['ReadEndpoint']['Address']
                        host_info['ec2_primary_cluster_port'] = node['ReadEndpoint']['Port']
                        host_info['ec2_primary_cluster_id'] = node['CacheClusterId']
                    elif node['CurrentRole'] == 'replica':
                        host_info['ec2_replica_cluster_address_'+ str(replica_count)] = node['ReadEndpoint']['Address']
                        host_info['ec2_replica_cluster_port_'+ str(replica_count)] = node['ReadEndpoint']['Port']
                        host_info['ec2_replica_cluster_id_'+ str(replica_count)] = node['CacheClusterId']
                        replica_count += 1

            # Target: Redis Replication Groups
            if key == 'ec2_member_clusters' and value:
                host_info['ec2_member_clusters'] = ','.join([str(i) for i in value])

            # Target: All Cache Clusters
            elif key == 'ec2_cache_parameter_group':
                host_info["ec2_cache_node_ids_to_reboot"] = ','.join([str(i) for i in value['CacheNodeIdsToReboot']])
                host_info['ec2_cache_parameter_group_name'] = value['CacheParameterGroupName']
                host_info['ec2_cache_parameter_apply_status'] = value['ParameterApplyStatus']

            # Target: Almost everything
            elif key == 'ec2_security_groups':

                # Skip if SecurityGroups is None
                # (it is possible to have the key defined but no value in it).
                if value is not None:
                    sg_ids = []
                    for sg in value:
                        sg_ids.append(sg['SecurityGroupId'])
                    host_info["ec2_security_group_ids"] = ','.join([str(i) for i in sg_ids])

            # Target: Everything
            # Preserve booleans and integers
            elif type(value) in [int, bool]:
                host_info[key] = value

            # Target: Everything
            # Sanitize string values
            elif isinstance(value, six.string_types):
                host_info[key] = value.strip()

            # Target: Everything
            # Replace None by an empty string
            elif type(value) == type(None):
                host_info[key] = ''

            else:
                # Remove non-processed complex types
                pass

        return host_info
1152 |
1153 | def get_host_info(self):
1154 | ''' Get variables about a specific host '''
1155 |
1156 | if len(self.index) == 0:
1157 | # Need to load index from cache
1158 | self.load_index_from_cache()
1159 |
1160 | if not self.args.host in self.index:
1161 | # try updating the cache
1162 | self.do_api_calls_update_cache()
1163 | if not self.args.host in self.index:
1164 | # host might not exist anymore
1165 | return self.json_format_dict({}, True)
1166 |
1167 | (region, instance_id) = self.index[self.args.host]
1168 |
1169 | instance = self.get_instance(region, instance_id)
1170 | return self.json_format_dict(self.get_host_info_dict_from_instance(instance), True)
1171 |
1172 | def push(self, my_dict, key, element):
1173 | ''' Push an element onto an array that may not have been defined in
1174 | the dict '''
1175 | group_info = my_dict.setdefault(key, [])
1176 | if isinstance(group_info, dict):
1177 | host_list = group_info.setdefault('hosts', [])
1178 | host_list.append(element)
1179 | else:
1180 | group_info.append(element)
1181 |
1182 | def push_group(self, my_dict, key, element):
1183 | ''' Push a group as a child of another group. '''
1184 | parent_group = my_dict.setdefault(key, {})
1185 | if not isinstance(parent_group, dict):
1186 | parent_group = my_dict[key] = {'hosts': parent_group}
1187 | child_groups = parent_group.setdefault('children', [])
1188 | if element not in child_groups:
1189 | child_groups.append(element)
1190 |
1191 | def get_inventory_from_cache(self):
1192 | ''' Reads the inventory from the cache file and returns it as a JSON
1193 | object '''
1194 |
1195 | cache = open(self.cache_path_cache, 'r')
1196 | json_inventory = cache.read()
1197 | return json_inventory
1198 |
1199 |
1200 | def load_index_from_cache(self):
1201 | ''' Reads the index from the cache file sets self.index '''
1202 |
1203 | cache = open(self.cache_path_index, 'r')
1204 | json_index = cache.read()
1205 | self.index = json.loads(json_index)
1206 |
1207 |
1208 | def write_to_cache(self, data, filename):
1209 | ''' Writes data in JSON format to a file '''
1210 |
1211 | json_data = self.json_format_dict(data, True)
1212 | cache = open(filename, 'w')
1213 | cache.write(json_data)
1214 | cache.close()
1215 |
1216 | def uncammelize(self, key):
1217 | temp = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', key)
1218 | return re.sub('([a-z0-9])([A-Z])', r'\1_\2', temp).lower()
1219 |
1220 | def to_safe(self, word):
1221 | ''' Converts 'bad' characters in a string to underscores so they can be
1222 | used as Ansible groups '''
1223 |
1224 | return re.sub("[^A-Za-z0-9\_]", "_", word)
1225 |
1226 | def json_format_dict(self, data, pretty=False):
1227 | ''' Converts a dict to a JSON object and dumps it as a formatted
1228 | string '''
1229 |
1230 | if pretty:
1231 | return json.dumps(data, sort_keys=True, indent=2)
1232 | else:
1233 | return json.dumps(data)
1234 |
1235 |
# Run the script: all work happens as a side effect of Ec2Inventory.__init__
# (argument parsing and JSON output presumably occur there — the definition
# is not shown in this chunk).
Ec2Inventory()
1238 |
--------------------------------------------------------------------------------
/aws-full-stack/setup.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
# Install the tooling needed to run the AWS full-stack playbooks:
# Ansible, pip and the boto AWS SDK.

#uncomment this if you want to grab the latest ec2 inventory script
#mkdir inventory
#cd inventory
#wget https://raw.githubusercontent.com/ansible/ansible/devel/contrib/inventory/ec2.py
#wget https://raw.githubusercontent.com/ansible/ansible/devel/contrib/inventory/ec2.ini
#chmod +x ec2.py
#cd ..

# -p makes the script re-runnable when the directory already exists
mkdir -p keys

# -y answers the install prompts so the script can run unattended
sudo apt-get install -y ansible
sudo apt-get install -y python-pip
sudo pip install boto
--------------------------------------------------------------------------------
/mysql-db/.gitignore:
--------------------------------------------------------------------------------
1 | .vagrant
--------------------------------------------------------------------------------
/mysql-db/README.md:
--------------------------------------------------------------------------------
1 | # Dev MySql DATABASE
2 |
3 | Vagrant scripts to create a local MySql database for development work.
4 |
5 | * `vagrant up` to start
6 | * Database will be on _localhost_ on port _3306_
7 | * Root password is _password123_
* Creates a _dev_ database with a _dev_ user with full rights. Password is _password123_ for the _dev_ user
9 |
--------------------------------------------------------------------------------
/mysql-db/VagrantFile:
--------------------------------------------------------------------------------
# -*- mode: ruby -*-
# vi: set ft=ruby :

# Vagrant definition for a local MySQL development VM (see README.md).
Vagrant.configure(2) do |config|

  config.vm.box = "ubuntu/trusty32"
  config.vm.hostname = "mysql-db"

  config.vm.provider "virtualbox" do |vb|
    vb.memory = "512"
    # hack to work around Ubuntu 14.04 + Intel NIC bug. See https://github.com/mitchellh/vagrant/issues/3860#issuecomment-167664778
    vb.customize ["modifyvm", :id, "--nictype1", "Am79C973"]
  end

  # Install and configure MySQL via the bootstrap script
  config.vm.provision "shell", path: "bootstrap.sh"

  # Expose MySQL to the host at localhost:3306
  config.vm.network "forwarded_port", guest: 3306, host: 3306

end
20 |
--------------------------------------------------------------------------------
/mysql-db/bootstrap.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
# Provision a MySQL server with a 'dev' database and 'dev' user
# (passwords match the values documented in README.md).

# Refresh package lists once — the provisioner already runs as root, so the
# original duplicate 'sudo apt-get update' was redundant.
apt-get update

# Pre-seed the MySQL root password so the install is non-interactive
debconf-set-selections <<< 'mysql-server mysql-server/root_password password password123'
debconf-set-selections <<< 'mysql-server mysql-server/root_password_again password password123'

apt-get -y install mysql-server

# Listen on all interfaces so the host can reach the forwarded port
sed -i "s/^bind-address/#bind-address/" /etc/mysql/my.cnf

# Create the dev database and a dev user with full rights on it
mysql -u root -ppassword123 -e "CREATE DATABASE dev;"
mysql -u root -ppassword123 -e "GRANT ALL PRIVILEGES ON dev.* TO 'dev'@'%' IDENTIFIED BY 'password123' WITH GRANT OPTION; FLUSH PRIVILEGES;"

service mysql restart

echo "Done!"
18 |
--------------------------------------------------------------------------------
/simple-web-server-nginx/.gitignore:
--------------------------------------------------------------------------------
1 | .vagrant
--------------------------------------------------------------------------------
/simple-web-server-nginx/Vagrantfile:
--------------------------------------------------------------------------------
# -*- mode: ruby -*-
# vi: set ft=ruby :

# Vagrant definition for a simple nginx VM serving the host's www/ folder.
Vagrant.configure(2) do |config|

  config.vm.box = "ubuntu/trusty32"
  config.vm.hostname = "simplenginx"
  # nginx is reachable from the host at http://127.0.0.1:8989/
  config.vm.network "forwarded_port", guest: 80, host: 8989
  # Host www/ folder is the web root inside the VM (see nginx_conf)
  config.vm.synced_folder "www/", "/var/www-on-host", owner: "root", group: "root"

  config.vm.provider "virtualbox" do |vb|
    vb.memory = "512"
  end

  # Install and configure nginx
  config.vm.provision "shell", path: "bootstrap.sh"

end
18 |
--------------------------------------------------------------------------------
/simple-web-server-nginx/bootstrap.sh:
--------------------------------------------------------------------------------
#!/bin/sh
# Provision nginx from the stable PPA and point it at the host's www/ folder.

# add the nginx stable repo; -y keeps both commands non-interactive so the
# provisioner cannot hang on a prompt (old comments wrongly said "ansible")
apt-get install -y software-properties-common
apt-add-repository -y ppa:nginx/stable

# update package lists
apt-get update

# install nginx
apt-get -y install nginx

# override default config to load content from host disk
cp /vagrant/nginx_conf /etc/nginx/sites-enabled/default
service nginx restart

echo "versions installed:"
nginx -v

echo "Browse to http://127.0.0.1:8989/"
echo "Done."
--------------------------------------------------------------------------------
/simple-web-server-nginx/nginx_conf:
--------------------------------------------------------------------------------
server {
    # Catch-all HTTP server for the VM
    listen 80 default_server;

    # this is to avoid a virtualbox synced-folder bug
    sendfile off;

    # Web root is the folder synced from the host (see Vagrantfile)
    root /var/www-on-host/;

    index index.html ;

    server_name _;

    location / {
        try_files $uri $uri/ =404;
    }

}
--------------------------------------------------------------------------------
/simple-web-server-nginx/www/index.html:
--------------------------------------------------------------------------------
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Hello</title>
</head>
<body>
Hello
</body>
</html>
--------------------------------------------------------------------------------
/wordpress_dev/.gitignore:
--------------------------------------------------------------------------------
1 | .vagrant
--------------------------------------------------------------------------------
/wordpress_dev/README.md:
--------------------------------------------------------------------------------
1 | Vagrant script to set up a Wordpress theme development VM.
2 |
3 | * Run ```vagrant up```
* Then browse to http://localhost/ and complete the installation. Use the default database name, password, etc. values provided
* In the wordpress admin tool, browse to the Themes section. You should see a Dev Theme.
The ```src/theme``` folder on your host is mapped to ```/var/www/html/wp-content/themes/devtheme``` and changes made to the files in ```src/theme``` will be reflected in the "Dev Theme"
7 | * See https://codex.wordpress.org/Theme_Development and https://yoast.com/wordpress-theme-anatomy/ for info on building themes
8 |
9 |
--------------------------------------------------------------------------------
/wordpress_dev/Vagrantfile:
--------------------------------------------------------------------------------
# -*- mode: ruby -*-
# vi: set ft=ruby :

# Vagrant definition for a WordPress theme-development VM (see README.md).
Vagrant.configure(2) do |config|

  config.vm.box = "ubuntu/trusty32"
  config.vm.hostname = "wordpressdev"

  # WordPress is reachable from the host at http://localhost/
  config.vm.network :forwarded_port, guest: 80, host: 80

  config.vm.provider "virtualbox" do |vb|
    vb.memory = "1024"
  end


  # The host's src/theme folder appears as the "Dev Theme" inside WordPress
  config.vm.synced_folder "src/theme/", "/var/www/html/wp-content/themes/devtheme", owner: "www-data", group: "www-data"

  config.vm.provision "shell", path: "bootstrap.sh"

end
21 |
--------------------------------------------------------------------------------
/wordpress_dev/bootstrap.sh:
--------------------------------------------------------------------------------
#!/usr/bin/env bash
# Provision a LAMP stack and install WordPress into /var/www/html.

# update all packages
apt-get update

# install mysql non-interactively with a pre-seeded root password
debconf-set-selections <<< 'mysql-server mysql-server/root_password password password'
debconf-set-selections <<< 'mysql-server mysql-server/root_password_again password password'
apt-get -y install mysql-server

# set up wordpress database and user
# (semicolon moved inside the quotes — it belongs to the SQL statement)
mysql -u root -ppassword -e "CREATE DATABASE wordpress;"
mysql -u root -ppassword -e "GRANT ALL PRIVILEGES ON wordpress.* TO 'username'@'%' IDENTIFIED BY 'password' WITH GRANT OPTION; FLUSH PRIVILEGES;"

# install apache
apt-get -y install apache2 php5 php5-mysql

# install wordpress
cd /tmp
wget http://wordpress.org/latest.tar.gz
tar -xzvf latest.tar.gz
cp -R /tmp/wordpress/* /var/www/html
rm /var/www/html/index.html
# Use the absolute path: the script's cwd is /tmp at this point, so the
# original bare "html" targeted the nonexistent /tmp/html instead of the
# web root, leaving WordPress files owned by root.
chown -R www-data:www-data /var/www/html

echo "Browse to http://localhost/ to complete your installation. (use default values)"

echo "Done."
--------------------------------------------------------------------------------
/wordpress_dev/src/theme/index.php:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/wordpress_dev/src/theme/style.css:
--------------------------------------------------------------------------------
1 | /*
2 | Theme Name: Dev Theme
3 |
4 | Make sure you add more meta data here. See https://codex.wordpress.org/Theme_Development#Theme_Stylesheet
5 |
6 | */
7 |
--------------------------------------------------------------------------------