├── Chapter02 ├── phonebook.yml ├── playbook.yml └── roles │ ├── ec2 │ ├── tasks │ │ └── main.yml │ └── vars │ │ └── main.yml │ └── phonebook │ ├── files │ └── phone-book.service │ └── tasks │ └── main.yml ├── Chapter03 ├── aws.yml ├── ec2.ini ├── ec2.py ├── phonebook.yml └── roles │ ├── aws │ ├── files │ │ ├── hello_world.py │ │ ├── iam_admin.json │ │ ├── myfirstzip.zip │ │ ├── policy_lambda.json │ │ └── text.txt │ ├── tasks │ │ └── main.yml │ └── vars │ │ ├── main.yml │ │ └── secrets.yml │ └── phonebook │ ├── files │ └── phone-book.service │ ├── tasks │ └── main.yml │ └── templates │ └── config.py ├── Chapter04 ├── gce.py ├── phonebook.yml ├── playbook.yml └── roles │ ├── gce │ ├── files │ │ └── google-cloud-sdk.repo │ ├── tasks │ │ ├── configure_gcloud.yml │ │ └── main.yml │ ├── templates │ │ └── google-cloud-sdk.list │ └── vars │ │ ├── main.yml │ │ └── secrets.yml │ └── phonebook │ ├── files │ └── phone-book.service │ ├── tasks │ └── main.yml │ ├── templates │ └── config.py │ └── vars │ └── secrets.yml ├── Chapter05 ├── azure_rm.ini ├── azure_rm.py ├── phonebook.yml ├── playbook.yml └── roles │ ├── azure │ └── tasks │ │ └── main.yml │ └── phonebook │ ├── files │ └── phone-book.service │ └── tasks │ └── main.yml ├── Chapter06 ├── digital_ocean.ini ├── digital_ocean.py ├── phonebook.yml ├── playbook.yml └── roles │ ├── digitalocean │ ├── tasks │ │ └── main.yml │ └── vars │ │ ├── main.yml │ │ └── secrets.yml │ └── phonebook │ ├── files │ └── phone-book.service │ └── tasks │ └── main.yml ├── Chapter07 ├── docker.yml ├── phonebook.yml └── roles │ ├── docker │ ├── files │ │ ├── docker_compose │ │ │ ├── Dockerfile │ │ │ ├── app.py │ │ │ ├── docker-compose.yml │ │ │ └── requirements.txt │ │ └── docker_files │ │ │ ├── Dockerfile │ │ │ ├── index.html │ │ │ └── nginx.conf │ └── tasks │ │ └── main.yml │ └── phonebook │ ├── files │ └── phonebook-docker │ │ ├── Dockerfile │ │ ├── app.py │ │ ├── docker-compose.yml │ │ ├── init.sh │ │ ├── requirements.txt │ │ └── templates │ │ 
├── index.html │ │ └── new.html │ └── tasks │ └── main.yml ├── Chapter08 ├── openstack.py ├── phonebook.yml ├── playbook.yml └── roles │ ├── openstack │ ├── tasks │ │ └── main.yml │ └── vars │ │ └── main.yml │ └── phonebook │ ├── files │ └── phone-book.service │ └── tasks │ └── main.yml ├── LICENSE └── README.md /Chapter02/phonebook.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | roles: 4 | - ec2 5 | - hosts: phonebook-infra 6 | roles: 7 | - phonebook 8 | -------------------------------------------------------------------------------- /Chapter02/playbook.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | roles: 4 | - ec2 5 | -------------------------------------------------------------------------------- /Chapter02/roles/ec2/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: Create AWS VPC 2 | ec2_vpc_net: 3 | name: My VPC 4 | cidr_block: "{{ vpc_cidr_block }}" 5 | region: "{{ aws_region }}" 6 | aws_access_key: "{{ access_key }}" 7 | aws_secret_key: "{{ secret_key }}" 8 | state: present 9 | register: my_first_vpc 10 | tags: 11 | - vpc 12 | - recipe2 13 | 14 | - name: Create Public Subnet in VPC 15 | ec2_vpc_subnet: 16 | vpc_id: "{{ my_first_vpc.vpc.id }}" 17 | cidr: "{{ vpc_public_subnet_cidr }}" 18 | region: "{{ aws_region }}" 19 | az: "{{ aws_region }}a" 20 | aws_access_key: "{{ access_key }}" 21 | aws_secret_key: "{{ secret_key }}" 22 | state: present 23 | tags: 24 | Name: Public Subnet 25 | register: my_public_subnet 26 | tags: 27 | - vpc 28 | - recipe2 29 | 30 | - name: Create Private Subnet in VPC 31 | ec2_vpc_subnet: 32 | vpc_id: "{{ my_first_vpc.vpc.id }}" 33 | cidr: "{{ vpc_private_subnet_cidr }}" 34 | region: "{{ aws_region }}" 35 | az: "{{ aws_region }}a" 36 | aws_access_key: "{{ access_key }}" 37 | aws_secret_key: "{{ secret_key }}" 38 | state: present 39 
| tags: 40 | Name: Private Subnet 41 | register: my_private_subnet 42 | tags: 43 | - vpc 44 | - recipe2 45 | 46 | - name: Create Internet Gateway 47 | ec2_vpc_igw: 48 | vpc_id: "{{ my_first_vpc.vpc.id }}" 49 | region: "{{ aws_region }}" 50 | aws_access_key: "{{ access_key }}" 51 | aws_secret_key: "{{ secret_key }}" 52 | state: present 53 | register: my_first_igw 54 | tags: 55 | - vpc 56 | - recipe2 57 | 58 | - name: Create NAT Gateway 59 | ec2_vpc_nat_gateway: 60 | if_exist_do_not_create: yes 61 | subnet_id: "{{ my_public_subnet.subnet.id }}" 62 | region: "{{ aws_region }}" 63 | state: present 64 | aws_access_key: "{{ access_key }}" 65 | aws_secret_key: "{{ secret_key }}" 66 | wait: yes 67 | register: my_first_nat_gateway 68 | tags: 69 | - vpc 70 | - recipe2 71 | 72 | - name: Create Route Table for Public Subnet 73 | ec2_vpc_route_table: 74 | vpc_id: "{{ my_first_vpc.vpc.id }}" 75 | region: "{{ aws_region }}" 76 | routes: 77 | - dest: 0.0.0.0/0 78 | gateway_id: "{{ my_first_igw.gateway_id }}" 79 | subnets: 80 | - "{{ my_public_subnet.subnet.id }}" 81 | aws_access_key: "{{ access_key }}" 82 | aws_secret_key: "{{ secret_key }}" 83 | tags: 84 | Name: Public Subnet Route Table 85 | tags: 86 | - vpc 87 | - recipe2 88 | 89 | - name: Create Route Table for Private Subnet 90 | ec2_vpc_route_table: 91 | vpc_id: "{{ my_first_vpc.vpc.id }}" 92 | region: "{{ aws_region }}" 93 | routes: 94 | - dest: 0.0.0.0/0 95 | gateway_id: "{{ my_first_nat_gateway.nat_gateway_id }}" 96 | subnets: 97 | - "{{ my_private_subnet.subnet.id }}" 98 | aws_access_key: "{{ access_key }}" 99 | aws_secret_key: "{{ secret_key }}" 100 | tags: 101 | Name: Private Subnet Route Table 102 | tags: 103 | - vpc 104 | - recipe2 105 | 106 | - name: Create EC2 Security Group 107 | ec2_group: 108 | name: my_first_sg 109 | description: A sample security group webservers 110 | vpc_id: "{{ my_first_vpc.vpc.id }}" 111 | region: "{{ aws_region }}" 112 | aws_secret_key: "{{ secret_key }}" 113 | aws_access_key: "{{ 
access_key }}" 114 | rules: 115 | - proto: tcp 116 | from_port: 80 117 | to_port: 80 118 | cidr_ip: 0.0.0.0/0 119 | - proto: tcp 120 | from_port: 22 121 | to_port: 22 122 | cidr_ip: 10.0.0.0/8 123 | - proto: tcp 124 | from_port: 443 125 | to_port: 443 126 | cidr_ip: 0.0.0.0/0 127 | - proto: icmp 128 | from_port: 8 129 | to_port: -1 130 | cidr_ip: 10.0.0.0/8 131 | rules_egress: 132 | - proto: all 133 | cidr_ip: 0.0.0.0/0 134 | register: my_first_sg 135 | tags: 136 | - security_group 137 | - recipe3 138 | 139 | #Creating EC2 Key pair 140 | - name: Create EC2 Key Pair 141 | ec2_key: 142 | name: my_first_key 143 | aws_access_key: "{{ access_key }}" 144 | aws_secret_key: "{{ secret_key }}" 145 | key_material: "{{ key }}" 146 | region: "{{ aws_region }}" 147 | state: present 148 | tags: 149 | - ec2_instance 150 | - recipe4 151 | 152 | - name: Create EC2 Instance in private subnet 153 | ec2: 154 | key_name: my_first_key 155 | instance_type: "{{ instance_type }}" 156 | image: "{{ ami_id }}" 157 | wait: yes 158 | group: my_first_sg 159 | vpc_subnet_id: "{{ my_private_subnet.subnet.id }}" 160 | aws_access_key: "{{ access_key }}" 161 | aws_secret_key: "{{ secret_key }}" 162 | region: "{{ aws_region }}" 163 | count_tag: 164 | Name: Private Instance 165 | exact_count: "{{ private_instance_count }}" 166 | instance_tags: 167 | Name: Private Instance 168 | tags: 169 | - ec2_instance 170 | - recipe4 171 | 172 | - name: Create EC2 Instance in public subnet 173 | ec2: 174 | key_name: my_first_key 175 | instance_type: "{{ instance_type }}" 176 | image: "{{ ami_id }}" 177 | wait: yes 178 | group: my_first_sg 179 | vpc_subnet_id: "{{ my_public_subnet.subnet.id }}" 180 | assign_public_ip: yes 181 | aws_access_key: "{{ access_key }}" 182 | aws_secret_key: "{{ secret_key }}" 183 | region: "{{ aws_region }}" 184 | count_tag: 185 | Name: Public Instance 186 | exact_count: "{{ public_instance_count }}" 187 | instance_tags: 188 | Name: Public Instance 189 | register: ec2_public_instance 190 | 
tags: 191 | - ec2_instance 192 | - recipe4 193 | 194 | #Creating and attaching EIPs to instance 195 | - name: Allocate Elastic IP and associate it with an instance 196 | ec2_eip: 197 | device_id: "{{ item }}" 198 | aws_access_key: "{{ access_key }}" 199 | aws_secret_key: "{{ secret_key }}" 200 | region: "{{ aws_region }}" 201 | with_items: 202 | - "{{ ec2_public_instance.instance_ids }}" 203 | register: elastic_ip 204 | tags: 205 | - elastic_ip 206 | - recipe5 207 | 208 | ##Attaching EC2 volume to Public Instance created 209 | - name: Create EBS volume and attach to Instance 210 | ec2_vol: 211 | aws_access_key: "{{ access_key }}" 212 | aws_secret_key: "{{ secret_key }}" 213 | region: "{{ aws_region }}" 214 | instance: "{{ item }}" 215 | volume_size: 10 216 | name: Public Instance 217 | device_name: /dev/xvdf 218 | with_items: "{{ ec2_public_instance.instance_ids }}" 219 | register: ec2_vol 220 | tags: 221 | - ebs_volume 222 | - recipe6 223 | 224 | - name: Create AMI of Public Instance Created 225 | ec2_ami: 226 | aws_access_key: "{{ access_key }}" 227 | aws_secret_key: "{{ secret_key }}" 228 | region: "{{ aws_region }}" 229 | instance_id: "{{ item }}" 230 | wait: yes 231 | name: first_ami 232 | no_reboot: yes 233 | tags: 234 | Name: First AMI 235 | with_items: 236 | - "{{ ec2_public_instance.instance_ids }}" 237 | register: image 238 | tags: 239 | - ami 240 | - recipe7 241 | 242 | 243 | - name: Create ELB in public subnet 244 | ec2_elb_lb: 245 | state: present 246 | name: first-elb 247 | security_group_ids: "{{ my_first_sg.group_id }}" 248 | region: "{{ aws_region }}" 249 | subnets: "{{ my_public_subnet.subnet.id }}" 250 | aws_access_key: "{{ access_key }}" 251 | aws_secret_key: "{{ secret_key }}" 252 | purge_subnets: yes 253 | listeners: 254 | - protocol: http 255 | load_balancer_port: 80 256 | instance_port: 80 257 | register: my_first_elb 258 | tags: 259 | - elb 260 | - recipe8 261 | 262 | - name: Get all running ec2 instances with given tags 263 | 
ec2_remote_facts: 264 | aws_access_key: "{{ access_key }}" 265 | aws_secret_key: "{{ secret_key }}" 266 | aws_region: "{{ aws_region }}" 267 | filters: 268 | instance-state-name: running 269 | "tag:Name": Private Instance 270 | register: ec2_instances_private 271 | tags: 272 | - elb 273 | - recipe8 274 | 275 | - name: Register all private instances with elb created 276 | ec2_elb: 277 | instance_id: "{{ item.id }}" 278 | ec2_elbs: first-elb 279 | state: present 280 | aws_access_key: "{{ access_key }}" 281 | aws_secret_key: "{{ secret_key }}" 282 | aws_region: "{{ aws_region }}" 283 | with_items: 284 | - "{{ ec2_instances_private.instances }}" 285 | ignore_errors: yes 286 | tags: 287 | - elb 288 | - recipe8 289 | 290 | - name: Deregister all private instances with elb created 291 | ec2_elb: 292 | instance_id: "{{ item.id }}" 293 | ec2_elbs: first-elb 294 | state: absent 295 | aws_access_key: "{{ access_key }}" 296 | aws_secret_key: "{{ secret_key }}" 297 | aws_region: "{{ aws_region }}" 298 | with_items: 299 | - "{{ ec2_instances_private.instances }}" 300 | ignore_errors: yes 301 | tags: 302 | - elb 303 | - recipe8 304 | 305 | #AutoScaling 306 | - name: Create Launch Configuration 307 | ec2_lc: 308 | region: "{{ aws_region }}" 309 | aws_access_key: "{{ access_key }}" 310 | aws_secret_key: "{{ secret_key }}" 311 | name: my_first_lc 312 | image_id: "{{ ami_id }}" 313 | key_name: my_first_key 314 | instance_type: "{{ instance_type }}" 315 | security_groups: "{{ my_first_sg.group_id }}" 316 | instance_monitoring: yes 317 | tags: 318 | - autoscaling 319 | - recipe9 320 | 321 | - name: Create Auto Scaling group 322 | ec2_asg: 323 | name: my_first_asg 324 | region: "{{ aws_region }}" 325 | aws_access_key: "{{ access_key }}" 326 | aws_secret_key: "{{ secret_key }}" 327 | load_balancers: 328 | - first-elb 329 | launch_config_name: my_first_lc 330 | min_size: 1 331 | max_size: 5 332 | desired_capacity: 3 333 | vpc_zone_identifier: 334 | - "{{ my_private_subnet.subnet.id }}" 
335 | tags: 336 | - environment: test 337 | tags: 338 | - autoscaling 339 | - recipe9 340 | 341 | - name: Configure Scaling Policies (scale-up) 342 | ec2_scaling_policy: 343 | region: "{{ aws_region }}" 344 | aws_access_key: "{{ access_key }}" 345 | aws_secret_key: "{{ secret_key }}" 346 | name: scale-up policy 347 | asg_name: my_first_asg 348 | state: present 349 | adjustment_type: ChangeInCapacity 350 | min_adjustment_step: 1 351 | scaling_adjustment: +1 352 | register: scale_up_policy 353 | tags: 354 | - autoscaling 355 | - recipe9 356 | 357 | - name: Configure CloudWatch Metric for Scaling Up Policy 358 | ec2_metric_alarm: 359 | state: present 360 | region: "{{ aws_region }}" 361 | aws_access_key: "{{ access_key }}" 362 | aws_secret_key: "{{ secret_key }}" 363 | name: "scale-up-metric" 364 | description: "This alarm notify auto scaling policy to step up instance" 365 | metric: "CPUUtilization" 366 | namespace: "AWS/EC2" 367 | statistic: "Average" 368 | comparison: ">=" 369 | threshold: 60.0 370 | unit: "Percent" 371 | period: 300 372 | evaluation_periods: 3 373 | dimensions: 374 | AutoScalingGroupName: "my_first_asg" 375 | alarm_actions: 376 | - "{{ scale_up_policy.arn }}" 377 | tags: 378 | - autoscaling 379 | - recipe9 380 | 381 | - name: Adding Elastic IP to the phonebook-infra 382 | add_host: 383 | hostname: "{{ item.public_ip }}" 384 | groups: phonebook-infra 385 | with_items: "{{ elastic_ip }}" 386 | tags: 387 | - deployment 388 | - recipe10 389 | -------------------------------------------------------------------------------- /Chapter02/roles/ec2/vars/main.yml: -------------------------------------------------------------------------------- 1 | # VPC Information 2 | vpc_name: "My VPC" 3 | vpc_cidr_block: "10.0.0.0/16" 4 | aws_region: "us-east-1" 5 | aws_zone: "us-east-1a" 6 | 7 | # Subnets 8 | vpc_public_subnet_cidr: "10.0.0.0/24" 9 | 10 | # Subnets 11 | vpc_private_subnet_cidr: "10.0.1.0/24" 12 | 13 | #Key Pair Content 14 | key: 'ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAABAQCsvk9a8rmEuvcaOTDLFNMoaxzzWVz8ArgdR04ufaYHUYnYQfJNzKayFipidf/zZAnGdP2KCuWF19DwNkHguABL95HCcjIWrSSkWPL+hwWvI6sAJm2o+OrIW0w1MG1eWpQk7zfb6pJLQMJT9f12XdV60Z4MHqNCXGc++xVdZpIie/ZTfBuHDw5PKLWXn54J6eb58RZxUzj2PuVlRC9o4afS30XDqoLiiidc5cz3Byxp7+Ilb00TEl0ClHsabaO4cgCsq72O6YO0M2VCqxdy73v2t7LlnHKwOgLnMp2WZ4UAwVgjkYLVwn4AZ3O0CxrsvD1f70UE2MOWnulyTO3bJgup user@example.com' 15 | 16 | #EC2 Instance Info 17 | instance_type: 't2.micro' 18 | ami_id: 'ami-46c1b650' 19 | private_instance_count: 1 20 | public_instance_count: 1 21 | -------------------------------------------------------------------------------- /Chapter02/roles/phonebook/files/phone-book.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Simple Phone Book 3 | 4 | [Service] 5 | WorkingDirectory=/opt/phone-book 6 | ExecStartPre=/bin/bash /opt/phone-book/init.sh 7 | ExecStart=/usr/bin/uwsgi --http-socket 0.0.0.0:8080 --manage-script-name --mount /phonebook=app:app 8 | Restart=on-failure 9 | RestartSec=5 10 | 11 | [Install] 12 | WantedBy=multi-user.target 13 | -------------------------------------------------------------------------------- /Chapter02/roles/phonebook/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: install epel repository 3 | package: 4 | name: epel-release 5 | state: present 6 | 7 | - name: install dependencies 8 | package: 9 | name: "{{ item }}" 10 | state: present 11 | with_items: 12 | - git 13 | - python-pip 14 | - gcc 15 | - python-devel 16 | 17 | - name: install python libraries 18 | pip: 19 | name: "{{ item }}" 20 | state: present 21 | with_items: 22 | - flask 23 | - flask-sqlalchemy 24 | - flask-migrate 25 | - uwsgi 26 | 27 | - name: get the application code 28 | git: 29 | repo: https://github.com/adimania/phone-book 30 | dest: /opt/phone-book 31 | 32 | - name: upload systemd unit file 33 | copy: 34 | src: phone-book.service 35 | dest: 
/etc/systemd/system/phone-book.service 36 | 37 | - name: start phonebook 38 | systemd: 39 | state: started 40 | daemon_reload: yes 41 | name: phone-book 42 | enabled: yes 43 | -------------------------------------------------------------------------------- /Chapter03/aws.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | gather_facts: no 4 | roles: 5 | - aws 6 | -------------------------------------------------------------------------------- /Chapter03/ec2.ini: -------------------------------------------------------------------------------- 1 | # Ansible EC2 external inventory script settings 2 | # 3 | 4 | [ec2] 5 | 6 | # to talk to a private eucalyptus instance uncomment these lines 7 | # and edit edit eucalyptus_host to be the host name of your cloud controller 8 | #eucalyptus = True 9 | #eucalyptus_host = clc.cloud.domain.org 10 | 11 | # AWS regions to make calls to. Set this to 'all' to make request to all regions 12 | # in AWS and merge the results together. Alternatively, set this to a comma 13 | # separated list of regions. E.g. 'us-east-1,us-west-1,us-west-2' and do not 14 | # provide the 'regions_exclude' option. If this is set to 'auto', AWS_REGION or 15 | # AWS_DEFAULT_REGION environment variable will be read to determine the region. 16 | regions = all 17 | regions_exclude = us-gov-west-1, cn-north-1 18 | 19 | # When generating inventory, Ansible needs to know how to address a server. 20 | # Each EC2 instance has a lot of variables associated with it. Here is the list: 21 | # http://docs.pythonboto.org/en/latest/ref/ec2.html#module-boto.ec2.instance 22 | # Below are 2 variables that are used as the address of a server: 23 | # - destination_variable 24 | # - vpc_destination_variable 25 | 26 | # This is the normal destination variable to use. If you are running Ansible 27 | # from outside EC2, then 'public_dns_name' makes the most sense. 
If you are 28 | # running Ansible from within EC2, then perhaps you want to use the internal 29 | # address, and should set this to 'private_dns_name'. The key of an EC2 tag 30 | # may optionally be used; however the boto instance variables hold precedence 31 | # in the event of a collision. 32 | destination_variable = public_dns_name 33 | 34 | # This allows you to override the inventory_name with an ec2 variable, instead 35 | # of using the destination_variable above. Addressing (aka ansible_ssh_host) 36 | # will still use destination_variable. Tags should be written as 'tag_TAGNAME'. 37 | #hostname_variable = tag_Name 38 | 39 | # For server inside a VPC, using DNS names may not make sense. When an instance 40 | # has 'subnet_id' set, this variable is used. If the subnet is public, setting 41 | # this to 'ip_address' will return the public IP address. For instances in a 42 | # private subnet, this should be set to 'private_ip_address', and Ansible must 43 | # be run from within EC2. The key of an EC2 tag may optionally be used; however 44 | # the boto instance variables hold precedence in the event of a collision. 45 | # WARNING: - instances that are in the private vpc, _without_ public ip address 46 | # will not be listed in the inventory until You set: 47 | # vpc_destination_variable = private_ip_address 48 | vpc_destination_variable = ip_address 49 | 50 | # The following two settings allow flexible ansible host naming based on a 51 | # python format string and a comma-separated list of ec2 tags. Note that: 52 | # 53 | # 1) If the tags referenced are not present for some instances, empty strings 54 | # will be substituted in the format string. 55 | # 2) This overrides both destination_variable and vpc_destination_variable. 56 | # 57 | #destination_format = {0}.{1}.example.com 58 | #destination_format_tags = Name,environment 59 | 60 | # To tag instances on EC2 with the resource records that point to them from 61 | # Route53, set 'route53' to True. 
62 | route53 = False 63 | 64 | # To use Route53 records as the inventory hostnames, uncomment and set 65 | # to equal the domain name you wish to use. You must also have 'route53' (above) 66 | # set to True. 67 | # route53_hostnames = .example.com 68 | 69 | # To exclude RDS instances from the inventory, uncomment and set to False. 70 | rds = True 71 | 72 | # To exclude ElastiCache instances from the inventory, uncomment and set to False. 73 | #elasticache = False 74 | 75 | # Additionally, you can specify the list of zones to exclude looking up in 76 | # 'route53_excluded_zones' as a comma-separated list. 77 | # route53_excluded_zones = samplezone1.com, samplezone2.com 78 | 79 | # By default, only EC2 instances in the 'running' state are returned. Set 80 | # 'all_instances' to True to return all instances regardless of state. 81 | all_instances = False 82 | 83 | # By default, only EC2 instances in the 'running' state are returned. Specify 84 | # EC2 instance states to return as a comma-separated list. This 85 | # option is overridden when 'all_instances' is True. 86 | # instance_states = pending, running, shutting-down, terminated, stopping, stopped 87 | 88 | # By default, only RDS instances in the 'available' state are returned. Set 89 | # 'all_rds_instances' to True return all RDS instances regardless of state. 90 | all_rds_instances = False 91 | 92 | # Include RDS cluster information (Aurora etc.) 93 | include_rds_clusters = False 94 | 95 | # By default, only ElastiCache clusters and nodes in the 'available' state 96 | # are returned. Set 'all_elasticache_clusters' and/or 'all_elastic_nodes' 97 | # to True return all ElastiCache clusters and nodes, regardless of state. 98 | # 99 | # Note that all_elasticache_nodes only applies to listed clusters. That means 100 | # if you set all_elastic_clusters to false, no node will be return from 101 | # unavailable clusters, regardless of the state and to what you set for 102 | # all_elasticache_nodes. 
103 | all_elasticache_replication_groups = False 104 | all_elasticache_clusters = False 105 | all_elasticache_nodes = False 106 | 107 | # API calls to EC2 are slow. For this reason, we cache the results of an API 108 | # call. Set this to the path you want cache files to be written to. Two files 109 | # will be written to this directory: 110 | # - ansible-ec2.cache 111 | # - ansible-ec2.index 112 | cache_path = ~/.ansible/tmp 113 | 114 | # The number of seconds a cache file is considered valid. After this many 115 | # seconds, a new API call will be made, and the cache file will be updated. 116 | # To disable the cache, set this value to 0 117 | cache_max_age = 300 118 | 119 | # Organize groups into a nested/hierarchy instead of a flat namespace. 120 | nested_groups = False 121 | 122 | # Replace - tags when creating groups to avoid issues with ansible 123 | replace_dash_in_groups = True 124 | 125 | # If set to true, any tag of the form "a,b,c" is expanded into a list 126 | # and the results are used to create additional tag_* inventory groups. 127 | expand_csv_tags = False 128 | 129 | # The EC2 inventory output can become very large. To manage its size, 130 | # configure which groups should be created. 
131 | group_by_instance_id = True 132 | group_by_region = True 133 | group_by_availability_zone = True 134 | group_by_aws_account = False 135 | group_by_ami_id = True 136 | group_by_instance_type = True 137 | group_by_instance_state = False 138 | group_by_platform = True 139 | group_by_key_pair = True 140 | group_by_vpc_id = True 141 | group_by_security_group = True 142 | group_by_tag_keys = True 143 | group_by_tag_none = True 144 | group_by_route53_names = True 145 | group_by_rds_engine = True 146 | group_by_rds_parameter_group = True 147 | group_by_elasticache_engine = True 148 | group_by_elasticache_cluster = True 149 | group_by_elasticache_parameter_group = True 150 | group_by_elasticache_replication_group = True 151 | 152 | # If you only want to include hosts that match a certain regular expression 153 | # pattern_include = staging-* 154 | 155 | # If you want to exclude any hosts that match a certain regular expression 156 | # pattern_exclude = staging-* 157 | 158 | # Instance filters can be used to control which instances are retrieved for 159 | # inventory. For the full list of possible filters, please read the EC2 API 160 | # docs: http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-DescribeInstances.html#query-DescribeInstances-filters 161 | # Filters are key/value pairs separated by '=', to list multiple filters use 162 | # a list separated by commas. To "AND" criteria together, use "&". Note that 163 | # the "AND" is not useful along with stack_filters and so such usage is not allowed. 164 | # See examples below. 165 | 166 | # If you want to apply multiple filters simultaneously, set stack_filters to 167 | # True. Default behaviour is to combine the results of all filters. Stacking 168 | # allows the use of multiple conditions to filter down, for example by 169 | # environment and type of host. 
170 | stack_filters = False 171 | 172 | # Retrieve only instances with (key=value) env=staging tag 173 | # instance_filters = tag:env=staging 174 | 175 | # Retrieve only instances with role=webservers OR role=dbservers tag 176 | # instance_filters = tag:role=webservers,tag:role=dbservers 177 | 178 | # Retrieve only t1.micro instances OR instances with tag env=staging 179 | # instance_filters = instance-type=t1.micro,tag:env=staging 180 | 181 | # You can use wildcards in filter values also. Below will list instances which 182 | # tag Name value matches webservers1* 183 | # (ex. webservers15, webservers1a, webservers123 etc) 184 | # instance_filters = tag:Name=webservers1* 185 | 186 | # Retrieve only instances of type t1.micro that also have tag env=stage 187 | # instance_filters = instance-type=t1.micro&tag:env=stage 188 | 189 | # Retrieve instances of type t1.micro AND tag env=stage, as well as any instance 190 | # that are of type m3.large, regardless of env tag 191 | # instance_filters = instance-type=t1.micro&tag:env=stage,instance-type=m3.large 192 | 193 | # An IAM role can be assumed, so all requests are run as that role. 194 | # This can be useful for connecting across different accounts, or to limit user 195 | # access 196 | # iam_role = role-arn 197 | 198 | # A boto configuration profile may be used to separate out credentials 199 | # see http://boto.readthedocs.org/en/latest/boto_config_tut.html 200 | # boto_profile = some-boto-profile-name 201 | 202 | 203 | [credentials] 204 | 205 | # The AWS credentials can optionally be specified here. Credentials specified 206 | # here are ignored if the environment variable AWS_ACCESS_KEY_ID or 207 | # AWS_PROFILE is set, or if the boto_profile property above is set. 208 | # 209 | # Supplying AWS credentials here is not recommended, as it introduces 210 | # non-trivial security concerns. When going down this route, please make sure 211 | # to set access permissions for this file correctly, e.g. 
handle it the same 212 | # way as you would a private SSH key. 213 | # 214 | # Unlike the boto and AWS configure files, this section does not support 215 | # profiles. 216 | # 217 | # aws_access_key_id = AXXXXXXXXXXXXXX 218 | # aws_secret_access_key = XXXXXXXXXXXXXXXXXXX 219 | # aws_security_token = XXXXXXXXXXXXXXXXXXXXXXXXXXXX 220 | -------------------------------------------------------------------------------- /Chapter03/phonebook.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: tag_Name_phonebook01 3 | vars_files: 4 | - roles/aws/vars/secrets.yml 5 | roles: 6 | - phonebook 7 | 8 | -------------------------------------------------------------------------------- /Chapter03/roles/aws/files/hello_world.py: -------------------------------------------------------------------------------- 1 | def my_handler(event, context): 2 | return "Hello World" 3 | 4 | -------------------------------------------------------------------------------- /Chapter03/roles/aws/files/iam_admin.json: -------------------------------------------------------------------------------- 1 | { 2 | "Version": "2012-10-17", 3 | "Statement": [ 4 | { 5 | "Effect": "Allow", 6 | "Action": "*", 7 | "Resource": "*" 8 | } 9 | ] 10 | } 11 | -------------------------------------------------------------------------------- /Chapter03/roles/aws/files/myfirstzip.zip: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/PacktPublishing/Ansible-2-Cloud-Automation-Cookbook/8d3452a33481dcc5fb41607abf45d846afef2918/Chapter03/roles/aws/files/myfirstzip.zip -------------------------------------------------------------------------------- /Chapter03/roles/aws/files/policy_lambda.json: -------------------------------------------------------------------------------- 1 | { 2 | "Version": "2012-10-17", 3 | "Statement": [ 4 | { 5 | "Effect": "Allow", 6 | "Principal": { 7 | "Service": "lambda.amazonaws.com" 8 
| }, 9 | "Action": "sts:AssumeRole" 10 | } 11 | ] 12 | } 13 | -------------------------------------------------------------------------------- /Chapter03/roles/aws/files/text.txt: -------------------------------------------------------------------------------- 1 | Hello World for S3 2 | -------------------------------------------------------------------------------- /Chapter03/roles/aws/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: include secrets 3 | include_vars: secrets.yml 4 | 5 | - name: Create EC2 Security Group for RDS 6 | ec2_group: 7 | name: my_rds_sg 8 | description: Scurity group for RDS instance 9 | region: "{{ aws_region }}" 10 | aws_secret_key: "{{ secret_key }}" 11 | aws_access_key: "{{ access_key }}" 12 | rules: 13 | - proto: tcp 14 | from_port: 3306 15 | to_port: 3306 16 | group_name: my_rds_sg 17 | rules_egress: 18 | - proto: all 19 | cidr_ip: 0.0.0.0/0 20 | register: rds_sg 21 | tags: 22 | - recipe1 23 | 24 | - name: Create RDS Instance 25 | rds: 26 | aws_access_key: "{{ access_key }}" 27 | aws_secret_key: "{{ secret_key }}" 28 | region: "{{ aws_region }}" 29 | command: create 30 | instance_name: my-first-rds-instance 31 | db_engine: MySQL 32 | size: 10 33 | instance_type: db.m1.small 34 | username: cookbook_admin 35 | password: "{{ rds_admin_pass }}" 36 | vpc_security_groups: "{{ rds_sg.group_id }}" 37 | multi_zone: yes 38 | wait: true 39 | wait_timeout: 700 40 | backup_retention: 7 41 | tags: 42 | Enviornment: cookbook-prod 43 | Application: cookbook-test 44 | register: rds_result 45 | tags: 46 | - recipe1 47 | 48 | - name: Create Route53 record 49 | route53: 50 | aws_access_key: "{{ access_key }}" 51 | aws_secret_key: "{{ secret_key }}" 52 | state: present 53 | zone: example.com 54 | record: app.example.com 55 | type: A 56 | ttl: 7200 57 | value: 58 | - 1.1.1.1 59 | - 2.2.2.2 60 | - 3.3.3.3 61 | tags: 62 | - recipe2 63 | 64 | - name: Get existing Route53 record 65 | 
route53: 66 | aws_access_key: "{{ access_key }}" 67 | aws_secret_key: "{{ secret_key }}" 68 | state: get 69 | zone: example.com 70 | record: dns.example.com 71 | type: A 72 | register: record 73 | tags: 74 | - recipe2 75 | 76 | - name: Delete existing Route53 record 77 | route53: 78 | aws_access_key: "{{ access_key }}" 79 | aws_secret_key: "{{ secret_key }}" 80 | state: absent 81 | zone: example.com 82 | record: "{{ record.set.record }}" 83 | ttl: "{{ record.set.ttl }}" 84 | type: "{{ record.set.type }}" 85 | value: "{{ record.set.value }}" 86 | tags: 87 | - recipe2 88 | 89 | #Creating an empty s3 bucket 90 | - name: Creating S3 bucket 91 | aws_s3: 92 | aws_access_key: "{{ access_key }}" 93 | aws_secret_key: "{{ secret_key }}" 94 | region: "{{aws_region}}" 95 | bucket: my_ansible_cookbook_bucket 96 | mode: create 97 | permission: public-read 98 | tags: 99 | - recipe3 100 | 101 | #Putting object 102 | - name: Put object in S3 bucket 103 | aws_s3: 104 | aws_access_key: "{{ access_key }}" 105 | aws_secret_key: "{{ secret_key }}" 106 | region: "{{aws_region}}" 107 | bucket: my_ansible_cookbook_bucket 108 | object: "file.txt" 109 | src: "{{role_path}}/files/text.txt" 110 | mode: put 111 | tags: 112 | - recipe3 113 | 114 | - name: Create IAM role for lamda 115 | iam_role: 116 | aws_access_key: "{{ access_key }}" 117 | aws_secret_key: "{{ secret_key }}" 118 | region: "{{aws_region}}" 119 | name: lambda 120 | assume_role_policy_document: "{{ lookup('file','policy_lambda.json') }}" 121 | state: present 122 | tags: 123 | - recipe4 124 | 125 | - name: Creating first lambda function 126 | lambda: 127 | aws_access_key: "{{ access_key }}" 128 | aws_secret_key: "{{ secret_key }}" 129 | region: "{{aws_region}}" 130 | name: MyFirstLambda 131 | state: present 132 | zip_file: "{{role_path}}/files/myfirstzip.zip" 133 | runtime: 'python2.7' 134 | role: 'arn:aws:iam::779710563302:role/lambda' 135 | handler: 'hello_world.my_handler' 136 | tags: 137 | - recipe4 138 | 139 | - name: Execute 
lamda function 140 | execute_lambda: 141 | aws_access_key: "{{ access_key }}" 142 | aws_secret_key: "{{ secret_key }}" 143 | region: "{{aws_region}}" 144 | name: MyFirstLambda 145 | payload: 146 | foo: test 147 | value: hello 148 | register: response 149 | tags: 150 | - recipe4 151 | 152 | - name: debug 153 | debug: msg=response 154 | tags: 155 | - recipe4 156 | 157 | - name: Create IAM users 158 | iam: 159 | aws_access_key: "{{ access_key }}" 160 | aws_secret_key: "{{ secret_key }}" 161 | region: "{{aws_region}}" 162 | iam_type: user 163 | name: "{{ item }}" 164 | state: present 165 | password: "{{ iam_pass }}" 166 | with_items: 167 | - cookbook-admin 168 | - cookbook-two 169 | tags: 170 | - recipe5 171 | 172 | - name: Assign IAM policy to user 173 | iam_policy: 174 | aws_access_key: "{{ access_key }}" 175 | aws_secret_key: "{{ secret_key }}" 176 | region: "{{aws_region}}" 177 | iam_type: user 178 | iam_name: cookbook-admin 179 | policy_name: Admin 180 | state: present 181 | policy_document: "{{role_path}}/files/iam_admin.json" 182 | tags: 183 | - recipe5 184 | -------------------------------------------------------------------------------- /Chapter03/roles/aws/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | aws_region: "us-east-1" 3 | -------------------------------------------------------------------------------- /Chapter03/roles/aws/vars/secrets.yml: -------------------------------------------------------------------------------- 1 | $ANSIBLE_VAULT;1.1;AES256 2 | 36626239366533303434303963613736313337323066396630653232353637316363626330626231 3 | 3934633334653466643233376363356431643131323464660a396230636335626662333264353136 4 | 37366630356539326532356662646161633363356165336535363661333837646363393637323235 5 | 3964363034313132360a396563333333663466386265653436653536363237626338383932623732 6 | 37396264316662366131666231356131316264316236653461386339353562346434376261326365 7 | 
36636364653532666365393536666637616561383936326261363133643862616432633831663866 8 | 386665363730326261303433626237623039 9 | -------------------------------------------------------------------------------- /Chapter03/roles/phonebook/files/phone-book.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Simple Phone Book 3 | 4 | [Service] 5 | WorkingDirectory=/opt/phone-book 6 | ExecStartPre=/bin/bash /opt/phone-book/init.sh 7 | ExecStart=/usr/bin/uwsgi --http-socket 0.0.0.0:8080 --manage-script-name --mount /phonebook=app:app 8 | Restart=on-failure 9 | RestartSec=5 10 | 11 | [Install] 12 | WantedBy=multi-user.target 13 | -------------------------------------------------------------------------------- /Chapter03/roles/phonebook/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: install epel repository 3 | package: 4 | name: epel-release 5 | state: present 6 | become: yes 7 | 8 | - name: install dependencies 9 | package: 10 | name: "{{ item }}" 11 | state: present 12 | with_items: 13 | - git 14 | - python-pip 15 | - gcc 16 | - python-devel 17 | - mysql-devel 18 | become: yes 19 | 20 | - name: install python libraries 21 | pip: 22 | name: "{{ item }}" 23 | state: present 24 | with_items: 25 | - flask 26 | - flask-sqlalchemy 27 | - flask-migrate 28 | - uwsgi 29 | - MySQL-python 30 | become: yes 31 | 32 | - name: get the application code 33 | git: 34 | repo: https://github.com/DevopsNexus/phone-book-mysql.git 35 | dest: /opt/phone-book 36 | force: yes 37 | become: yes 38 | 39 | - name: upload systemd unit file 40 | copy: 41 | src: phone-book.service 42 | dest: /etc/systemd/system/phone-book.service 43 | become: yes 44 | 45 | - name: upload app config file 46 | template: 47 | src: config.py 48 | dest: /opt/phone-book/config.py 49 | become: yes 50 | 51 | - name: create phonebook database 52 | mysql_db: 53 | name: phonebook 54 | state: present 
55 | login_host: "{{hostvars[groups['tag_Application_cookbook_test'][0]].ansible_host}}" 56 | login_user: cookbook_admin 57 | login_password: "{{ rds_admin_pass }}" 58 | 59 | - name: create app user for phonebook database 60 | mysql_user: 61 | name: app 62 | password: "{{ rds_admin_pass }}" 63 | priv: 'phonebook.*:ALL' 64 | host: "%" 65 | state: present 66 | login_host: "{{hostvars[groups['tag_Application_cookbook_test'][0]].ansible_host}}" 67 | login_user: cookbook_admin 68 | login_password: "{{ rds_admin_pass }}" 69 | 70 | - name: start phonebook 71 | systemd: 72 | state: started 73 | daemon_reload: yes 74 | name: phone-book 75 | enabled: yes 76 | become: yes 77 | -------------------------------------------------------------------------------- /Chapter03/roles/phonebook/templates/config.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | SQLALCHEMY_DATABASE_URI = "mysql://app:{{ rds_admin_pass }}@{{hostvars[groups['tag_Application_cookbook_test'][0]].ansible_host}}/phonebook" 4 | SECRET_KEY = os.urandom(32) 5 | -------------------------------------------------------------------------------- /Chapter04/gce.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # Copyright 2013 Google Inc. 3 | # 4 | # This file is part of Ansible 5 | # 6 | # Ansible is free software: you can redistribute it and/or modify 7 | # it under the terms of the GNU General Public License as published by 8 | # the Free Software Foundation, either version 3 of the License, or 9 | # (at your option) any later version. 10 | # 11 | # Ansible is distributed in the hope that it will be useful, 12 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 13 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 | # GNU General Public License for more details. 15 | # 16 | # You should have received a copy of the GNU General Public License 17 | # along with Ansible. 
If not, see . 18 | 19 | ''' 20 | GCE external inventory script 21 | ================================= 22 | 23 | Generates inventory that Ansible can understand by making API requests 24 | Google Compute Engine via the libcloud library. Full install/configuration 25 | instructions for the gce* modules can be found in the comments of 26 | ansible/test/gce_tests.py. 27 | 28 | When run against a specific host, this script returns the following variables 29 | based on the data obtained from the libcloud Node object: 30 | - gce_uuid 31 | - gce_id 32 | - gce_image 33 | - gce_machine_type 34 | - gce_private_ip 35 | - gce_public_ip 36 | - gce_name 37 | - gce_description 38 | - gce_status 39 | - gce_zone 40 | - gce_tags 41 | - gce_metadata 42 | - gce_network 43 | - gce_subnetwork 44 | 45 | When run in --list mode, instances are grouped by the following categories: 46 | - zone: 47 | zone group name examples are us-central1-b, europe-west1-a, etc. 48 | - instance tags: 49 | An entry is created for each tag. For example, if you have two instances 50 | with a common tag called 'foo', they will both be grouped together under 51 | the 'tag_foo' name. 52 | - network name: 53 | the name of the network is appended to 'network_' (e.g. the 'default' 54 | network will result in a group named 'network_default') 55 | - machine type 56 | types follow a pattern like n1-standard-4, g1-small, etc. 57 | - running status: 58 | group name prefixed with 'status_' (e.g. status_running, status_stopped,..) 59 | - image: 60 | when using an ephemeral/scratch disk, this will be set to the image name 61 | used when creating the instance (e.g. debian-7-wheezy-v20130816). when 62 | your instance was created with a root persistent disk it will be set to 63 | 'persistent_disk' since there is no current way to determine the image. 
64 | 65 | Examples: 66 | Execute uname on all instances in the us-central1-a zone 67 | $ ansible -i gce.py us-central1-a -m shell -a "/bin/uname -a" 68 | 69 | Use the GCE inventory script to print out instance specific information 70 | $ contrib/inventory/gce.py --host my_instance 71 | 72 | Author: Eric Johnson 73 | Contributors: Matt Hite , Tom Melendez 74 | Version: 0.0.3 75 | ''' 76 | 77 | try: 78 | import pkg_resources 79 | except ImportError: 80 | # Use pkg_resources to find the correct versions of libraries and set 81 | # sys.path appropriately when there are multiversion installs. We don't 82 | # fail here as there is code that better expresses the errors where the 83 | # library is used. 84 | pass 85 | 86 | USER_AGENT_PRODUCT = "Ansible-gce_inventory_plugin" 87 | USER_AGENT_VERSION = "v2" 88 | 89 | import sys 90 | import os 91 | import argparse 92 | 93 | from time import time 94 | 95 | if sys.version_info >= (3, 0): 96 | import configparser 97 | else: 98 | import ConfigParser as configparser 99 | 100 | import logging 101 | logging.getLogger('libcloud.common.google').addHandler(logging.NullHandler()) 102 | 103 | try: 104 | import json 105 | except ImportError: 106 | import simplejson as json 107 | 108 | try: 109 | from libcloud.compute.types import Provider 110 | from libcloud.compute.providers import get_driver 111 | _ = Provider.GCE 112 | except: 113 | sys.exit("GCE inventory script requires libcloud >= 0.13") 114 | 115 | 116 | class CloudInventoryCache(object): 117 | def __init__(self, cache_name='ansible-cloud-cache', cache_path='/tmp', 118 | cache_max_age=300): 119 | cache_dir = os.path.expanduser(cache_path) 120 | if not os.path.exists(cache_dir): 121 | os.makedirs(cache_dir) 122 | self.cache_path_cache = os.path.join(cache_dir, cache_name) 123 | 124 | self.cache_max_age = cache_max_age 125 | 126 | def is_valid(self, max_age=None): 127 | ''' Determines if the cache files have expired, or if it is still valid ''' 128 | 129 | if max_age is None: 130 | 
max_age = self.cache_max_age 131 | 132 | if os.path.isfile(self.cache_path_cache): 133 | mod_time = os.path.getmtime(self.cache_path_cache) 134 | current_time = time() 135 | if (mod_time + max_age) > current_time: 136 | return True 137 | 138 | return False 139 | 140 | def get_all_data_from_cache(self, filename=''): 141 | ''' Reads the JSON inventory from the cache file. Returns Python dictionary. ''' 142 | 143 | data = '' 144 | if not filename: 145 | filename = self.cache_path_cache 146 | with open(filename, 'r') as cache: 147 | data = cache.read() 148 | return json.loads(data) 149 | 150 | def write_to_cache(self, data, filename=''): 151 | ''' Writes data to file as JSON. Returns True. ''' 152 | if not filename: 153 | filename = self.cache_path_cache 154 | json_data = json.dumps(data) 155 | with open(filename, 'w') as cache: 156 | cache.write(json_data) 157 | return True 158 | 159 | 160 | class GceInventory(object): 161 | def __init__(self): 162 | # Cache object 163 | self.cache = None 164 | # dictionary containing inventory read from disk 165 | self.inventory = {} 166 | 167 | # Read settings and parse CLI arguments 168 | self.parse_cli_args() 169 | self.config = self.get_config() 170 | self.driver = self.get_gce_driver() 171 | self.ip_type = self.get_inventory_options() 172 | if self.ip_type: 173 | self.ip_type = self.ip_type.lower() 174 | 175 | # Cache management 176 | start_inventory_time = time() 177 | cache_used = False 178 | if self.args.refresh_cache or not self.cache.is_valid(): 179 | self.do_api_calls_update_cache() 180 | else: 181 | self.load_inventory_from_cache() 182 | cache_used = True 183 | self.inventory['_meta']['stats'] = {'use_cache': True} 184 | self.inventory['_meta']['stats'] = { 185 | 'inventory_load_time': time() - start_inventory_time, 186 | 'cache_used': cache_used 187 | } 188 | 189 | # Just display data for specific host 190 | if self.args.host: 191 | print(self.json_format_dict( 192 | self.inventory['_meta']['hostvars'][self.args.host], 
193 | pretty=self.args.pretty)) 194 | else: 195 | # Otherwise, assume user wants all instances grouped 196 | zones = self.parse_env_zones() 197 | print(self.json_format_dict(self.inventory, 198 | pretty=self.args.pretty)) 199 | sys.exit(0) 200 | 201 | def get_config(self): 202 | """ 203 | Reads the settings from the gce.ini file. 204 | 205 | Populates a SafeConfigParser object with defaults and 206 | attempts to read an .ini-style configuration from the filename 207 | specified in GCE_INI_PATH. If the environment variable is 208 | not present, the filename defaults to gce.ini in the current 209 | working directory. 210 | """ 211 | gce_ini_default_path = os.path.join( 212 | os.path.dirname(os.path.realpath(__file__)), "gce.ini") 213 | gce_ini_path = os.environ.get('GCE_INI_PATH', gce_ini_default_path) 214 | 215 | # Create a ConfigParser. 216 | # This provides empty defaults to each key, so that environment 217 | # variable configuration (as opposed to INI configuration) is able 218 | # to work. 219 | config = configparser.SafeConfigParser(defaults={ 220 | 'gce_service_account_email_address': '', 221 | 'gce_service_account_pem_file_path': '', 222 | 'gce_project_id': '', 223 | 'gce_zone': '', 224 | 'libcloud_secrets': '', 225 | 'inventory_ip_type': '', 226 | 'cache_path': '~/.ansible/tmp', 227 | 'cache_max_age': '300' 228 | }) 229 | if 'gce' not in config.sections(): 230 | config.add_section('gce') 231 | if 'inventory' not in config.sections(): 232 | config.add_section('inventory') 233 | if 'cache' not in config.sections(): 234 | config.add_section('cache') 235 | 236 | config.read(gce_ini_path) 237 | 238 | ######### 239 | # Section added for processing ini settings 240 | ######### 241 | 242 | # Set the instance_states filter based on config file options 243 | self.instance_states = [] 244 | if config.has_option('gce', 'instance_states'): 245 | states = config.get('gce', 'instance_states') 246 | # Ignore if instance_states is an empty string. 
247 | if states: 248 | self.instance_states = states.split(',') 249 | 250 | # Caching 251 | cache_path = config.get('cache', 'cache_path') 252 | cache_max_age = config.getint('cache', 'cache_max_age') 253 | # TOOD(supertom): support project-specific caches 254 | cache_name = 'ansible-gce.cache' 255 | self.cache = CloudInventoryCache(cache_path=cache_path, 256 | cache_max_age=cache_max_age, 257 | cache_name=cache_name) 258 | return config 259 | 260 | def get_inventory_options(self): 261 | """Determine inventory options. Environment variables always 262 | take precedence over configuration files.""" 263 | ip_type = self.config.get('inventory', 'inventory_ip_type') 264 | # If the appropriate environment variables are set, they override 265 | # other configuration 266 | ip_type = os.environ.get('INVENTORY_IP_TYPE', ip_type) 267 | return ip_type 268 | 269 | def get_gce_driver(self): 270 | """Determine the GCE authorization settings and return a 271 | libcloud driver. 272 | """ 273 | # Attempt to get GCE params from a configuration file, if one 274 | # exists. 
275 | secrets_path = self.config.get('gce', 'libcloud_secrets') 276 | secrets_found = False 277 | 278 | try: 279 | import secrets 280 | args = list(secrets.GCE_PARAMS) 281 | kwargs = secrets.GCE_KEYWORD_PARAMS 282 | secrets_found = True 283 | except: 284 | pass 285 | 286 | if not secrets_found and secrets_path: 287 | if not secrets_path.endswith('secrets.py'): 288 | err = "Must specify libcloud secrets file as " 289 | err += "/absolute/path/to/secrets.py" 290 | sys.exit(err) 291 | sys.path.append(os.path.dirname(secrets_path)) 292 | try: 293 | import secrets 294 | args = list(getattr(secrets, 'GCE_PARAMS', [])) 295 | kwargs = getattr(secrets, 'GCE_KEYWORD_PARAMS', {}) 296 | secrets_found = True 297 | except: 298 | pass 299 | 300 | if not secrets_found: 301 | args = [ 302 | self.config.get('gce', 'gce_service_account_email_address'), 303 | self.config.get('gce', 'gce_service_account_pem_file_path') 304 | ] 305 | kwargs = {'project': self.config.get('gce', 'gce_project_id'), 306 | 'datacenter': self.config.get('gce', 'gce_zone')} 307 | 308 | # If the appropriate environment variables are set, they override 309 | # other configuration; process those into our args and kwargs. 310 | args[0] = os.environ.get('GCE_EMAIL', args[0]) 311 | args[1] = os.environ.get('GCE_PEM_FILE_PATH', args[1]) 312 | args[1] = os.environ.get('GCE_CREDENTIALS_FILE_PATH', args[1]) 313 | 314 | kwargs['project'] = os.environ.get('GCE_PROJECT', kwargs['project']) 315 | kwargs['datacenter'] = os.environ.get('GCE_ZONE', kwargs['datacenter']) 316 | 317 | # Retrieve and return the GCE driver. 318 | gce = get_driver(Provider.GCE)(*args, **kwargs) 319 | gce.connection.user_agent_append( 320 | '%s/%s' % (USER_AGENT_PRODUCT, USER_AGENT_VERSION), 321 | ) 322 | return gce 323 | 324 | def parse_env_zones(self): 325 | '''returns a list of comma separated zones parsed from the GCE_ZONE environment variable. 
326 | If provided, this will be used to filter the results of the grouped_instances call''' 327 | import csv 328 | reader = csv.reader([os.environ.get('GCE_ZONE', "")], skipinitialspace=True) 329 | zones = [r for r in reader] 330 | return [z for z in zones[0]] 331 | 332 | def parse_cli_args(self): 333 | ''' Command line argument processing ''' 334 | 335 | parser = argparse.ArgumentParser( 336 | description='Produce an Ansible Inventory file based on GCE') 337 | parser.add_argument('--list', action='store_true', default=True, 338 | help='List instances (default: True)') 339 | parser.add_argument('--host', action='store', 340 | help='Get all information about an instance') 341 | parser.add_argument('--pretty', action='store_true', default=False, 342 | help='Pretty format (default: False)') 343 | parser.add_argument( 344 | '--refresh-cache', action='store_true', default=False, 345 | help='Force refresh of cache by making API requests (default: False - use cache files)') 346 | self.args = parser.parse_args() 347 | 348 | def node_to_dict(self, inst): 349 | md = {} 350 | 351 | if inst is None: 352 | return {} 353 | 354 | if 'items' in inst.extra['metadata']: 355 | for entry in inst.extra['metadata']['items']: 356 | md[entry['key']] = entry['value'] 357 | 358 | net = inst.extra['networkInterfaces'][0]['network'].split('/')[-1] 359 | subnet = None 360 | if 'subnetwork' in inst.extra['networkInterfaces'][0]: 361 | subnet = inst.extra['networkInterfaces'][0]['subnetwork'].split('/')[-1] 362 | # default to exernal IP unless user has specified they prefer internal 363 | if self.ip_type == 'internal': 364 | ssh_host = inst.private_ips[0] 365 | else: 366 | ssh_host = inst.public_ips[0] if len(inst.public_ips) >= 1 else inst.private_ips[0] 367 | 368 | return { 369 | 'gce_uuid': inst.uuid, 370 | 'gce_id': inst.id, 371 | 'gce_image': inst.image, 372 | 'gce_machine_type': inst.size, 373 | 'gce_private_ip': inst.private_ips[0], 374 | 'gce_public_ip': inst.public_ips[0] if 
len(inst.public_ips) >= 1 else None, 375 | 'gce_name': inst.name, 376 | 'gce_description': inst.extra['description'], 377 | 'gce_status': inst.extra['status'], 378 | 'gce_zone': inst.extra['zone'].name, 379 | 'gce_tags': inst.extra['tags'], 380 | 'gce_metadata': md, 381 | 'gce_network': net, 382 | 'gce_subnetwork': subnet, 383 | # Hosts don't have a public name, so we add an IP 384 | 'ansible_ssh_host': ssh_host 385 | } 386 | 387 | def load_inventory_from_cache(self): 388 | ''' Loads inventory from JSON on disk. ''' 389 | 390 | try: 391 | self.inventory = self.cache.get_all_data_from_cache() 392 | hosts = self.inventory['_meta']['hostvars'] 393 | except Exception as e: 394 | print( 395 | "Invalid inventory file %s. Please rebuild with -refresh-cache option." 396 | % (self.cache.cache_path_cache)) 397 | raise 398 | 399 | def do_api_calls_update_cache(self): 400 | ''' Do API calls and save data in cache. ''' 401 | zones = self.parse_env_zones() 402 | data = self.group_instances(zones) 403 | self.cache.write_to_cache(data) 404 | self.inventory = data 405 | 406 | def list_nodes(self): 407 | all_nodes = [] 408 | params, more_results = {'maxResults': 500}, True 409 | while more_results: 410 | self.driver.connection.gce_params = params 411 | all_nodes.extend(self.driver.list_nodes()) 412 | more_results = 'pageToken' in params 413 | return all_nodes 414 | 415 | def group_instances(self, zones=None): 416 | '''Group all instances''' 417 | groups = {} 418 | meta = {} 419 | meta["hostvars"] = {} 420 | 421 | for node in self.list_nodes(): 422 | 423 | # This check filters on the desired instance states defined in the 424 | # config file with the instance_states config option. 425 | # 426 | # If the instance_states list is _empty_ then _ALL_ states are returned. 
427 | # 428 | # If the instance_states list is _populated_ then check the current 429 | # state against the instance_states list 430 | if self.instance_states and not node.extra['status'] in self.instance_states: 431 | continue 432 | 433 | name = node.name 434 | 435 | meta["hostvars"][name] = self.node_to_dict(node) 436 | 437 | zone = node.extra['zone'].name 438 | 439 | # To avoid making multiple requests per zone 440 | # we list all nodes and then filter the results 441 | if zones and zone not in zones: 442 | continue 443 | 444 | if zone in groups: 445 | groups[zone].append(name) 446 | else: 447 | groups[zone] = [name] 448 | 449 | tags = node.extra['tags'] 450 | for t in tags: 451 | if t.startswith('group-'): 452 | tag = t[6:] 453 | else: 454 | tag = 'tag_%s' % t 455 | if tag in groups: 456 | groups[tag].append(name) 457 | else: 458 | groups[tag] = [name] 459 | 460 | net = node.extra['networkInterfaces'][0]['network'].split('/')[-1] 461 | net = 'network_%s' % net 462 | if net in groups: 463 | groups[net].append(name) 464 | else: 465 | groups[net] = [name] 466 | 467 | machine_type = node.size 468 | if machine_type in groups: 469 | groups[machine_type].append(name) 470 | else: 471 | groups[machine_type] = [name] 472 | 473 | image = node.image and node.image or 'persistent_disk' 474 | if image in groups: 475 | groups[image].append(name) 476 | else: 477 | groups[image] = [name] 478 | 479 | status = node.extra['status'] 480 | stat = 'status_%s' % status.lower() 481 | if stat in groups: 482 | groups[stat].append(name) 483 | else: 484 | groups[stat] = [name] 485 | 486 | for private_ip in node.private_ips: 487 | groups[private_ip] = [name] 488 | 489 | if len(node.public_ips) >= 1: 490 | for public_ip in node.public_ips: 491 | groups[public_ip] = [name] 492 | 493 | groups["_meta"] = meta 494 | 495 | return groups 496 | 497 | def json_format_dict(self, data, pretty=False): 498 | ''' Converts a dict to a JSON object and dumps it as a formatted 499 | string ''' 500 | 501 | if 
pretty: 502 | return json.dumps(data, sort_keys=True, indent=2) 503 | else: 504 | return json.dumps(data) 505 | 506 | # Run the script 507 | if __name__ == '__main__': 508 | GceInventory() 509 | -------------------------------------------------------------------------------- /Chapter04/phonebook.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: tag_app 3 | roles: 4 | - phonebook 5 | -------------------------------------------------------------------------------- /Chapter04/playbook.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | roles: 4 | - gce 5 | -------------------------------------------------------------------------------- /Chapter04/roles/gce/files/google-cloud-sdk.repo: -------------------------------------------------------------------------------- 1 | [google-cloud-sdk] 2 | name=Google Cloud SDK 3 | baseurl=https://packages.cloud.google.com/yum/repos/cloud-sdk-el7-x86_64 4 | enabled=1 5 | gpgcheck=1 6 | repo_gpgcheck=1 7 | gpgkey=https://packages.cloud.google.com/yum/doc/yum-key.gpg 8 | https://packages.cloud.google.com/yum/doc/rpm-package-key.gpg 9 | -------------------------------------------------------------------------------- /Chapter04/roles/gce/tasks/configure_gcloud.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: enable Google SDK repo for RedHat/Centos 3 | copy: 4 | src: google-cloud-sdk.repo 5 | dest: /etc/yum.repos.d/google-cloud-sdk.repo 6 | when: ansible_os_family == "RedHat" 7 | 8 | - name: enable Google SDK repo for Debian/Ubuntu 9 | template: 10 | src: google-cloud-sdk.list 11 | dest: /etc/apt/sources.list.d/google-cloud-sdk.list 12 | when: ansible_os_family == "Debian" 13 | 14 | - name: install Google Cloud's public key for Debain/Ubuntu 15 | apt_key: 16 | url: https://packages.cloud.google.com/apt/doc/apt-key.gpg 17 | state: present 18 | when: 
ansible_os_family == "Debian" 19 | 20 | - name: install gcloud SDK 21 | package: 22 | name: google-cloud-sdk 23 | state: present 24 | 25 | - name: configure gcloud with service account 26 | command: gcloud auth activate-service-account --key-file=/home/packt/gce-default.json 27 | -------------------------------------------------------------------------------- /Chapter04/roles/gce/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: include secrets 3 | include_vars: secrets.yml 4 | 5 | - name: Create Custom Network 6 | gce_net: 7 | name: my-network 8 | mode: custom 9 | subnet_name: "public-subnet" 10 | subnet_region: us-west1 11 | ipv4_range: '10.0.0.0/24' 12 | state: "present" 13 | service_account_email: "{{ service_account_email }}" 14 | project_id: "{{ project_id }}" 15 | credentials_file: "{{ credentials_file }}" 16 | tags: 17 | - recipe2 18 | 19 | - name: create public ip 20 | gce_eip: 21 | name: app 22 | region: us-west1 23 | state: present 24 | service_account_email: "{{ service_account_email }}" 25 | project_id: "{{ project_id }}" 26 | credentials_file: "{{ credentials_file }}" 27 | register: app_eip 28 | tags: 29 | - recipe2 30 | 31 | - name: create and start the instance 32 | gce: 33 | instance_names: app 34 | zone: "{{ zone }}" 35 | machine_type: f1-micro 36 | image: centos-7 37 | state: present 38 | disk_size: 15 39 | tags: http 40 | metadata: "{{ instance_metadata }}" 41 | network: my-network 42 | subnetwork: public-subnet 43 | external_ip: "{{ app_eip.address }}" 44 | service_account_email: "{{ service_account_email }}" 45 | credentials_file: "{{ credentials_file }}" 46 | project_id: "{{ project_id }}" 47 | tags: 48 | - recipe2 49 | 50 | - name: attach persistent disk 51 | gce_pd: 52 | disk_type: pd-standard 53 | size_gb: 10 54 | instance_name: app 55 | zone: "{{ zone }}" 56 | service_account_email: "{{ service_account_email }}" 57 | credentials_file: "{{ credentials_file }}" 58 | 
project_id: "{{ project_id }}" 59 | name: app-pd 60 | tags: 61 | - recipe3 62 | 63 | - name: create snapshot of instance 64 | gce_snapshot: 65 | instance_name: app 66 | snapshot_name: app-snapshot 67 | state: present 68 | service_account_email: "{{ service_account_email }}" 69 | project_id: "{{ project_id }}" 70 | credentials_file: "{{ credentials_file }}" 71 | tags: 72 | - recipe4 73 | 74 | - name: tag the instance 75 | gce_tag: 76 | instance_name: app 77 | tags: http,app 78 | zone: "{{ zone }}" 79 | state: present 80 | pem_file: "{{ credentials_file }}" 81 | service_account_email: "{{ service_account_email }}" 82 | project_id: "{{ project_id }}" 83 | tags: 84 | - recipe5 85 | 86 | - name: Create Firewall Rule with Source Tags 87 | gce_net: 88 | name: my-network 89 | fwname: "allow-http" 90 | allowed: tcp:80,8080 91 | state: "present" 92 | target_tags: "http" 93 | subnet_region: us-west1 94 | service_account_email: "{{ service_account_email }}" 95 | project_id: "{{ project_id }}" 96 | credentials_file: "{{ credentials_file }}" 97 | tags: 98 | - recipe6 99 | 100 | - name: Create Firewall Rule with Source Range 101 | gce_net: 102 | name: my-network 103 | fwname: "allow-internal" 104 | state: "present" 105 | src_range: ['10.0.0.0/16'] 106 | subnet_name: public-subnet 107 | allowed: 'tcp' 108 | service_account_email: "{{ service_account_email }}" 109 | project_id: "{{ project_id }}" 110 | credentials_file: "{{ credentials_file }}" 111 | tags: 112 | - recipe6 113 | 114 | - name: create load balancer and attach to instance 115 | gce_lb: 116 | name: loadbalancer1 117 | region: us-west1 118 | members: ["{{ zone }}/app"] 119 | httphealthcheck_name: hc 120 | httphealthcheck_port: 80 121 | httphealthcheck_path: "/" 122 | service_account_email: "{{ service_account_email }}" 123 | project_id: "{{ project_id }}" 124 | credentials_file: "{{ credentials_file }}" 125 | tags: 126 | - recipe7 127 | 128 | - name: stop the instance 129 | gce: 130 | instance_names: app 131 | zone: "{{ 
zone }}" 132 | machine_type: f1-micro 133 | image: centos-7 134 | state: stopped 135 | service_account_email: "{{ service_account_email }}" 136 | credentials_file: "{{ credentials_file }}" 137 | project_id: "{{ project_id }}" 138 | disk_size: 15 139 | metadata: "{{ instance_metadata }}" 140 | tags: 141 | - recipe8 142 | 143 | - name: create image 144 | gce_img: 145 | name: app-image 146 | source: app 147 | zone: "{{ zone }}" 148 | state: present 149 | service_account_email: "{{ service_account_email }}" 150 | pem_file: "{{ credentials_file }}" 151 | project_id: "{{ project_id }}" 152 | tags: 153 | - recipe8 154 | 155 | - name: start the instance 156 | gce: 157 | instance_names: app 158 | zone: "{{ zone }}" 159 | machine_type: f1-micro 160 | image: centos-7 161 | state: started 162 | service_account_email: "{{ service_account_email }}" 163 | credentials_file: "{{ credentials_file }}" 164 | project_id: "{{ project_id }}" 165 | disk_size: 15 166 | metadata: "{{ instance_metadata }}" 167 | tags: 168 | - recipe8 169 | 170 | - name: create instance template named app-template 171 | gce_instance_template: 172 | name: app-template 173 | size: f1-micro 174 | tags: http,http-server 175 | image: app-image 176 | state: present 177 | subnetwork: public-subnet 178 | network: my-network 179 | subnetwork_region: us-west1 180 | service_account_email: "{{ service_account_email }}" 181 | credentials_file: "{{ credentials_file }}" 182 | project_id: "{{ project_id }}" 183 | tags: 184 | - recipe9 185 | 186 | - name: create an instance group with autoscaling 187 | gce_mig: 188 | name: app-mig 189 | zone: "{{ zone }}" 190 | service_account_email: "{{ service_account_email }}" 191 | credentials_file: "{{ credentials_file }}" 192 | project_id: "{{ project_id }}" 193 | state: present 194 | size: 2 195 | named_ports: 196 | - name: http 197 | port: 80 198 | template: app-template 199 | autoscaling: 200 | enabled: yes 201 | name: app-autoscaler 202 | policy: 203 | min_instances: 2 204 | 
max_instances: 5 205 | cool_down_period: 90 206 | cpu_utilization: 207 | target: 0.6 208 | load_balancing_utilization: 209 | target: 0.8 210 | tags: 211 | - recipe10 212 | 213 | - name: create a bucket 214 | gc_storage: 215 | bucket: packt-mybucket 216 | mode: create 217 | gs_access_key: "{{ gs_access_key }}" 218 | gs_secret_key: "{{ gs_secret_key }}" 219 | tags: 220 | - recipe11 221 | 222 | - name: upload an object 223 | gc_storage: 224 | bucket: packt-mybucket 225 | object: key.txt 226 | src: /home/packt/ansible/putmyfile.txt 227 | mode: put 228 | permission: public-read 229 | gs_access_key: "{{ gs_access_key }}" 230 | gs_secret_key: "{{ gs_secret_key }}" 231 | tags: 232 | - recipe11 233 | 234 | - name: download an object 235 | gc_storage: 236 | bucket: packt-mybucket 237 | object: key.txt 238 | dest: /home/packt/ansible/getmyfile.txt 239 | mode: get 240 | gs_access_key: "{{ gs_access_key }}" 241 | gs_secret_key: "{{ gs_secret_key }}" 242 | tags: 243 | - recipe11 244 | 245 | - name: Configure Google Cloud SDK 246 | import_tasks: configure_gcloud.yml 247 | become: yes 248 | tags: 249 | - recipe12 250 | 251 | - name: enable Cloud SQL APIs 252 | command: gcloud service-management enable sqladmin.googleapis.com 253 | tags: 254 | - recipe12 255 | 256 | - name: wait for a couple of minutes 257 | pause: 258 | minutes: 2 259 | tags: 260 | - recipe12 261 | 262 | - name: check if the instance is up 263 | command: gcloud sql instances describe app-db 264 | register: availability 265 | ignore_errors: yes 266 | tags: 267 | - recipe12 268 | 269 | - name: boot SQL instance if it not present 270 | command: gcloud sql instances create app-db --tier=db-f1-micro --region=us-west1 --authorized-networks={{ app_eip.address }}/32 271 | when: availability.rc != 0 272 | tags: 273 | - recipe12 274 | 275 | - name: set root password 276 | command: gcloud sql users set-password root % --instance app-db --password {{ mysql_root_password }} 277 | tags: 278 | - recipe12 279 | 
-------------------------------------------------------------------------------- /Chapter04/roles/gce/templates/google-cloud-sdk.list: -------------------------------------------------------------------------------- 1 | deb http://packages.cloud.google.com/apt {{ ansible_distribution_release }} main 2 | -------------------------------------------------------------------------------- /Chapter04/roles/gce/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | service_account_email: "ansible@logical-bolt-181411.iam.gserviceaccount.com" 3 | credentials_file: "/home/packt/gce-default.json" 4 | zone: "us-west1-a" 5 | project_id: "logical-bolt-181411" 6 | instance_metadata: '{"sshKeys":"aditya:ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCbg83WYIxUfXWJ4bQiYfZYHceDwMJxnGfJqgYtHL/DAtegVY+Nm8MX3CRZYisfskt0m9CQ6y/Ux1OITTz+O11fgxLJcroZmKJbWW0K39gfHvFqR7FIe0zuJaxqUQUuyc0i6RCBRiZPiQQOPes2yDtfHgDWx3q9knS3ZXIAXcGLZrgfC1XnIK8CLAnZDved9Rue2bhsCnO9Mleh9g/CTtehMDAzD4NeSv9eETlHYkYSpJg8gFA3BFICpBxTqWSjf1mMQGSmiudFOhRjHIxL1Tvh+pnjSoL/jrLcP3RtMVuG0ZU0qkoAts1qpTwmyAJUz9Ts2EeyDJ0tXsFAiOFbmuMd"}' 7 | -------------------------------------------------------------------------------- /Chapter04/roles/gce/vars/secrets.yml: -------------------------------------------------------------------------------- 1 | --- 2 | gs_access_key: GOOG3Q2HVD6KN345G8FTYQL7Z 3 | gs_secret_key: mcV7G7ft8og5xDhi/j5tepG4zkhfds358790jgUtmBG6BQS 4 | mysql_root_password: secretPassword 5 | -------------------------------------------------------------------------------- /Chapter04/roles/phonebook/files/phone-book.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Simple Phone Book 3 | 4 | [Service] 5 | WorkingDirectory=/opt/phone-book 6 | ExecStartPre=/bin/bash /opt/phone-book/init.sh 7 | ExecStart=/usr/bin/uwsgi --http-socket 0.0.0.0:8080 --manage-script-name --mount /phonebook=app:app 8 | Restart=on-failure 9 | 
---
# Deploys the phone-book Flask application: installs OS and Python
# dependencies, fetches the code, configures it against a remote MySQL
# instance, and starts it as a systemd service.
- name: include secrets
  include_vars: secrets.yml

- name: install epel repository
  package:
    name: epel-release
    state: present

# Passing the list directly to `name` installs everything in a single
# package-manager transaction instead of looping one item at a time.
- name: install dependencies
  package:
    name:
      - git
      - python-pip
      - gcc
      - python-devel
      - mysql-devel
    state: present

- name: install python libraries
  pip:
    name:
      - flask
      - flask-sqlalchemy
      - flask-migrate
      - uwsgi
      - MySQL-python
    state: present

- name: get the application code
  git:
    repo: https://github.com/DevopsNexus/phone-book-mysql/
    dest: /opt/phone-book
    force: yes

- name: upload systemd unit file
  copy:
    src: phone-book.service
    dest: /etc/systemd/system/phone-book.service

- name: upload app config file
  template:
    src: config.py
    dest: /opt/phone-book/config.py

- name: create phonebook database
  mysql_db:
    name: phonebook
    state: present
    login_host: "{{ mysql_host }}"
    login_user: root
    login_password: "{{ mysql_root_password }}"

- name: create app user for phonebook database
  mysql_user:
    name: app
    password: "{{ mysql_app_password }}"
    priv: 'phonebook.*:ALL'
    host: "%"
    state: present
    login_host: "{{ mysql_host }}"
    login_user: root
    login_password: "{{ mysql_root_password }}"

- name: start phonebook
  systemd:
    state: started
    daemon_reload: yes
    name: phone-book
    enabled: yes
/Chapter04/roles/phonebook/templates/config.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | SQLALCHEMY_DATABASE_URI = 'mysql://app:{{ mysql_app_password }}@{{ mysql_host }}/phonebook' 4 | SECRET_KEY = os.urandom(32) 5 | -------------------------------------------------------------------------------- /Chapter04/roles/phonebook/vars/secrets.yml: -------------------------------------------------------------------------------- 1 | --- 2 | mysql_app_password: appSecretPassword 3 | mysql_root_password: secretPassword 4 | mysql_host: 35.199.168.191 5 | -------------------------------------------------------------------------------- /Chapter05/azure_rm.ini: -------------------------------------------------------------------------------- 1 | # 2 | # Configuration file for azure_rm.py 3 | # 4 | [azure] 5 | # Control which resource groups are included. By default all resources groups are included. 6 | # Set resource_groups to a comma separated list of resource groups names. 7 | #resource_groups= 8 | 9 | # Control which tags are included. Set tags to a comma separated list of keys or key:value pairs 10 | #tags= 11 | 12 | # Control which locations are included. Set locations to a comma separated list (e.g. eastus,eastus2,westus) 13 | #locations= 14 | 15 | # Include powerstate. If you don't need powerstate information, turning it off improves runtime performance. 16 | include_powerstate=yes 17 | 18 | # Control grouping with the following boolean flags. Valid values: yes, no, true, false, True, False, 0, 1. 
19 | group_by_resource_group=yes 20 | group_by_location=yes 21 | group_by_security_group=yes 22 | group_by_tag=yes 23 | -------------------------------------------------------------------------------- /Chapter05/azure_rm.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # 3 | # Copyright (c) 2016 Matt Davis, 4 | # Chris Houseknecht, 5 | # 6 | # This file is part of Ansible 7 | # 8 | # Ansible is free software: you can redistribute it and/or modify 9 | # it under the terms of the GNU General Public License as published by 10 | # the Free Software Foundation, either version 3 of the License, or 11 | # (at your option) any later version. 12 | # 13 | # Ansible is distributed in the hope that it will be useful, 14 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 15 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 | # GNU General Public License for more details. 17 | # 18 | # You should have received a copy of the GNU General Public License 19 | # along with Ansible. If not, see . 20 | # 21 | 22 | ''' 23 | Azure External Inventory Script 24 | =============================== 25 | Generates dynamic inventory by making API requests to the Azure Resource 26 | Manager using the Azure Python SDK. For instruction on installing the 27 | Azure Python SDK see http://azure-sdk-for-python.readthedocs.org/ 28 | 29 | Authentication 30 | -------------- 31 | The order of precedence is command line arguments, environment variables, 32 | and finally the [default] profile found in ~/.azure/credentials. 33 | 34 | If using a credentials file, it should be an ini formatted file with one or 35 | more sections, which we refer to as profiles. The script looks for a 36 | [default] section, if a profile is not specified either on the command line 37 | or with an environment variable. The keys in a profile will match the 38 | list of command line arguments below. 
39 | 40 | For command line arguments and environment variables specify a profile found 41 | in your ~/.azure/credentials file, or a service principal or Active Directory 42 | user. 43 | 44 | Command line arguments: 45 | - profile 46 | - client_id 47 | - secret 48 | - subscription_id 49 | - tenant 50 | - ad_user 51 | - password 52 | - cloud_environment 53 | 54 | Environment variables: 55 | - AZURE_PROFILE 56 | - AZURE_CLIENT_ID 57 | - AZURE_SECRET 58 | - AZURE_SUBSCRIPTION_ID 59 | - AZURE_TENANT 60 | - AZURE_AD_USER 61 | - AZURE_PASSWORD 62 | - AZURE_CLOUD_ENVIRONMENT 63 | 64 | Run for Specific Host 65 | ----------------------- 66 | When run for a specific host using the --host option, a resource group is 67 | required. For a specific host, this script returns the following variables: 68 | 69 | { 70 | "ansible_host": "XXX.XXX.XXX.XXX", 71 | "computer_name": "computer_name2", 72 | "fqdn": null, 73 | "id": "/subscriptions/subscription-id/resourceGroups/galaxy-production/providers/Microsoft.Compute/virtualMachines/object-name", 74 | "image": { 75 | "offer": "CentOS", 76 | "publisher": "OpenLogic", 77 | "sku": "7.1", 78 | "version": "latest" 79 | }, 80 | "location": "westus", 81 | "mac_address": "00-00-5E-00-53-FE", 82 | "name": "object-name", 83 | "network_interface": "interface-name", 84 | "network_interface_id": "/subscriptions/subscription-id/resourceGroups/galaxy-production/providers/Microsoft.Network/networkInterfaces/object-name1", 85 | "network_security_group": null, 86 | "network_security_group_id": null, 87 | "os_disk": { 88 | "name": "object-name", 89 | "operating_system_type": "Linux" 90 | }, 91 | "plan": null, 92 | "powerstate": "running", 93 | "private_ip": "172.26.3.6", 94 | "private_ip_alloc_method": "Static", 95 | "provisioning_state": "Succeeded", 96 | "public_ip": "XXX.XXX.XXX.XXX", 97 | "public_ip_alloc_method": "Static", 98 | "public_ip_id": 
"/subscriptions/subscription-id/resourceGroups/galaxy-production/providers/Microsoft.Network/publicIPAddresses/object-name", 99 | "public_ip_name": "object-name", 100 | "resource_group": "galaxy-production", 101 | "security_group": "object-name", 102 | "security_group_id": "/subscriptions/subscription-id/resourceGroups/galaxy-production/providers/Microsoft.Network/networkSecurityGroups/object-name", 103 | "tags": { 104 | "db": "database" 105 | }, 106 | "type": "Microsoft.Compute/virtualMachines", 107 | "virtual_machine_size": "Standard_DS4" 108 | } 109 | 110 | Groups 111 | ------ 112 | When run in --list mode, instances are grouped by the following categories: 113 | - azure 114 | - location 115 | - resource_group 116 | - security_group 117 | - tag key 118 | - tag key_value 119 | 120 | Control groups using azure_rm.ini or set environment variables: 121 | 122 | AZURE_GROUP_BY_RESOURCE_GROUP=yes 123 | AZURE_GROUP_BY_LOCATION=yes 124 | AZURE_GROUP_BY_SECURITY_GROUP=yes 125 | AZURE_GROUP_BY_TAG=yes 126 | 127 | Select hosts within specific resource groups by assigning a comma separated list to: 128 | 129 | AZURE_RESOURCE_GROUPS=resource_group_a,resource_group_b 130 | 131 | Select hosts for specific tag key by assigning a comma separated list of tag keys to: 132 | 133 | AZURE_TAGS=key1,key2,key3 134 | 135 | Select hosts for specific locations: 136 | 137 | AZURE_LOCATIONS=eastus,westus,eastus2 138 | 139 | Or, select hosts for specific tag key:value pairs by assigning a comma separated list key:value pairs to: 140 | 141 | AZURE_TAGS=key1:value1,key2:value2 142 | 143 | If you don't need the powerstate, you can improve performance by turning off powerstate fetching: 144 | AZURE_INCLUDE_POWERSTATE=no 145 | 146 | azure_rm.ini 147 | ------------ 148 | As mentioned above, you can control execution using environment variables or a .ini file. A sample 149 | azure_rm.ini is included. 
The name of the .ini file is the basename of the inventory script (in this case 150 | 'azure_rm') with a .ini extension. It also assumes the .ini file is alongside the script. To specify 151 | a different path for the .ini file, define the AZURE_INI_PATH environment variable: 152 | 153 | export AZURE_INI_PATH=/path/to/custom.ini 154 | 155 | Powerstate: 156 | ----------- 157 | The powerstate attribute indicates whether or not a host is running. If the value is 'running', the machine is 158 | up. If the value is anything other than 'running', the machine is down, and will be unreachable. 159 | 160 | Examples: 161 | --------- 162 | Execute /bin/uname on all instances in the galaxy-qa resource group 163 | $ ansible -i azure_rm.py galaxy-qa -m shell -a "/bin/uname -a" 164 | 165 | Use the inventory script to print instance specific information 166 | $ contrib/inventory/azure_rm.py --host my_instance_host_name --pretty 167 | 168 | Use with a playbook 169 | $ ansible-playbook -i contrib/inventory/azure_rm.py my_playbook.yml --limit galaxy-qa 170 | 171 | 172 | Insecure Platform Warning 173 | ------------------------- 174 | If you receive InsecurePlatformWarning from urllib3, install the 175 | requests security packages: 176 | 177 | pip install requests[security] 178 | 179 | 180 | author: 181 | - Chris Houseknecht (@chouseknecht) 182 | - Matt Davis (@nitzmahone) 183 | 184 | Company: Ansible by Red Hat 185 | 186 | Version: 1.0.0 187 | ''' 188 | 189 | import argparse 190 | import json 191 | import os 192 | import re 193 | import sys 194 | import inspect 195 | 196 | try: 197 | # python2 198 | import ConfigParser as cp 199 | except ImportError: 200 | # python3 201 | import configparser as cp 202 | 203 | from packaging.version import Version 204 | 205 | from os.path import expanduser 206 | import ansible.module_utils.six.moves.urllib.parse as urlparse 207 | 208 | HAS_AZURE = True 209 | HAS_AZURE_EXC = None 210 | 211 | try: 212 | from msrestazure.azure_exceptions import CloudError 
def azure_id_to_dict(id):
    '''Split an Azure resource id path into a dict.

    Each '/'-separated segment maps to the segment that follows it, so
    for '/subscriptions/S/resourceGroups/G' the keys 'subscriptions' and
    'resourceGroups' resolve to 'S' and 'G'. When the same segment text
    appears twice, the later pairing wins (same as the original loop).
    '''
    # Strip one leading slash, then pair every segment with its successor.
    segments = re.sub(r'^\/', '', id).split('/')
    return dict(zip(segments, segments[1:]))
self._get_credentials(args) 274 | if not self.credentials: 275 | self.fail("Failed to get credentials. Either pass as parameters, set environment variables, " 276 | "or define a profile in ~/.azure/credentials.") 277 | 278 | # if cloud_environment specified, look up/build Cloud object 279 | raw_cloud_env = self.credentials.get('cloud_environment') 280 | if not raw_cloud_env: 281 | self._cloud_environment = azure_cloud.AZURE_PUBLIC_CLOUD # SDK default 282 | else: 283 | # try to look up "well-known" values via the name attribute on azure_cloud members 284 | all_clouds = [x[1] for x in inspect.getmembers(azure_cloud) if isinstance(x[1], azure_cloud.Cloud)] 285 | matched_clouds = [x for x in all_clouds if x.name == raw_cloud_env] 286 | if len(matched_clouds) == 1: 287 | self._cloud_environment = matched_clouds[0] 288 | elif len(matched_clouds) > 1: 289 | self.fail("Azure SDK failure: more than one cloud matched for cloud_environment name '{0}'".format(raw_cloud_env)) 290 | else: 291 | if not urlparse.urlparse(raw_cloud_env).scheme: 292 | self.fail("cloud_environment must be an endpoint discovery URL or one of {0}".format([x.name for x in all_clouds])) 293 | try: 294 | self._cloud_environment = azure_cloud.get_cloud_from_metadata_endpoint(raw_cloud_env) 295 | except Exception as e: 296 | self.fail("cloud_environment {0} could not be resolved: {1}".format(raw_cloud_env, e.message)) 297 | 298 | if self.credentials.get('subscription_id', None) is None: 299 | self.fail("Credentials did not include a subscription_id value.") 300 | self.log("setting subscription_id") 301 | self.subscription_id = self.credentials['subscription_id'] 302 | 303 | if self.credentials.get('client_id') is not None and \ 304 | self.credentials.get('secret') is not None and \ 305 | self.credentials.get('tenant') is not None: 306 | self.azure_credentials = ServicePrincipalCredentials(client_id=self.credentials['client_id'], 307 | secret=self.credentials['secret'], 308 | 
tenant=self.credentials['tenant'], 309 | cloud_environment=self._cloud_environment) 310 | elif self.credentials.get('ad_user') is not None and self.credentials.get('password') is not None: 311 | tenant = self.credentials.get('tenant') 312 | if not tenant: 313 | tenant = 'common' 314 | self.azure_credentials = UserPassCredentials(self.credentials['ad_user'], 315 | self.credentials['password'], 316 | tenant=tenant, 317 | cloud_environment=self._cloud_environment) 318 | else: 319 | self.fail("Failed to authenticate with provided credentials. Some attributes were missing. " 320 | "Credentials must include client_id, secret and tenant or ad_user and password.") 321 | 322 | def log(self, msg): 323 | if self.debug: 324 | print(msg + u'\n') 325 | 326 | def fail(self, msg): 327 | raise Exception(msg) 328 | 329 | def _get_profile(self, profile="default"): 330 | path = expanduser("~") 331 | path += "/.azure/credentials" 332 | try: 333 | config = cp.ConfigParser() 334 | config.read(path) 335 | except Exception as exc: 336 | self.fail("Failed to access {0}. Check that the file exists and you have read " 337 | "access. 
{1}".format(path, str(exc))) 338 | credentials = dict() 339 | for key in AZURE_CREDENTIAL_ENV_MAPPING: 340 | try: 341 | credentials[key] = config.get(profile, key, raw=True) 342 | except: 343 | pass 344 | 345 | if credentials.get('client_id') is not None or credentials.get('ad_user') is not None: 346 | return credentials 347 | 348 | return None 349 | 350 | def _get_env_credentials(self): 351 | env_credentials = dict() 352 | for attribute, env_variable in AZURE_CREDENTIAL_ENV_MAPPING.items(): 353 | env_credentials[attribute] = os.environ.get(env_variable, None) 354 | 355 | if env_credentials['profile'] is not None: 356 | credentials = self._get_profile(env_credentials['profile']) 357 | return credentials 358 | 359 | if env_credentials['client_id'] is not None or env_credentials['ad_user'] is not None: 360 | return env_credentials 361 | 362 | return None 363 | 364 | def _get_credentials(self, params): 365 | # Get authentication credentials. 366 | # Precedence: cmd line parameters-> environment variables-> default profile in ~/.azure/credentials. 
367 | 368 | self.log('Getting credentials') 369 | 370 | arg_credentials = dict() 371 | for attribute, env_variable in AZURE_CREDENTIAL_ENV_MAPPING.items(): 372 | arg_credentials[attribute] = getattr(params, attribute) 373 | 374 | # try module params 375 | if arg_credentials['profile'] is not None: 376 | self.log('Retrieving credentials with profile parameter.') 377 | credentials = self._get_profile(arg_credentials['profile']) 378 | return credentials 379 | 380 | if arg_credentials['client_id'] is not None: 381 | self.log('Received credentials from parameters.') 382 | return arg_credentials 383 | 384 | if arg_credentials['ad_user'] is not None: 385 | self.log('Received credentials from parameters.') 386 | return arg_credentials 387 | 388 | # try environment 389 | env_credentials = self._get_env_credentials() 390 | if env_credentials: 391 | self.log('Received credentials from env.') 392 | return env_credentials 393 | 394 | # try default profile from ~./azure/credentials 395 | default_credentials = self._get_profile() 396 | if default_credentials: 397 | self.log('Retrieved default profile credentials from ~/.azure/credentials.') 398 | return default_credentials 399 | 400 | return None 401 | 402 | def _register(self, key): 403 | try: 404 | # We have to perform the one-time registration here. Otherwise, we receive an error the first 405 | # time we attempt to use the requested client. 
406 | resource_client = self.rm_client 407 | resource_client.providers.register(key) 408 | except Exception as exc: 409 | self.log("One-time registration of {0} failed - {1}".format(key, str(exc))) 410 | self.log("You might need to register {0} using an admin account".format(key)) 411 | self.log(("To register a provider using the Python CLI: " 412 | "https://docs.microsoft.com/azure/azure-resource-manager/" 413 | "resource-manager-common-deployment-errors#noregisteredproviderfound")) 414 | 415 | @property 416 | def network_client(self): 417 | self.log('Getting network client') 418 | if not self._network_client: 419 | self._network_client = NetworkManagementClient( 420 | self.azure_credentials, 421 | self.subscription_id, 422 | base_url=self._cloud_environment.endpoints.resource_manager, 423 | api_version='2017-06-01' 424 | ) 425 | self._register('Microsoft.Network') 426 | return self._network_client 427 | 428 | @property 429 | def rm_client(self): 430 | self.log('Getting resource manager client') 431 | if not self._resource_client: 432 | self._resource_client = ResourceManagementClient( 433 | self.azure_credentials, 434 | self.subscription_id, 435 | base_url=self._cloud_environment.endpoints.resource_manager, 436 | api_version='2017-05-10' 437 | ) 438 | return self._resource_client 439 | 440 | @property 441 | def compute_client(self): 442 | self.log('Getting compute client') 443 | if not self._compute_client: 444 | self._compute_client = ComputeManagementClient( 445 | self.azure_credentials, 446 | self.subscription_id, 447 | base_url=self._cloud_environment.endpoints.resource_manager, 448 | api_version='2017-03-30' 449 | ) 450 | self._register('Microsoft.Compute') 451 | return self._compute_client 452 | 453 | 454 | class AzureInventory(object): 455 | 456 | def __init__(self): 457 | 458 | self._args = self._parse_cli_args() 459 | 460 | try: 461 | rm = AzureRM(self._args) 462 | except Exception as e: 463 | sys.exit("{0}".format(str(e))) 464 | 465 | 
self._compute_client = rm.compute_client 466 | self._network_client = rm.network_client 467 | self._resource_client = rm.rm_client 468 | self._security_groups = None 469 | 470 | self.resource_groups = [] 471 | self.tags = None 472 | self.locations = None 473 | self.replace_dash_in_groups = False 474 | self.group_by_resource_group = True 475 | self.group_by_location = True 476 | self.group_by_security_group = True 477 | self.group_by_tag = True 478 | self.include_powerstate = True 479 | 480 | self._inventory = dict( 481 | _meta=dict( 482 | hostvars=dict() 483 | ), 484 | azure=[] 485 | ) 486 | 487 | self._get_settings() 488 | 489 | if self._args.resource_groups: 490 | self.resource_groups = self._args.resource_groups.split(',') 491 | 492 | if self._args.tags: 493 | self.tags = self._args.tags.split(',') 494 | 495 | if self._args.locations: 496 | self.locations = self._args.locations.split(',') 497 | 498 | if self._args.no_powerstate: 499 | self.include_powerstate = False 500 | 501 | self.get_inventory() 502 | print(self._json_format_dict(pretty=self._args.pretty)) 503 | sys.exit(0) 504 | 505 | def _parse_cli_args(self): 506 | # Parse command line arguments 507 | parser = argparse.ArgumentParser( 508 | description='Produce an Ansible Inventory file for an Azure subscription') 509 | parser.add_argument('--list', action='store_true', default=True, 510 | help='List instances (default: True)') 511 | parser.add_argument('--debug', action='store_true', default=False, 512 | help='Send debug messages to STDOUT') 513 | parser.add_argument('--host', action='store', 514 | help='Get all information about an instance') 515 | parser.add_argument('--pretty', action='store_true', default=False, 516 | help='Pretty print JSON output(default: False)') 517 | parser.add_argument('--profile', action='store', 518 | help='Azure profile contained in ~/.azure/credentials') 519 | parser.add_argument('--subscription_id', action='store', 520 | help='Azure Subscription Id') 521 | 
parser.add_argument('--client_id', action='store', 522 | help='Azure Client Id ') 523 | parser.add_argument('--secret', action='store', 524 | help='Azure Client Secret') 525 | parser.add_argument('--tenant', action='store', 526 | help='Azure Tenant Id') 527 | parser.add_argument('--ad_user', action='store', 528 | help='Active Directory User') 529 | parser.add_argument('--password', action='store', 530 | help='password') 531 | parser.add_argument('--cloud_environment', action='store', 532 | help='Azure Cloud Environment name or metadata discovery URL') 533 | parser.add_argument('--resource-groups', action='store', 534 | help='Return inventory for comma separated list of resource group names') 535 | parser.add_argument('--tags', action='store', 536 | help='Return inventory for comma separated list of tag key:value pairs') 537 | parser.add_argument('--locations', action='store', 538 | help='Return inventory for comma separated list of locations') 539 | parser.add_argument('--no-powerstate', action='store_true', default=False, 540 | help='Do not include the power state of each virtual host') 541 | return parser.parse_args() 542 | 543 | def get_inventory(self): 544 | if len(self.resource_groups) > 0: 545 | # get VMs for requested resource groups 546 | for resource_group in self.resource_groups: 547 | try: 548 | virtual_machines = self._compute_client.virtual_machines.list(resource_group) 549 | except Exception as exc: 550 | sys.exit("Error: fetching virtual machines for resource group {0} - {1}".format(resource_group, str(exc))) 551 | if self._args.host or self.tags: 552 | selected_machines = self._selected_machines(virtual_machines) 553 | self._load_machines(selected_machines) 554 | else: 555 | self._load_machines(virtual_machines) 556 | else: 557 | # get all VMs within the subscription 558 | try: 559 | virtual_machines = self._compute_client.virtual_machines.list_all() 560 | except Exception as exc: 561 | sys.exit("Error: fetching virtual machines - 
{0}".format(str(exc))) 562 | 563 | if self._args.host or self.tags or self.locations: 564 | selected_machines = self._selected_machines(virtual_machines) 565 | self._load_machines(selected_machines) 566 | else: 567 | self._load_machines(virtual_machines) 568 | 569 | def _load_machines(self, machines): 570 | for machine in machines: 571 | id_dict = azure_id_to_dict(machine.id) 572 | 573 | # TODO - The API is returning an ID value containing resource group name in ALL CAPS. If/when it gets 574 | # fixed, we should remove the .lower(). Opened Issue 575 | # #574: https://github.com/Azure/azure-sdk-for-python/issues/574 576 | resource_group = id_dict['resourceGroups'].lower() 577 | 578 | if self.group_by_security_group: 579 | self._get_security_groups(resource_group) 580 | 581 | host_vars = dict( 582 | ansible_host=None, 583 | private_ip=None, 584 | private_ip_alloc_method=None, 585 | public_ip=None, 586 | public_ip_name=None, 587 | public_ip_id=None, 588 | public_ip_alloc_method=None, 589 | fqdn=None, 590 | location=machine.location, 591 | name=machine.name, 592 | type=machine.type, 593 | id=machine.id, 594 | tags=machine.tags, 595 | network_interface_id=None, 596 | network_interface=None, 597 | resource_group=resource_group, 598 | mac_address=None, 599 | plan=(machine.plan.name if machine.plan else None), 600 | virtual_machine_size=machine.hardware_profile.vm_size, 601 | computer_name=(machine.os_profile.computer_name if machine.os_profile else None), 602 | provisioning_state=machine.provisioning_state, 603 | ) 604 | 605 | host_vars['os_disk'] = dict( 606 | name=machine.storage_profile.os_disk.name, 607 | operating_system_type=machine.storage_profile.os_disk.os_type.value 608 | ) 609 | 610 | if self.include_powerstate: 611 | host_vars['powerstate'] = self._get_powerstate(resource_group, machine.name) 612 | 613 | if machine.storage_profile.image_reference: 614 | host_vars['image'] = dict( 615 | offer=machine.storage_profile.image_reference.offer, 616 | 
publisher=machine.storage_profile.image_reference.publisher, 617 | sku=machine.storage_profile.image_reference.sku, 618 | version=machine.storage_profile.image_reference.version 619 | ) 620 | 621 | # Add windows details 622 | if machine.os_profile is not None and machine.os_profile.windows_configuration is not None: 623 | host_vars['ansible_connection'] = 'winrm' 624 | host_vars['windows_auto_updates_enabled'] = \ 625 | machine.os_profile.windows_configuration.enable_automatic_updates 626 | host_vars['windows_timezone'] = machine.os_profile.windows_configuration.time_zone 627 | host_vars['windows_rm'] = None 628 | if machine.os_profile.windows_configuration.win_rm is not None: 629 | host_vars['windows_rm'] = dict(listeners=None) 630 | if machine.os_profile.windows_configuration.win_rm.listeners is not None: 631 | host_vars['windows_rm']['listeners'] = [] 632 | for listener in machine.os_profile.windows_configuration.win_rm.listeners: 633 | host_vars['windows_rm']['listeners'].append(dict(protocol=listener.protocol, 634 | certificate_url=listener.certificate_url)) 635 | 636 | for interface in machine.network_profile.network_interfaces: 637 | interface_reference = self._parse_ref_id(interface.id) 638 | network_interface = self._network_client.network_interfaces.get( 639 | interface_reference['resourceGroups'], 640 | interface_reference['networkInterfaces']) 641 | if network_interface.primary: 642 | if self.group_by_security_group and \ 643 | self._security_groups[resource_group].get(network_interface.id, None): 644 | host_vars['security_group'] = \ 645 | self._security_groups[resource_group][network_interface.id]['name'] 646 | host_vars['security_group_id'] = \ 647 | self._security_groups[resource_group][network_interface.id]['id'] 648 | host_vars['network_interface'] = network_interface.name 649 | host_vars['network_interface_id'] = network_interface.id 650 | host_vars['mac_address'] = network_interface.mac_address 651 | for ip_config in 
network_interface.ip_configurations: 652 | host_vars['private_ip'] = ip_config.private_ip_address 653 | host_vars['private_ip_alloc_method'] = ip_config.private_ip_allocation_method 654 | if ip_config.public_ip_address: 655 | public_ip_reference = self._parse_ref_id(ip_config.public_ip_address.id) 656 | public_ip_address = self._network_client.public_ip_addresses.get( 657 | public_ip_reference['resourceGroups'], 658 | public_ip_reference['publicIPAddresses']) 659 | host_vars['ansible_host'] = public_ip_address.ip_address 660 | host_vars['public_ip'] = public_ip_address.ip_address 661 | host_vars['public_ip_name'] = public_ip_address.name 662 | host_vars['public_ip_alloc_method'] = public_ip_address.public_ip_allocation_method 663 | host_vars['public_ip_id'] = public_ip_address.id 664 | if public_ip_address.dns_settings: 665 | host_vars['fqdn'] = public_ip_address.dns_settings.fqdn 666 | 667 | self._add_host(host_vars) 668 | 669 | def _selected_machines(self, virtual_machines): 670 | selected_machines = [] 671 | for machine in virtual_machines: 672 | if self._args.host and self._args.host == machine.name: 673 | selected_machines.append(machine) 674 | if self.tags and self._tags_match(machine.tags, self.tags): 675 | selected_machines.append(machine) 676 | if self.locations and machine.location in self.locations: 677 | selected_machines.append(machine) 678 | return selected_machines 679 | 680 | def _get_security_groups(self, resource_group): 681 | ''' For a given resource_group build a mapping of network_interface.id to security_group name ''' 682 | if not self._security_groups: 683 | self._security_groups = dict() 684 | if not self._security_groups.get(resource_group): 685 | self._security_groups[resource_group] = dict() 686 | for group in self._network_client.network_security_groups.list(resource_group): 687 | if group.network_interfaces: 688 | for interface in group.network_interfaces: 689 | self._security_groups[resource_group][interface.id] = dict( 690 | 
name=group.name, 691 | id=group.id 692 | ) 693 | 694 | def _get_powerstate(self, resource_group, name): 695 | try: 696 | vm = self._compute_client.virtual_machines.get(resource_group, 697 | name, 698 | expand='instanceview') 699 | except Exception as exc: 700 | sys.exit("Error: fetching instanceview for host {0} - {1}".format(name, str(exc))) 701 | 702 | return next((s.code.replace('PowerState/', '') 703 | for s in vm.instance_view.statuses if s.code.startswith('PowerState')), None) 704 | 705 | def _add_host(self, vars): 706 | 707 | host_name = self._to_safe(vars['name']) 708 | resource_group = self._to_safe(vars['resource_group']) 709 | security_group = None 710 | if vars.get('security_group'): 711 | security_group = self._to_safe(vars['security_group']) 712 | 713 | if self.group_by_resource_group: 714 | if not self._inventory.get(resource_group): 715 | self._inventory[resource_group] = [] 716 | self._inventory[resource_group].append(host_name) 717 | 718 | if self.group_by_location: 719 | if not self._inventory.get(vars['location']): 720 | self._inventory[vars['location']] = [] 721 | self._inventory[vars['location']].append(host_name) 722 | 723 | if self.group_by_security_group and security_group: 724 | if not self._inventory.get(security_group): 725 | self._inventory[security_group] = [] 726 | self._inventory[security_group].append(host_name) 727 | 728 | self._inventory['_meta']['hostvars'][host_name] = vars 729 | self._inventory['azure'].append(host_name) 730 | 731 | if self.group_by_tag and vars.get('tags'): 732 | for key, value in vars['tags'].items(): 733 | safe_key = self._to_safe(key) 734 | safe_value = safe_key + '_' + self._to_safe(value) 735 | if not self._inventory.get(safe_key): 736 | self._inventory[safe_key] = [] 737 | if not self._inventory.get(safe_value): 738 | self._inventory[safe_value] = [] 739 | self._inventory[safe_key].append(host_name) 740 | self._inventory[safe_value].append(host_name) 741 | 742 | def _json_format_dict(self, pretty=False): 
743 | # convert inventory to json 744 | if pretty: 745 | return json.dumps(self._inventory, sort_keys=True, indent=2) 746 | else: 747 | return json.dumps(self._inventory) 748 | 749 | def _get_settings(self): 750 | # Load settings from the .ini, if it exists. Otherwise, 751 | # look for environment values. 752 | file_settings = self._load_settings() 753 | if file_settings: 754 | for key in AZURE_CONFIG_SETTINGS: 755 | if key in ('resource_groups', 'tags', 'locations') and file_settings.get(key): 756 | values = file_settings.get(key).split(',') 757 | if len(values) > 0: 758 | setattr(self, key, values) 759 | elif file_settings.get(key): 760 | val = self._to_boolean(file_settings[key]) 761 | setattr(self, key, val) 762 | else: 763 | env_settings = self._get_env_settings() 764 | for key in AZURE_CONFIG_SETTINGS: 765 | if key in('resource_groups', 'tags', 'locations') and env_settings.get(key): 766 | values = env_settings.get(key).split(',') 767 | if len(values) > 0: 768 | setattr(self, key, values) 769 | elif env_settings.get(key, None) is not None: 770 | val = self._to_boolean(env_settings[key]) 771 | setattr(self, key, val) 772 | 773 | def _parse_ref_id(self, reference): 774 | response = {} 775 | keys = reference.strip('/').split('/') 776 | for index in range(len(keys)): 777 | if index < len(keys) - 1 and index % 2 == 0: 778 | response[keys[index]] = keys[index + 1] 779 | return response 780 | 781 | def _to_boolean(self, value): 782 | if value in ['Yes', 'yes', 1, 'True', 'true', True]: 783 | result = True 784 | elif value in ['No', 'no', 0, 'False', 'false', False]: 785 | result = False 786 | else: 787 | result = True 788 | return result 789 | 790 | def _get_env_settings(self): 791 | env_settings = dict() 792 | for attribute, env_variable in AZURE_CONFIG_SETTINGS.items(): 793 | env_settings[attribute] = os.environ.get(env_variable, None) 794 | return env_settings 795 | 796 | def _load_settings(self): 797 | basename = os.path.splitext(os.path.basename(__file__))[0] 
798 | default_path = os.path.join(os.path.dirname(__file__), (basename + '.ini')) 799 | path = os.path.expanduser(os.path.expandvars(os.environ.get('AZURE_INI_PATH', default_path))) 800 | config = None 801 | settings = None 802 | try: 803 | config = cp.ConfigParser() 804 | config.read(path) 805 | except: 806 | pass 807 | 808 | if config is not None: 809 | settings = dict() 810 | for key in AZURE_CONFIG_SETTINGS: 811 | try: 812 | settings[key] = config.get('azure', key, raw=True) 813 | except: 814 | pass 815 | 816 | return settings 817 | 818 | def _tags_match(self, tag_obj, tag_args): 819 | ''' 820 | Return True if the tags object from a VM contains the requested tag values. 821 | 822 | :param tag_obj: Dictionary of string:string pairs 823 | :param tag_args: List of strings in the form key=value 824 | :return: boolean 825 | ''' 826 | 827 | if not tag_obj: 828 | return False 829 | 830 | matches = 0 831 | for arg in tag_args: 832 | arg_key = arg 833 | arg_value = None 834 | if re.search(r':', arg): 835 | arg_key, arg_value = arg.split(':') 836 | if arg_value and tag_obj.get(arg_key, None) == arg_value: 837 | matches += 1 838 | elif not arg_value and tag_obj.get(arg_key, None) is not None: 839 | matches += 1 840 | if matches == len(tag_args): 841 | return True 842 | return False 843 | 844 | def _to_safe(self, word): 845 | ''' Converts 'bad' characters in a string to underscores so they can be used as Ansible groups ''' 846 | regex = r"[^A-Za-z0-9\_" 847 | if not self.replace_dash_in_groups: 848 | regex += r"\-" 849 | return re.sub(regex + "]", "_", word) 850 | 851 | 852 | def main(): 853 | if not HAS_AZURE: 854 | sys.exit("The Azure python sdk is not installed (try `pip install 'azure>={0}' --upgrade`) - {1}".format(AZURE_MIN_VERSION, HAS_AZURE_EXC)) 855 | 856 | AzureInventory() 857 | 858 | 859 | if __name__ == '__main__': 860 | main() 861 | -------------------------------------------------------------------------------- /Chapter05/phonebook.yml: 
--------------------------------------------------------------------------------
---
# Apply the phonebook role to the Azure VM created by the azure role below.
- hosts: first_vm
  roles:
    - phonebook
-------------------------------------------------------------------------------- /Chapter05/playbook.yml: --------------------------------------------------------------------------------
---
- hosts: localhost
  roles:
    # was "ec2": Chapter05 only defines the azure and phonebook roles
    # (see roles/ directory), so the ec2 role could never be found.
    - azure
-------------------------------------------------------------------------------- /Chapter05/roles/azure/tasks/main.yml: --------------------------------------------------------------------------------
---
- name: Create Virtual Network
  azure_rm_virtualnetwork:
    name: vnet01
    resource_group: example
    address_prefixes_cidr:
      - "10.2.0.0/16"
      - "172.1.0.0/16"
    tags:
      env: testing
    state: present
  tags:
    - recipe2

- name: Create subnet
  azure_rm_subnet:
    name: subnet01
    # was "my_first_subnet": the subnet must be created inside the vnet01
    # virtual network created above; my_first_subnet is not a virtual network.
    virtual_network_name: vnet01
    resource_group: example
    address_prefix_cidr: "10.2.0.0/24"
    state: present
  tags:
    - recipe2

- name: Create network interface card
  azure_rm_networkinterface:
    name: nic01
    resource_group: example
    virtual_network_name: vnet01
    subnet_name: subnet01
    public_ip: no
    state: present
  register: network_interface
  tags:
    - recipe2

- name: Show private ip
  debug:
    msg: "{{ network_interface.ip_configuration.private_ip_address }}"
  tags:
    - recipe2

- name: Create Public IP address
  azure_rm_publicipaddress:
    resource_group: example
    name: pip01
    allocation_method: Static
    domain_name: test
    state: present
  register: publicip
  # was "tag:": the Ansible task keyword is "tags"; with "tag:" the task
  # would fail to parse / never be selected by --tags recipe3.
  tags:
    - recipe3

- name: Show Public IP address
  debug:
    msg: "{{ publicip.ip_address }}"
  # was "tag:" (same keyword typo as above)
  tags:
    - recipe3

- name: Create network interface card using existing public ip address
  azure_rm_networkinterface:
    name: nic02
    resource_group: example
    virtual_network_name: vnet01
    subnet_name: subnet01
    public_ip_address_name: pip01
    state: present
  register: network_interface02
  tags:
    - recipe4

- name: Create VM using existing virtual machine
  azure_rm_virtualmachine:
    resource_group: example
    name: first_vm
    # was typographic quotes (“…”), which YAML treats as literal characters,
    # so the Jinja2 expression would never have been evaluated correctly.
    location: "{{ azure_region }}"
    vm_size: Standard_D4
    storage_account: examplestorage01
    admin_username: cookbook
    ssh_public_keys:
      - path: /home/admin/.ssh/authorized_keys
        # was typographic quotes (“…”) around the key as well
        key_data: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDq8ddP3LGDr586Njl9lqScZvakv4DvGPsK9PNCw+MWaLZsSovUECLm1v3IxfBhbGUrbQMFAbff0Piie9+6aas5vSFaqn1LMhEyVNjJkaFaztg/FiYbhcSzb4zc7hrKyZriUyou2gj68o9113g38wh0tK6TSjfQ+DrN2HiV8bo4jLYmGnh+A3O6HMWR1ceCclN5c3g4RRjrLzSC9YolufMDLzs4CWxjEDLufYwsPqafOrvcXUlLeAzfjYrG8Re82sH6uE8Zw1WHRDk9hhRZU8s5jFCtepLeHL0jgftMXHGEP7F/cFXZb9KzdO1sqIie7OMfQ44hAPAcA1KexEPt6gb1"
    image:
      offer: UbuntuServer
      # was "Ubuntu": the Azure Marketplace publisher of the UbuntuServer
      # offer is Canonical; "Ubuntu" is not a valid publisher.
      publisher: Canonical
      sku: '16.04-LTS'
      version: latest
    network_interfaces: nic02
  tags:
    - recipe4

- name: Create network security group
  azure_rm_securitygroup:
    resource_group: example
    name: mysg01
    purge_rules: yes
    rules:
      - name: 'AllowSSH'
        protocol: TCP
        # a bare * is the YAML alias indicator and is invalid here;
        # the wildcard must be quoted (same for every rule below)
        source_address_prefix: '*'
        destination_port_range: 22
        access: Allow
        priority: 100
        direction: Inbound
      - name: 'AllowHTTP'
        protocol: TCP
        source_address_prefix: '*'
        destination_port_range: 80
        access: Allow
        priority: 101
        direction: Inbound
      - name: 'AllowHTTPS'
        protocol: TCP
        source_address_prefix: '*'
        destination_port_range: 443
        access: Allow
        priority: 102
        direction: Inbound
      - name: 'DenyAll'
        protocol: TCP
        source_address_prefix: '*'
        destination_port_range: '*'
        # was missing: the module defaults access to Allow, so without this
        # the "DenyAll" rule would have allowed all traffic instead.
        access: Deny
        priority: 103
        direction: Inbound
  tags:
    - recipe5

- name: Create subnet
  azure_rm_subnet:
    name: subnet01
    # was "my_first_subnet": same fix as the recipe2 subnet task above
    virtual_network_name: vnet01
    resource_group: example
    address_prefix_cidr: "10.2.0.0/24"
    state: present
    security_group_name: mysg01
  tags:
    - recipe5

- name: Create network interface card using existing public ip address and security group
  azure_rm_networkinterface:
    name: nic02
    resource_group: example
    virtual_network_name: vnet01
    subnet_name: subnet01
    public_ip_address_name: pip01
    security_group_name: mysg01
    state: present
  register: network_interface02
  tags:
    - recipe5

- name: Create storage container
  azure_rm_storageblob:
    resource_group: example
    # was "examplestroage01" (typo); every other task, and the VM's
    # storage_account above, uses examplestorage01.
    storage_account_name: examplestorage01
    container: cookbook
    state: present
  tags:
    - recipe6

- name: Upload a file to existing container
  azure_rm_storageblob:
    resource_group: example
    storage_account_name: examplestorage01
    container: cookbook
    blob: myfile.png
    src: /tmp/myfile.png
    public_access: blob
    content_type: 'application/image'
  tags:
    - recipe6

- name: Download blob object
  azure_rm_storageblob:
    resource_group: example
    storage_account_name: examplestorage01
    container: cookbook
    blob: myfile.png
    dest: /tmp/download_file.png
  tags:
    - recipe6
-------------------------------------------------------------------------------- /Chapter05/roles/phonebook/files/phone-book.service: --------------------------------------------------------------------------------
[Unit]
Description=Simple Phone Book

[Service]
WorkingDirectory=/opt/phone-book
ExecStartPre=/bin/bash /opt/phone-book/init.sh
ExecStart=/usr/bin/uwsgi --http-socket 0.0.0.0:8080 --manage-script-name --mount /phonebook=app:app
Restart=on-failure
RestartSec=5

[Install]
WantedBy=multi-user.target
-------------------------------------------------------------------------------- /Chapter05/roles/phonebook/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: install epel repository 3 | package: 4 | name: epel-release 5 | state: present 6 | 7 | - name: install dependencies 8 | package: 9 | name: "{{ item }}" 10 | state: present 11 | with_items: 12 | - git 13 | - python-pip 14 | - gcc 15 | - python-devel 16 | 17 | - name: install python libraries 18 | pip: 19 | name: "{{ item }}" 20 | state: present 21 | with_items: 22 | - flask 23 | - flask-sqlalchemy 24 | - flask-migrate 25 | - uwsgi 26 | 27 | - name: get the application code 28 | git: 29 | repo: https://github.com/adimania/phone-book.git 30 | dest: /opt/phone-book 31 | 32 | - name: upload systemd unit file 33 | copy: 34 | src: phone-book.service 35 | dest: /etc/systemd/system/phone-book.service 36 | 37 | - name: start phonebook 38 | systemd: 39 | state: started 40 | daemon_reload: yes 41 | name: phone-book 42 | enabled: yes 43 | -------------------------------------------------------------------------------- /Chapter06/digital_ocean.ini: -------------------------------------------------------------------------------- 1 | # Ansible DigitalOcean external inventory script settings 2 | # 3 | 4 | [digital_ocean] 5 | 6 | # The module needs your DigitalOcean API Token. 7 | # It may also be specified on the command line via --api-token 8 | # or via the environment variables DO_API_TOKEN or DO_API_KEY 9 | # 10 | api_token = 0fcb0c6b98825f29a17df512fghjkl4567ujh54edfghyui9iujhgvb5adff4c21 11 | 12 | 13 | # API calls to DigitalOcean may be slow. For this reason, we cache the results 14 | # of an API call. Set this to the path you want cache files to be written to. 15 | # One file will be written to this directory: 16 | # - ansible-digital_ocean.cache 17 | # 18 | cache_path = /tmp 19 | 20 | 21 | # The number of seconds a cache file is considered valid. 
After this many 22 | # seconds, a new API call will be made, and the cache file will be updated. 23 | # 24 | cache_max_age = 300 25 | 26 | # Use the private network IP address instead of the public when available. 27 | # 28 | use_private_network = False 29 | 30 | # Pass variables to every group, e.g.: 31 | # 32 | # group_variables = { 'ansible_user': 'root' } 33 | # 34 | group_variables = {} 35 | -------------------------------------------------------------------------------- /Chapter06/digital_ocean.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | ''' 4 | DigitalOcean external inventory script 5 | ====================================== 6 | 7 | Generates Ansible inventory of DigitalOcean Droplets. 8 | 9 | In addition to the --list and --host options used by Ansible, there are options 10 | for generating JSON of other DigitalOcean data. This is useful when creating 11 | droplets. For example, --regions will return all the DigitalOcean Regions. 12 | This information can also be easily found in the cache file, whose default 13 | location is /tmp/ansible-digital_ocean.cache). 14 | 15 | The --pretty (-p) option pretty-prints the output for better human readability. 16 | 17 | ---- 18 | Although the cache stores all the information received from DigitalOcean, 19 | the cache is not used for current droplet information (in --list, --host, 20 | --all, and --droplets). This is so that accurate droplet information is always 21 | found. You can force this script to use the cache with --force-cache. 22 | 23 | ---- 24 | Configuration is read from `digital_ocean.ini`, then from environment variables, 25 | and then from command-line arguments. 26 | 27 | Most notably, the DigitalOcean API Token must be specified. 
It can be specified 28 | in the INI file or with the following environment variables: 29 | export DO_API_TOKEN='abc123' or 30 | export DO_API_KEY='abc123' 31 | 32 | Alternatively, it can be passed on the command-line with --api-token. 33 | 34 | If you specify DigitalOcean credentials in the INI file, a handy way to 35 | get them into your environment (e.g., to use the digital_ocean module) 36 | is to use the output of the --env option with export: 37 | export $(digital_ocean.py --env) 38 | 39 | ---- 40 | The following groups are generated from --list: 41 | - ID (droplet ID) 42 | - NAME (droplet NAME) 43 | - image_ID 44 | - image_NAME 45 | - distro_NAME (distribution NAME from image) 46 | - region_NAME 47 | - size_NAME 48 | - status_STATUS 49 | 50 | For each host, the following variables are registered: 51 | - do_backup_ids 52 | - do_created_at 53 | - do_disk 54 | - do_features - list 55 | - do_id 56 | - do_image - object 57 | - do_ip_address 58 | - do_private_ip_address 59 | - do_kernel - object 60 | - do_locked 61 | - do_memory 62 | - do_name 63 | - do_networks - object 64 | - do_next_backup_window 65 | - do_region - object 66 | - do_size - object 67 | - do_size_slug 68 | - do_snapshot_ids - list 69 | - do_status 70 | - do_tags 71 | - do_vcpus 72 | - do_volume_ids 73 | 74 | ----- 75 | ``` 76 | usage: digital_ocean.py [-h] [--list] [--host HOST] [--all] 77 | [--droplets] [--regions] [--images] [--sizes] 78 | [--ssh-keys] [--domains] [--pretty] 79 | [--cache-path CACHE_PATH] 80 | [--cache-max_age CACHE_MAX_AGE] 81 | [--force-cache] 82 | [--refresh-cache] 83 | [--api-token API_TOKEN] 84 | 85 | Produce an Ansible Inventory file based on DigitalOcean credentials 86 | 87 | optional arguments: 88 | -h, --help show this help message and exit 89 | --list List all active Droplets as Ansible inventory 90 | (default: True) 91 | --host HOST Get all Ansible inventory variables about a specific 92 | Droplet 93 | --all List all DigitalOcean information as JSON 94 | --droplets 
List Droplets as JSON 95 | --regions List Regions as JSON 96 | --images List Images as JSON 97 | --sizes List Sizes as JSON 98 | --ssh-keys List SSH keys as JSON 99 | --domains List Domains as JSON 100 | --pretty, -p Pretty-print results 101 | --cache-path CACHE_PATH 102 | Path to the cache files (default: .) 103 | --cache-max_age CACHE_MAX_AGE 104 | Maximum age of the cached items (default: 0) 105 | --force-cache Only use data from the cache 106 | --refresh-cache Force refresh of cache by making API requests to 107 | DigitalOcean (default: False - use cache files) 108 | --api-token API_TOKEN, -a API_TOKEN 109 | DigitalOcean API Token 110 | ``` 111 | 112 | ''' 113 | 114 | # (c) 2013, Evan Wies 115 | # 116 | # Inspired by the EC2 inventory plugin: 117 | # https://github.com/ansible/ansible/blob/devel/contrib/inventory/ec2.py 118 | # 119 | # This file is part of Ansible, 120 | # 121 | # Ansible is free software: you can redistribute it and/or modify 122 | # it under the terms of the GNU General Public License as published by 123 | # the Free Software Foundation, either version 3 of the License, or 124 | # (at your option) any later version. 125 | # 126 | # Ansible is distributed in the hope that it will be useful, 127 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 128 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 129 | # GNU General Public License for more details. 130 | # 131 | # You should have received a copy of the GNU General Public License 132 | # along with Ansible. If not, see . 
133 | 134 | ###################################################################### 135 | 136 | import os 137 | import sys 138 | import re 139 | import argparse 140 | from time import time 141 | import ast 142 | 143 | try: 144 | import ConfigParser 145 | except ImportError: 146 | import configparser as ConfigParser 147 | 148 | try: 149 | import json 150 | except ImportError: 151 | import simplejson as json 152 | 153 | try: 154 | from dopy.manager import DoManager 155 | except ImportError as e: 156 | sys.exit("failed=True msg={}".format(e.message)) 157 | 158 | 159 | class DigitalOceanInventory(object): 160 | 161 | ########################################################################### 162 | # Main execution path 163 | ########################################################################### 164 | 165 | def __init__(self): 166 | ''' Main execution path ''' 167 | 168 | # DigitalOceanInventory data 169 | self.data = {} # All DigitalOcean data 170 | self.inventory = {} # Ansible Inventory 171 | 172 | # Define defaults 173 | self.cache_path = '.' 174 | self.cache_max_age = 0 175 | self.use_private_network = False 176 | self.group_variables = {} 177 | 178 | # Read settings, environment variables, and CLI arguments 179 | self.read_settings() 180 | self.read_environment() 181 | self.read_cli_args() 182 | 183 | # Verify credentials were set 184 | if not hasattr(self, 'api_token'): 185 | sys.stderr.write('''Could not find values for DigitalOcean api_token. 
186 | They must be specified via either ini file, command line argument (--api-token), 187 | or environment variables (DO_API_TOKEN)\n''') 188 | sys.exit(-1) 189 | 190 | # env command, show DigitalOcean credentials 191 | if self.args.env: 192 | print("DO_API_TOKEN=%s" % self.api_token) 193 | sys.exit(0) 194 | 195 | # Manage cache 196 | self.cache_filename = self.cache_path + "/ansible-digital_ocean.cache" 197 | self.cache_refreshed = False 198 | 199 | if self.is_cache_valid(): 200 | self.load_from_cache() 201 | if len(self.data) == 0: 202 | if self.args.force_cache: 203 | sys.stderr.write('''Cache is empty and --force-cache was specified\n''') 204 | sys.exit(-1) 205 | 206 | self.manager = DoManager(None, self.api_token, api_version=2) 207 | 208 | # Pick the json_data to print based on the CLI command 209 | if self.args.droplets: 210 | self.load_from_digital_ocean('droplets') 211 | json_data = {'droplets': self.data['droplets']} 212 | elif self.args.regions: 213 | self.load_from_digital_ocean('regions') 214 | json_data = {'regions': self.data['regions']} 215 | elif self.args.images: 216 | self.load_from_digital_ocean('images') 217 | json_data = {'images': self.data['images']} 218 | elif self.args.sizes: 219 | self.load_from_digital_ocean('sizes') 220 | json_data = {'sizes': self.data['sizes']} 221 | elif self.args.ssh_keys: 222 | self.load_from_digital_ocean('ssh_keys') 223 | json_data = {'ssh_keys': self.data['ssh_keys']} 224 | elif self.args.domains: 225 | self.load_from_digital_ocean('domains') 226 | json_data = {'domains': self.data['domains']} 227 | elif self.args.all: 228 | self.load_from_digital_ocean() 229 | json_data = self.data 230 | elif self.args.host: 231 | json_data = self.load_droplet_variables_for_host() 232 | else: # '--list' this is last to make it default 233 | self.load_from_digital_ocean('droplets') 234 | self.build_inventory() 235 | json_data = self.inventory 236 | 237 | if self.cache_refreshed: 238 | self.write_to_cache() 239 | 240 | if 
self.args.pretty: 241 | print(json.dumps(json_data, sort_keys=True, indent=2)) 242 | else: 243 | print(json.dumps(json_data)) 244 | # That's all she wrote... 245 | 246 | ########################################################################### 247 | # Script configuration 248 | ########################################################################### 249 | 250 | def read_settings(self): 251 | ''' Reads the settings from the digital_ocean.ini file ''' 252 | config = ConfigParser.SafeConfigParser() 253 | config.read(os.path.dirname(os.path.realpath(__file__)) + '/digital_ocean.ini') 254 | 255 | # Credentials 256 | if config.has_option('digital_ocean', 'api_token'): 257 | self.api_token = config.get('digital_ocean', 'api_token') 258 | 259 | # Cache related 260 | if config.has_option('digital_ocean', 'cache_path'): 261 | self.cache_path = config.get('digital_ocean', 'cache_path') 262 | if config.has_option('digital_ocean', 'cache_max_age'): 263 | self.cache_max_age = config.getint('digital_ocean', 'cache_max_age') 264 | 265 | # Private IP Address 266 | if config.has_option('digital_ocean', 'use_private_network'): 267 | self.use_private_network = config.getboolean('digital_ocean', 'use_private_network') 268 | 269 | # Group variables 270 | if config.has_option('digital_ocean', 'group_variables'): 271 | self.group_variables = ast.literal_eval(config.get('digital_ocean', 'group_variables')) 272 | 273 | def read_environment(self): 274 | ''' Reads the settings from environment variables ''' 275 | # Setup credentials 276 | if os.getenv("DO_API_TOKEN"): 277 | self.api_token = os.getenv("DO_API_TOKEN") 278 | if os.getenv("DO_API_KEY"): 279 | self.api_token = os.getenv("DO_API_KEY") 280 | 281 | def read_cli_args(self): 282 | ''' Command line argument processing ''' 283 | parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on DigitalOcean credentials') 284 | 285 | parser.add_argument('--list', action='store_true', help='List all active 
Droplets as Ansible inventory (default: True)') 286 | parser.add_argument('--host', action='store', help='Get all Ansible inventory variables about a specific Droplet') 287 | 288 | parser.add_argument('--all', action='store_true', help='List all DigitalOcean information as JSON') 289 | parser.add_argument('--droplets', '-d', action='store_true', help='List Droplets as JSON') 290 | parser.add_argument('--regions', action='store_true', help='List Regions as JSON') 291 | parser.add_argument('--images', action='store_true', help='List Images as JSON') 292 | parser.add_argument('--sizes', action='store_true', help='List Sizes as JSON') 293 | parser.add_argument('--ssh-keys', action='store_true', help='List SSH keys as JSON') 294 | parser.add_argument('--domains', action='store_true', help='List Domains as JSON') 295 | 296 | parser.add_argument('--pretty', '-p', action='store_true', help='Pretty-print results') 297 | 298 | parser.add_argument('--cache-path', action='store', help='Path to the cache files (default: .)') 299 | parser.add_argument('--cache-max_age', action='store', help='Maximum age of the cached items (default: 0)') 300 | parser.add_argument('--force-cache', action='store_true', default=False, help='Only use data from the cache') 301 | parser.add_argument('--refresh-cache', '-r', action='store_true', default=False, 302 | help='Force refresh of cache by making API requests to DigitalOcean (default: False - use cache files)') 303 | 304 | parser.add_argument('--env', '-e', action='store_true', help='Display DO_API_TOKEN') 305 | parser.add_argument('--api-token', '-a', action='store', help='DigitalOcean API Token') 306 | 307 | self.args = parser.parse_args() 308 | 309 | if self.args.api_token: 310 | self.api_token = self.args.api_token 311 | 312 | # Make --list default if none of the other commands are specified 313 | if (not self.args.droplets and not self.args.regions and 314 | not self.args.images and not self.args.sizes and 315 | not self.args.ssh_keys and 
not self.args.domains and 316 | not self.args.all and not self.args.host): 317 | self.args.list = True 318 | 319 | ########################################################################### 320 | # Data Management 321 | ########################################################################### 322 | 323 | def load_from_digital_ocean(self, resource=None): 324 | '''Get JSON from DigitalOcean API''' 325 | if self.args.force_cache and os.path.isfile(self.cache_filename): 326 | return 327 | # We always get fresh droplets 328 | if self.is_cache_valid() and not (resource == 'droplets' or resource is None): 329 | return 330 | if self.args.refresh_cache: 331 | resource = None 332 | 333 | if resource == 'droplets' or resource is None: 334 | self.data['droplets'] = self.manager.all_active_droplets() 335 | self.cache_refreshed = True 336 | if resource == 'regions' or resource is None: 337 | self.data['regions'] = self.manager.all_regions() 338 | self.cache_refreshed = True 339 | if resource == 'images' or resource is None: 340 | self.data['images'] = self.manager.all_images(filter=None) 341 | self.cache_refreshed = True 342 | if resource == 'sizes' or resource is None: 343 | self.data['sizes'] = self.manager.sizes() 344 | self.cache_refreshed = True 345 | if resource == 'ssh_keys' or resource is None: 346 | self.data['ssh_keys'] = self.manager.all_ssh_keys() 347 | self.cache_refreshed = True 348 | if resource == 'domains' or resource is None: 349 | self.data['domains'] = self.manager.all_domains() 350 | self.cache_refreshed = True 351 | 352 | def build_inventory(self): 353 | '''Build Ansible inventory of droplets''' 354 | self.inventory = { 355 | 'all': { 356 | 'hosts': [], 357 | 'vars': self.group_variables 358 | }, 359 | '_meta': {'hostvars': {}} 360 | } 361 | 362 | # add all droplets by id and name 363 | for droplet in self.data['droplets']: 364 | # when using private_networking, the API reports the private one in "ip_address". 
365 | if 'private_networking' in droplet['features'] and not self.use_private_network: 366 | for net in droplet['networks']['v4']: 367 | if net['type'] == 'public': 368 | dest = net['ip_address'] 369 | else: 370 | continue 371 | else: 372 | dest = droplet['ip_address'] 373 | 374 | self.inventory['all']['hosts'].append(dest) 375 | 376 | self.inventory[droplet['id']] = [dest] 377 | self.inventory[droplet['name']] = [dest] 378 | 379 | # groups that are always present 380 | for group in ('region_' + droplet['region']['slug'], 381 | 'image_' + str(droplet['image']['id']), 382 | 'size_' + droplet['size']['slug'], 383 | 'distro_' + self.to_safe(droplet['image']['distribution']), 384 | 'status_' + droplet['status']): 385 | if group not in self.inventory: 386 | self.inventory[group] = {'hosts': [], 'vars': {}} 387 | self.inventory[group]['hosts'].append(dest) 388 | 389 | # groups that are not always present 390 | for group in (droplet['image']['slug'], 391 | droplet['image']['name']): 392 | if group: 393 | image = 'image_' + self.to_safe(group) 394 | if image not in self.inventory: 395 | self.inventory[image] = {'hosts': [], 'vars': {}} 396 | self.inventory[image]['hosts'].append(dest) 397 | 398 | if droplet['tags']: 399 | for tag in droplet['tags']: 400 | if tag not in self.inventory: 401 | self.inventory[tag] = {'hosts': [], 'vars': {}} 402 | self.inventory[tag]['hosts'].append(dest) 403 | 404 | # hostvars 405 | info = self.do_namespace(droplet) 406 | self.inventory['_meta']['hostvars'][dest] = info 407 | 408 | def load_droplet_variables_for_host(self): 409 | '''Generate a JSON response to a --host call''' 410 | host = int(self.args.host) 411 | droplet = self.manager.show_droplet(host) 412 | info = self.do_namespace(droplet) 413 | return {'droplet': info} 414 | 415 | ########################################################################### 416 | # Cache Management 417 | ########################################################################### 418 | 419 | def 
is_cache_valid(self): 420 | ''' Determines if the cache files have expired, or if it is still valid ''' 421 | if os.path.isfile(self.cache_filename): 422 | mod_time = os.path.getmtime(self.cache_filename) 423 | current_time = time() 424 | if (mod_time + self.cache_max_age) > current_time: 425 | return True 426 | return False 427 | 428 | def load_from_cache(self): 429 | ''' Reads the data from the cache file and assigns it to member variables as Python Objects''' 430 | try: 431 | cache = open(self.cache_filename, 'r') 432 | json_data = cache.read() 433 | cache.close() 434 | data = json.loads(json_data) 435 | except IOError: 436 | data = {'data': {}, 'inventory': {}} 437 | 438 | self.data = data['data'] 439 | self.inventory = data['inventory'] 440 | 441 | def write_to_cache(self): 442 | ''' Writes data in JSON format to a file ''' 443 | data = {'data': self.data, 'inventory': self.inventory} 444 | json_data = json.dumps(data, sort_keys=True, indent=2) 445 | 446 | cache = open(self.cache_filename, 'w') 447 | cache.write(json_data) 448 | cache.close() 449 | 450 | ########################################################################### 451 | # Utilities 452 | ########################################################################### 453 | 454 | def push(self, my_dict, key, element): 455 | ''' Pushed an element onto an array that may not have been defined in the dict ''' 456 | if key in my_dict: 457 | my_dict[key].append(element) 458 | else: 459 | my_dict[key] = [element] 460 | 461 | def to_safe(self, word): 462 | ''' Converts 'bad' characters in a string to underscores so they can be used as Ansible groups ''' 463 | return re.sub("[^A-Za-z0-9\-\.]", "_", word) 464 | 465 | def do_namespace(self, data): 466 | ''' Returns a copy of the dictionary with all the keys put in a 'do_' namespace ''' 467 | info = {} 468 | for k, v in data.items(): 469 | info['do_' + k] = v 470 | return info 471 | 472 | 473 | 
########################################################################### 474 | # Run the script 475 | DigitalOceanInventory() 476 | -------------------------------------------------------------------------------- /Chapter06/phonebook.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: app 3 | roles: 4 | - phonebook 5 | -------------------------------------------------------------------------------- /Chapter06/playbook.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | roles: 4 | - digitalocean 5 | -------------------------------------------------------------------------------- /Chapter06/roles/digitalocean/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: include secrets 3 | include_vars: secrets.yml 4 | 5 | - name: Add SSH key to DO account 6 | digital_ocean_sshkey: 7 | name: "cookbook-key" 8 | ssh_pub_key: "{{ ssh_public_key }}" 9 | oauth_token: "{{ DO_OAUTH_TOKEN }}" 10 | register: result 11 | tags: 12 | - recipe2 13 | 14 | - name: check for the droplet 15 | shell: doctl compute droplet list --output json -t "{{ DO_OAUTH_TOKEN }}" 16 | register: droplets 17 | tags: 18 | - recipe3 19 | 20 | - name: find the id of droplet 21 | debug: 22 | msg: "{{ droplets.stdout|from_json|json_query(\"[].{id: id, name: name} | [?name=='app'].id \")}}" 23 | register: app 24 | tags: 25 | - recipe3 26 | 27 | - name: Creating app Droplet 28 | digital_ocean: 29 | id: "{{ app.msg[0] | default('0') }}" 30 | state: present 31 | command: droplet 32 | name: app 33 | api_token: "{{ DO_OAUTH_TOKEN }}" 34 | size_id: 512mb 35 | region_id: blr1 36 | image_id: centos-7-x64 37 | wait_timeout: 500 38 | ssh_key_ids: "{{ result.data.ssh_key.fingerprint }}" 39 | register: app_droplet 40 | tags: 41 | - recipe3 42 | 43 | - digital_ocean_block_storage: 44 | state: present 45 | command: create 46 | api_token: "{{ 
DO_OAUTH_TOKEN }}" 47 | region: blr1 48 | block_size: 10 49 | volume_name: cookbook1 50 | tags: 51 | - recipe4 52 | 53 | - digital_ocean_block_storage: 54 | state: present 55 | command: attach 56 | api_token: "{{ DO_OAUTH_TOKEN }}" 57 | volume_name: cookbook1 58 | region: blr1 59 | droplet_id: "{{ app_droplet.droplet.id }}" 60 | tags: 61 | - recipe4 62 | 63 | - name: attach a floating IP 64 | digital_ocean_floating_ip: 65 | state: present 66 | droplet_id: "{{ app_droplet.droplet.id }}" 67 | api_token: "{{ DO_OAUTH_TOKEN }}" 68 | tags: 69 | - recipe5 70 | 71 | - name: create a load balancer 72 | shell: doctl compute load-balancer create --name lb1 --droplet-ids {{ app_droplet.droplet.id }} --forwarding-rules {{ forwarding_rules }} --health-check {{ health_check }} --region blr1 -t {{ DO_OAUTH_TOKEN }} 73 | tags: 74 | - recipe6 75 | 76 | - name: associate domain and IP 77 | digital_ocean_domain: 78 | state: present 79 | name: ansiblecloudbook.com 80 | ip: "{{ app_droplet.droplet.ip_address }}" 81 | api_token: "{{ DO_OAUTH_TOKEN }}" 82 | tags: 83 | - recipe7 84 | -------------------------------------------------------------------------------- /Chapter06/roles/digitalocean/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | ssh_public_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCbg83WYIxUfXWJ4bQiYfZYHceDwMJxnGfJqgYtHL/DAtegVY+Nm8MX3CRZYisfskt0m9CQ6y/Ux1OITTz+O11fgxLJcroZmKJbWW0K39gfHvFqR7FIe0zuJaxqUQUuyc0i6RCBRiZPiQQOPes2yDtfHgDWx3q9knS3ZXIAXcGLZrgfC1XnIK8CLAnZDved9Rue2bhsCnO9Mleh9g/CTtehMDAzD4NeSv9eETlHYkYSpJg8gFA3BFICpBxTqWSjf1mMQGSmiudFOhRjHIxL1Tvh+pnjSoL/jrLcP3RtMVuG0ZU0qkoAts1qpTwmyAJUz9Ts2EeyDJ0tXsFAiOFbmuMd aditya@devopsnexus.com 3 | forwarding_rules: entry_protocol:http,entry_port:8080,target_protocol:http,target_port:8080 4 | health_check: protocol:http,port:8080,path:/,check_interval_seconds:10,response_timeout_seconds:5,healthy_threshold:3,unhealthy_threshold:3 5 | 
-------------------------------------------------------------------------------- /Chapter06/roles/digitalocean/vars/secrets.yml: -------------------------------------------------------------------------------- 1 | --- 2 | DO_OAUTH_TOKEN: "0fcb0c6b98825f29a17df512fghjkl4567ujh54edfghyui9iujhgvb5adff4c21" 3 | -------------------------------------------------------------------------------- /Chapter06/roles/phonebook/files/phone-book.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Simple Phone Book 3 | 4 | [Service] 5 | WorkingDirectory=/opt/phone-book 6 | ExecStartPre=/bin/bash /opt/phone-book/init.sh 7 | ExecStart=/usr/bin/uwsgi --http-socket 0.0.0.0:8080 --manage-script-name --mount /phonebook=app:app 8 | Restart=on-failure 9 | RestartSec=5 10 | 11 | [Install] 12 | WantedBy=multi-user.target 13 | -------------------------------------------------------------------------------- /Chapter06/roles/phonebook/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: install epel repository 3 | package: 4 | name: epel-release 5 | state: present 6 | 7 | - name: install dependencies 8 | package: 9 | name: "{{ item }}" 10 | state: present 11 | with_items: 12 | - git 13 | - python-pip 14 | - gcc 15 | - python-devel 16 | 17 | - name: install python libraries 18 | pip: 19 | name: "{{ item }}" 20 | state: present 21 | with_items: 22 | - flask 23 | - flask-sqlalchemy 24 | - flask-migrate 25 | - uwsgi 26 | 27 | - name: get the application code 28 | git: 29 | repo: https://github.com/adimania/phone-book 30 | dest: /opt/phone-book 31 | 32 | - name: upload systemd unit file 33 | copy: 34 | src: phone-book.service 35 | dest: /etc/systemd/system/phone-book.service 36 | 37 | - name: start phonebook 38 | systemd: 39 | state: started 40 | daemon_reload: yes 41 | name: phone-book 42 | enabled: yes 43 | 
-------------------------------------------------------------------------------- /Chapter07/docker.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | gather_facts: no 4 | roles: 5 | - docker 6 | -------------------------------------------------------------------------------- /Chapter07/phonebook.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | gather_facts: no 4 | roles: 5 | - docker 6 | - phonebook 7 | -------------------------------------------------------------------------------- /Chapter07/roles/docker/files/docker_compose/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.4-alpine 2 | ADD . /code 3 | WORKDIR /code 4 | RUN pip install -r requirements.txt 5 | CMD ["python", "app.py"] 6 | -------------------------------------------------------------------------------- /Chapter07/roles/docker/files/docker_compose/app.py: -------------------------------------------------------------------------------- 1 | from flask import Flask 2 | from redis import Redis 3 | 4 | app = Flask(__name__) 5 | redis = Redis(host='redis', port=6379) 6 | 7 | @app.route('/') 8 | def hello(): 9 | count = redis.incr('hits') 10 | return 'Hello World! I have been seen {} times.\n'.format(count) 11 | 12 | if __name__ == "__main__": 13 | app.run(host="0.0.0.0", debug=True) 14 | -------------------------------------------------------------------------------- /Chapter07/roles/docker/files/docker_compose/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '2' 2 | services: 3 | web: 4 | build: . 
5 | ports: 6 | - "5000" 7 | networks: 8 | - l1-tier 9 | - l2-tier 10 | redis: 11 | image: "redis:alpine" 12 | links: 13 | - web 14 | networks: 15 | - l2-tier 16 | lb: 17 | image: dockercloud/haproxy 18 | ports: 19 | - "5001:80" 20 | links: 21 | - web 22 | networks: 23 | - l1-tier 24 | - l2-tier 25 | volumes: 26 | - /var/run/docker.sock:/var/run/docker.sock 27 | 28 | networks: 29 | l1-tier: 30 | driver: bridge 31 | l2-tier: 32 | driver: bridge 33 | 34 | -------------------------------------------------------------------------------- /Chapter07/roles/docker/files/docker_compose/requirements.txt: -------------------------------------------------------------------------------- 1 | flask 2 | redis 3 | -------------------------------------------------------------------------------- /Chapter07/roles/docker/files/docker_files/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu:latest 2 | 3 | CMD ["sleep", "infinity;"] 4 | -------------------------------------------------------------------------------- /Chapter07/roles/docker/files/docker_files/index.html: -------------------------------------------------------------------------------- 1 |

Hello World

2 | -------------------------------------------------------------------------------- /Chapter07/roles/docker/files/docker_files/nginx.conf: -------------------------------------------------------------------------------- 1 | user www-data; 2 | worker_processes 1; 3 | 4 | events { 5 | worker_connections 1024; 6 | } 7 | 8 | http { 9 | server { 10 | listen 80; 11 | root /var/www/site; 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /Chapter07/roles/docker/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: Create and start a container using Ubuntu Image 2 | docker_container: 3 | name: myfirstcontainer 4 | state: started 5 | image: ubuntu:14.04 6 | command: sleep infinity 7 | tag: 8 | - recipe2 9 | 10 | - name: Download docker image 11 | docker_image: 12 | name: centos/python-35-centos7 13 | tag: 14 | - recipe3 15 | 16 | - name: Build docker image 17 | docker_image: 18 | name: ansible-built 19 | tag: firstbuilt 20 | path: "{{role_path}}/files/docker_files/" 21 | state: present 22 | tag: 23 | - recipe3 24 | 25 | - name: Create volume for docker instance 26 | docker_volume: 27 | name: first_volume 28 | tag: 29 | - recipe4 30 | 31 | - name: Mount volume to container 32 | docker_container: 33 | name: myfirstcontainer 34 | state: started 35 | image: ubuntu:14.04 36 | volumes: 37 | - first_volume:/app 38 | command: sleep infinity 39 | tag: 40 | - recipe4 41 | 42 | - name: Setting up a docker registry 43 | docker_container: 44 | name: registry 45 | image: registry:2 46 | exposed_ports: 47 | - 5000 48 | ports: 49 | - 5000:5000 50 | tag: 51 | - recipe5 52 | 53 | - name: Log into DockerHub 54 | docker_login: 55 | username: vikas17a 56 | password: vikas@sirsa 57 | email: vikas17a@gmail.com 58 | tag: 59 | - recipe6 60 | 61 | - name: Push image to Dockerhub 62 | docker_image: 63 | repository: vikas17a/cookbook 64 | name: cookbook 65 | tag: firstbuilt 66 | path: 
"{{role_path}}/files/docker_files/" 67 | push: yes 68 | tags: 69 | - recipe6 70 | 71 | - name: Create application using compose 72 | docker_service: 73 | project_src: "{{role_path}}/files/docker_compose" 74 | state: present 75 | tags: 76 | - recipe7 77 | 78 | - name: Scale up web service of compose application 79 | docker_service: 80 | project_src: "{{role_path}}/files/docker_compose" 81 | scale: 82 | web: 2 83 | tags: 84 | - recipe8 85 | -------------------------------------------------------------------------------- /Chapter07/roles/phonebook/files/phonebook-docker/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.4-alpine 2 | ADD . /code 3 | WORKDIR /code 4 | RUN pip install -r requirements.txt 5 | RUN FLASK_APP=app.py flask db init 6 | RUN FLASK_APP=app.py flask db migrate 7 | RUN FLASK_APP=app.py flask db upgrade 8 | CMD ["python", "app.py"] 9 | -------------------------------------------------------------------------------- /Chapter07/roles/phonebook/files/phonebook-docker/app.py: -------------------------------------------------------------------------------- 1 | from flask import Flask 2 | from flask_sqlalchemy import SQLAlchemy 3 | from flask import Flask, request, flash, url_for, redirect, render_template 4 | from flask_migrate import Migrate 5 | import os 6 | 7 | app = Flask(__name__) 8 | app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:////tmp/test.db' 9 | app.secret_key = os.urandom(32) 10 | db = SQLAlchemy(app) 11 | migrate = Migrate(app, db) 12 | 13 | class Contacts(db.Model): 14 | id = db.Column(db.Integer, primary_key=True) 15 | name = db.Column(db.String(80), unique=True) 16 | email = db.Column(db.String(120), unique=True) 17 | phone = db.Column(db.String(15), unique=True) 18 | 19 | def __init__(self, name, email, phone): 20 | self.name = name 21 | self.email = email 22 | self.phone = phone 23 | 24 | def __repr__(self): 25 | return '' % self.name 26 | 27 | @app.route('/') 28 | def 
get_contacts(): 29 | return render_template('index.html', contacts=Contacts.query.all()) 30 | 31 | @app.route('/new', methods=['GET', 'POST']) 32 | def create_contact(): 33 | if request.method == 'POST': 34 | if not request.form['name'] or not request.form['email'] or not request.form['phone']: 35 | flash('Please enter all the fields', 'error') 36 | else: 37 | contact = Contacts(request.form['name'], 38 | request.form['email'], 39 | request.form['phone']) 40 | db.session.add(contact) 41 | db.session.commit() 42 | 43 | flash('Contact was successfully submitted') 44 | 45 | return redirect(url_for('get_contacts')) 46 | 47 | return render_template('new.html') 48 | 49 | 50 | if __name__ == '__main__': 51 | app.run( 52 | host="0.0.0.0", 53 | port=8080 54 | ) 55 | 56 | -------------------------------------------------------------------------------- /Chapter07/roles/phonebook/files/phonebook-docker/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '2' 2 | services: 3 | phonebook: 4 | build: . 5 | ports: 6 | - "8080:8080" 7 | -------------------------------------------------------------------------------- /Chapter07/roles/phonebook/files/phonebook-docker/init.sh: -------------------------------------------------------------------------------- 1 | #! /bin/bash 2 | export FLASK_APP=app.py 3 | flask db init 4 | flask db migrate 5 | flask db upgrade 6 | -------------------------------------------------------------------------------- /Chapter07/roles/phonebook/files/phonebook-docker/requirements.txt: -------------------------------------------------------------------------------- 1 | flask 2 | flask_sqlalchemy 3 | flask_migrate 4 | -------------------------------------------------------------------------------- /Chapter07/roles/phonebook/files/phonebook-docker/templates/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 |
8 |
9 |

Phone Book

10 |
11 |
12 | {%- for category, message in get_flashed_messages(with_categories=true) %} 13 |
14 | {{ message }} 15 |
16 | {%- endfor %} 17 | 18 |

Contacts (Add contact)

19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | {% for contact in contacts %} 29 | 30 | 31 | 32 | 33 | 34 | {% endfor %} 35 | 36 |
NameEmailPhone
{{ contact.name }}{{ contact.email }}{{ contact.phone }}
37 |
38 | 39 | 40 | -------------------------------------------------------------------------------- /Chapter07/roles/phonebook/files/phonebook-docker/templates/new.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 |
8 |
9 |

New Contact

10 |
11 |
12 | {%- for category, message in get_flashed_messages(with_categories=true) %} 13 |
14 | {{ message }} 15 |
16 | {%- endfor %} 17 | 18 |
19 |
20 | 21 |
22 | 23 |
24 |
25 |
26 | 27 |
28 | 29 |
30 |
31 |
32 | 33 |
34 | 35 |
36 |
37 |
38 |
39 |
40 | 41 |
42 |
43 | 44 | 45 | 46 | 47 | 48 | -------------------------------------------------------------------------------- /Chapter07/roles/phonebook/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Compose and start phonebook application 3 | docker_service: 4 | project_src: "{{role_path}}/files/phonebook-docker" 5 | state: present 6 | tags: 7 | - phonebookapp 8 | -------------------------------------------------------------------------------- /Chapter08/openstack.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | # Copyright (c) 2012, Marco Vito Moscaritolo 4 | # Copyright (c) 2013, Jesse Keating 5 | # Copyright (c) 2015, Hewlett-Packard Development Company, L.P. 6 | # Copyright (c) 2016, Rackspace Australia 7 | # 8 | # This module is free software: you can redistribute it and/or modify 9 | # it under the terms of the GNU General Public License as published by 10 | # the Free Software Foundation, either version 3 of the License, or 11 | # (at your option) any later version. 12 | # 13 | # This software is distributed in the hope that it will be useful, 14 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 15 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 | # GNU General Public License for more details. 17 | # 18 | # You should have received a copy of the GNU General Public License 19 | # along with this software. If not, see . 20 | 21 | # The OpenStack Inventory module uses os-client-config for configuration. 22 | # https://github.com/openstack/os-client-config 23 | # This means it will either: 24 | # - Respect normal OS_* environment variables like other OpenStack tools 25 | # - Read values from a clouds.yaml file. 
26 | # If you want to configure via clouds.yaml, you can put the file in: 27 | # - Current directory 28 | # - ~/.config/openstack/clouds.yaml 29 | # - /etc/openstack/clouds.yaml 30 | # - /etc/ansible/openstack.yml 31 | # The clouds.yaml file can contain entries for multiple clouds and multiple 32 | # regions of those clouds. If it does, this inventory module will by default 33 | # connect to all of them and present them as one contiguous inventory. You 34 | # can limit to one cloud by passing the `--cloud` parameter, or use the 35 | # OS_CLOUD environment variable. If caching is enabled, and a cloud is 36 | # selected, then per-cloud cache folders will be used. 37 | # 38 | # See the adjacent openstack.yml file for an example config file 39 | # There are two ansible inventory specific options that can be set in 40 | # the inventory section. 41 | # expand_hostvars controls whether or not the inventory will make extra API 42 | # calls to fill out additional information about each server 43 | # use_hostnames changes the behavior from registering every host with its UUID 44 | # and making a group of its hostname to only doing this if the 45 | # hostname in question has more than one server 46 | # fail_on_errors causes the inventory to fail and return no hosts if one cloud 47 | # has failed (for example, bad credentials or being offline). 48 | # When set to False, the inventory will return hosts from 49 | # whichever other clouds it can contact. (Default: True) 50 | # 51 | # Also it is possible to pass the correct user by setting an ansible_user: $myuser 52 | # metadata attribute. 
53 | 54 | import argparse 55 | import collections 56 | import os 57 | import sys 58 | import time 59 | from distutils.version import StrictVersion 60 | 61 | try: 62 | import json 63 | except: 64 | import simplejson as json 65 | 66 | import os_client_config 67 | import shade 68 | import shade.inventory 69 | 70 | CONFIG_FILES = ['/etc/ansible/openstack.yaml', '/etc/ansible/openstack.yml'] 71 | 72 | 73 | def get_groups_from_server(server_vars, namegroup=True): 74 | groups = [] 75 | 76 | region = server_vars['region'] 77 | cloud = server_vars['cloud'] 78 | metadata = server_vars.get('metadata', {}) 79 | 80 | # Create a group for the cloud 81 | groups.append(cloud) 82 | 83 | # Create a group on region 84 | groups.append(region) 85 | 86 | # And one by cloud_region 87 | groups.append("%s_%s" % (cloud, region)) 88 | 89 | # Check if group metadata key in servers' metadata 90 | if 'group' in metadata: 91 | groups.append(metadata['group']) 92 | 93 | for extra_group in metadata.get('groups', '').split(','): 94 | if extra_group: 95 | groups.append(extra_group.strip()) 96 | 97 | groups.append('instance-%s' % server_vars['id']) 98 | if namegroup: 99 | groups.append(server_vars['name']) 100 | 101 | for key in ('flavor', 'image'): 102 | if 'name' in server_vars[key]: 103 | groups.append('%s-%s' % (key, server_vars[key]['name'])) 104 | 105 | for key, value in iter(metadata.items()): 106 | groups.append('meta-%s_%s' % (key, value)) 107 | 108 | az = server_vars.get('az', None) 109 | if az: 110 | # Make groups for az, region_az and cloud_region_az 111 | groups.append(az) 112 | groups.append('%s_%s' % (region, az)) 113 | groups.append('%s_%s_%s' % (cloud, region, az)) 114 | return groups 115 | 116 | 117 | def get_host_groups(inventory, refresh=False, cloud=None): 118 | (cache_file, cache_expiration_time) = get_cache_settings(cloud) 119 | if is_cache_stale(cache_file, cache_expiration_time, refresh=refresh): 120 | groups = to_json(get_host_groups_from_cloud(inventory)) 121 | 
open(cache_file, 'w').write(groups) 122 | else: 123 | groups = open(cache_file, 'r').read() 124 | return groups 125 | 126 | 127 | def append_hostvars(hostvars, groups, key, server, namegroup=False): 128 | hostvars[key] = dict( 129 | ansible_ssh_host=server['interface_ip'], 130 | ansible_host=server['interface_ip'], 131 | openstack=server) 132 | 133 | metadata = server.get('metadata', {}) 134 | if 'ansible_user' in metadata: 135 | hostvars[key]['ansible_user'] = metadata['ansible_user'] 136 | 137 | for group in get_groups_from_server(server, namegroup=namegroup): 138 | groups[group].append(key) 139 | 140 | 141 | def get_host_groups_from_cloud(inventory): 142 | groups = collections.defaultdict(list) 143 | firstpass = collections.defaultdict(list) 144 | hostvars = {} 145 | list_args = {} 146 | if hasattr(inventory, 'extra_config'): 147 | use_hostnames = inventory.extra_config['use_hostnames'] 148 | list_args['expand'] = inventory.extra_config['expand_hostvars'] 149 | if StrictVersion(shade.__version__) >= StrictVersion("1.6.0"): 150 | list_args['fail_on_cloud_config'] = \ 151 | inventory.extra_config['fail_on_errors'] 152 | else: 153 | use_hostnames = False 154 | 155 | for server in inventory.list_hosts(**list_args): 156 | 157 | if 'interface_ip' not in server: 158 | continue 159 | firstpass[server['name']].append(server) 160 | for name, servers in firstpass.items(): 161 | if len(servers) == 1 and use_hostnames: 162 | append_hostvars(hostvars, groups, name, servers[0]) 163 | else: 164 | server_ids = set() 165 | # Trap for duplicate results 166 | for server in servers: 167 | server_ids.add(server['id']) 168 | if len(server_ids) == 1 and use_hostnames: 169 | append_hostvars(hostvars, groups, name, servers[0]) 170 | else: 171 | for server in servers: 172 | append_hostvars( 173 | hostvars, groups, server['id'], server, 174 | namegroup=True) 175 | groups['_meta'] = {'hostvars': hostvars} 176 | return groups 177 | 178 | 179 | def is_cache_stale(cache_file, 
cache_expiration_time, refresh=False): 180 | ''' Determines if cache file has expired, or if it is still valid ''' 181 | if refresh: 182 | return True 183 | if os.path.isfile(cache_file) and os.path.getsize(cache_file) > 0: 184 | mod_time = os.path.getmtime(cache_file) 185 | current_time = time.time() 186 | if (mod_time + cache_expiration_time) > current_time: 187 | return False 188 | return True 189 | 190 | 191 | def get_cache_settings(cloud=None): 192 | config = os_client_config.config.OpenStackConfig( 193 | config_files=os_client_config.config.CONFIG_FILES + CONFIG_FILES) 194 | # For inventory-wide caching 195 | cache_expiration_time = config.get_cache_expiration_time() 196 | cache_path = config.get_cache_path() 197 | if cloud: 198 | cache_path = '{0}_{1}'.format(cache_path, cloud) 199 | if not os.path.exists(cache_path): 200 | os.makedirs(cache_path) 201 | cache_file = os.path.join(cache_path, 'ansible-inventory.cache') 202 | return (cache_file, cache_expiration_time) 203 | 204 | 205 | def to_json(in_dict): 206 | return json.dumps(in_dict, sort_keys=True, indent=2) 207 | 208 | 209 | def parse_args(): 210 | parser = argparse.ArgumentParser(description='OpenStack Inventory Module') 211 | parser.add_argument('--cloud', default=os.environ.get('OS_CLOUD'), 212 | help='Cloud name (default: None') 213 | parser.add_argument('--private', 214 | action='store_true', 215 | help='Use private address for ansible host') 216 | parser.add_argument('--refresh', action='store_true', 217 | help='Refresh cached information') 218 | parser.add_argument('--debug', action='store_true', default=False, 219 | help='Enable debug output') 220 | group = parser.add_mutually_exclusive_group(required=True) 221 | group.add_argument('--list', action='store_true', 222 | help='List active servers') 223 | group.add_argument('--host', help='List details about the specific host') 224 | 225 | return parser.parse_args() 226 | 227 | 228 | def main(): 229 | args = parse_args() 230 | try: 231 | 
config_files = os_client_config.config.CONFIG_FILES + CONFIG_FILES 232 | shade.simple_logging(debug=args.debug) 233 | inventory_args = dict( 234 | refresh=args.refresh, 235 | config_files=config_files, 236 | private=args.private, 237 | cloud=args.cloud, 238 | ) 239 | if hasattr(shade.inventory.OpenStackInventory, 'extra_config'): 240 | inventory_args.update(dict( 241 | config_key='ansible', 242 | config_defaults={ 243 | 'use_hostnames': False, 244 | 'expand_hostvars': True, 245 | 'fail_on_errors': True, 246 | } 247 | )) 248 | 249 | inventory = shade.inventory.OpenStackInventory(**inventory_args) 250 | 251 | if args.list: 252 | output = get_host_groups(inventory, refresh=args.refresh, cloud=args.cloud) 253 | elif args.host: 254 | output = to_json(inventory.get_host(args.host)) 255 | print(output) 256 | except shade.OpenStackCloudException as e: 257 | sys.stderr.write('%s\n' % e.message) 258 | sys.exit(1) 259 | sys.exit(0) 260 | 261 | 262 | if __name__ == '__main__': 263 | main() 264 | -------------------------------------------------------------------------------- /Chapter08/phonebook.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: phonebook 3 | roles: 4 | - phonebook 5 | -------------------------------------------------------------------------------- /Chapter08/playbook.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | roles: 4 | - openstack 5 | -------------------------------------------------------------------------------- /Chapter08/roles/openstack/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: adding public key 3 | os_keypair: 4 | name: aditya 5 | public_key: "{{ aditya_pub_key }}" 6 | tags: 7 | - recipe2 8 | 9 | - name: delete a public key 10 | os_keypair: 11 | name: aditya 12 | state: absent 13 | tags: 14 | - recipe2 15 | 16 | - name: create a security group for 
web servers 17 | os_security_group: 18 | name: web-sg 19 | state: present 20 | description: security group for web servers 21 | tags: 22 | - recipe3 23 | 24 | - name: allow port 80 for http 25 | os_security_group_rule: 26 | security_group: web-sg 27 | protocol: tcp 28 | port_range_min: 80 29 | port_range_max: 80 30 | remote_ip_prefix: 0.0.0.0/0 31 | tags: 32 | - recipe3 33 | 34 | - name: allow port 22 for SSH 35 | os_security_group_rule: 36 | security_group: web-sg 37 | protocol: tcp 38 | port_range_min: 22 39 | port_range_max: 22 40 | remote_ip_prefix: 0.0.0.0/0 41 | tags: 42 | - recipe3 43 | 44 | - name: creating a private network 45 | os_network: 46 | state: present 47 | name: private 48 | tags: 49 | - recipe4 50 | 51 | - name: creating a private subnet 52 | os_subnet: 53 | state: present 54 | network_name: private 55 | name: app 56 | cidr: 192.168.0.0/24 57 | dns_nameservers: 58 | - 8.8.4.4 59 | - 8.8.8.8 60 | host_routes: 61 | - destination: 0.0.0.0/0 62 | nexthop: 104.131.86.234 63 | - destination: 192.168.0.0/24 64 | nexthop: 192.168.0.1 65 | tags: 66 | - recipe4 67 | 68 | - name: Deploy an instance 69 | os_server: 70 | state: present 71 | name: webserver 72 | image: cirros 73 | key_name: aditya 74 | security_groups: web-sg 75 | wait: yes 76 | flavor: m1.tiny 77 | auto_floating_ip: yes 78 | network: private 79 | meta: 80 | hostname: webserver.localdomain 81 | tags: 82 | - recipe5 83 | 84 | - name: stop the webserver 85 | os_server_action: 86 | action: stop 87 | server: webserver 88 | tags: 89 | - recipe5 90 | 91 | - name: create 5G test volume 92 | os_volume: 93 | state: present 94 | size: 5 95 | display_name: data 96 | tags: 97 | - recipe6 98 | 99 | - name: attach volume to host 100 | os_server_volume: 101 | state: present 102 | server: webserver 103 | volume: data 104 | tags: 105 | - recipe6 106 | 107 | - name: create an object container 108 | os_object: 109 | state: present 110 | container: backup 111 | tags: 112 | - recipe7 113 | 114 | - name: upload 
backup.tar to backup container 115 | os_object: 116 | filename: /opt/backup.tar 117 | container: backup 118 | name: backup.tar 119 | state: present 120 | tags: 121 | - recipe7 122 | 123 | - name: creating a demo domain 124 | os_keystone_domain: 125 | name: demodomain 126 | description: Demo Domain 127 | state: present 128 | register: demo_domain 129 | tags: 130 | - recipe8 131 | 132 | - name: creating a demo role 133 | os_keystone_role: 134 | state: present 135 | name: demorole 136 | tags: 137 | - recipe8 138 | 139 | - name: creating a demo project 140 | os_project: 141 | state: present 142 | name: demoproject 143 | description: Demo Project 144 | domain_id: "{{ demo_domain.id }}" 145 | enabled: True 146 | tags: 147 | - recipe8 148 | 149 | - name: creating a demo group 150 | os_group: 151 | state: present 152 | name: demogroup 153 | description: "Demo Group" 154 | domain_id: "{{ demo_domain.id }}" 155 | tags: 156 | - recipe8 157 | 158 | - name: creating a demo user 159 | os_user: 160 | name: demouser 161 | password: secret-pass 162 | update_password: on_create 163 | email: demo@example.com 164 | domain: "{{ demo_domain.id }}" 165 | state: present 166 | tags: 167 | - recipe8 168 | 169 | - name: adding user to the group 170 | os_user_group: 171 | user: demouser 172 | group: demogroup 173 | tags: 174 | - recipe8 175 | 176 | - name: adding role to the group 177 | os_user_role: 178 | group: demo2 179 | role: demorole 180 | domain: "{{ demo_domain.id }}" 181 | tags: 182 | - recipe8 183 | 184 | - name: Create a custom flavor 185 | os_nova_flavor: 186 | name: custom1 187 | ram: 1024 188 | vcpus: 1 189 | disk: 10 190 | ephemeral: 10 191 | state: present 192 | tags: 193 | - recipe9 194 | 195 | - name: adding Fedora 27 as an image 196 | os_image: 197 | name: fedora-cloud-27 198 | container_format: bare 199 | disk_format: qcow2 200 | id: "{{ ansible_date_time.epoch | to_uuid }}" 201 | filename: /opt/Fedora-Cloud-Base-27-1.6.x86_64.qcow2 202 | state: present 203 | 
tags: 204 | - recipe10 205 | -------------------------------------------------------------------------------- /Chapter08/roles/openstack/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | aditya_pub_key: ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCbg83WYIxUfXWJ4bQiYfZYHceDwMJxnGfJqgYtHL/DAtegVY+Nm8MX3CRZYisfskt0m9CQ6y/Ux1OITTz+O11fgxLJcroZmKJbWW0K39gfHvFqR7FIe0zuJaxqUQUuyc0i6RCBRiZPiQQOPes2yDtfHgDWx3q9knS3ZXIAXcGLZrgfC1XnIK8CLAnZDved9Rue2bhsCnO9Mleh9g/CTtehMDAzD4NeSv9eETlHYkYSpJg8gFA3BFICpBxTqWSjf1mMQGSmiudFOhRjHIxL1Tvh+pnjSoL/jrLcP3RtMVuG0ZU0qkoAts1qpTwmyAJUz9Ts2EeyDJ0tXsFAiOFbmuMd aditya@devopsnexus.com 3 | 4 | -------------------------------------------------------------------------------- /Chapter08/roles/phonebook/files/phone-book.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Simple Phone Book 3 | 4 | [Service] 5 | WorkingDirectory=/opt/phone-book 6 | ExecStartPre=/bin/bash /opt/phone-book/init.sh 7 | ExecStart=/usr/bin/uwsgi --http-socket 0.0.0.0:8080 --manage-script-name --mount /phonebook=app:app 8 | Restart=on-failure 9 | RestartSec=5 10 | 11 | [Install] 12 | WantedBy=multi-user.target 13 | -------------------------------------------------------------------------------- /Chapter08/roles/phonebook/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: install epel repository 3 | package: 4 | name: epel-release 5 | state: present 6 | 7 | - name: install dependencies 8 | package: 9 | name: "{{ item }}" 10 | state: present 11 | with_items: 12 | - git 13 | - python-pip 14 | - gcc 15 | - python-devel 16 | 17 | - name: install python libraries 18 | pip: 19 | name: "{{ item }}" 20 | state: present 21 | with_items: 22 | - flask 23 | - flask-sqlalchemy 24 | - flask-migrate 25 | - uwsgi 26 | 27 | - name: get the application code 28 | git: 29 | repo: 
https://github.com/adimania/phone-book 30 | dest: /opt/phone-book 31 | 32 | - name: upload systemd unit file 33 | copy: 34 | src: phone-book.service 35 | dest: /etc/systemd/system/phone-book.service 36 | 37 | - name: start phonebook 38 | systemd: 39 | state: started 40 | daemon_reload: yes 41 | name: phone-book 42 | enabled: yes 43 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2018 Packt 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Ansible 2 Automation Cookbook 2 | This is the code repository for [Ansible 2 Automation Cookbook](https://www.packtpub.com/virtualization-and-cloud/ansible-2-cloud-automation-cookbook?utm_source=github&utm_medium=repository&utm_campaign=9781788295826), published by [Packt](https://www.packtpub.com/?utm_source=github). It contains all the supporting project files necessary to work through the book from start to finish. 3 | ## About the Book 4 | Ansible has a large collection of inbuilt modules for managing various cloud resources. The book starts with the concepts needed to safeguard your credentials and explains how you interact with cloud providers to manage resources. Each chapter begins with an introduction to using the right modules to manage a given cloud provider. The book also includes Amazon Web Services, Google Cloud, Microsoft Azure, and other providers. Each chapter guides you through creating basic computing resources along with other resources that you might use to deploy an application. Finally, you will deploy a sample application to demonstrate various use patterns and utilities for resources. 5 | 6 | ## Instructions and Navigation 7 | All of the code is organized into folders. Each folder starts with a number followed by the application name. For example, Chapter02. 
8 | 9 | 10 | 11 | The code will look like the following: 12 | ``` 13 | - name: Create Custom Network 14 | gce_net: 15 | name: my-network 16 | mode: custom 17 | subnet_name: "public-subnet" 18 | subnet_region: us-west1 19 | ipv4_range: '10.0.0.0/24' 20 | state: "present" 21 | service_account_email: "{{ service_account_email }}" 22 | project_id: "{{ project_id }}" 23 | credentials_file: "{{ credentials_file }}" 24 | tags: 25 | - recipe1 26 | ``` 27 | 28 | This book assumes that readers are already familiar with the basics of Ansible and the cloud provider they are going to work on. The book helps the readers to write infrastructure as code and automation. Readers will need a way to authenticate and authorize themselves to the desired cloud providers. Usually, that requires creating an account with said cloud provider. Although care has been taken to use trial and free-tier cloud providers wherever possible, certain recipes might cost users a small amount of money. Please be aware of the financial implications of that. 29 | 30 | From a hardware point of view, any modern computer running a 64-bit Linux flavor will be able to run the recipes. We have run these recipes from a single-core 1 GB RAM compute instance. 
31 | 32 | ## Errata 33 | * Page 141: In the section labeled **Managing security groups**, - name: allow port 80 for SSH _should be_ - name: allow port 22 for SSH 34 | 35 | ## Related Products 36 | * [Microsoft System Center Data Protection Manager Cookbook](https://www.packtpub.com/virtualization-and-cloud/microsoft-system-center-data-protection-manager-cookbook-0?utm_source=github&utm_medium=repository&utm_campaign=9781787289284) 37 | 38 | * [React Native Cookbook - Second Edition](https://www.packtpub.com/application-development/react-native-cookbook-second-edition?utm_source=github&utm_medium=repository&utm_campaign=9781788991926) 39 | 40 | * [QT5 Python GUI Programming Cookbook](https://www.packtpub.com/application-development/qt5-python-gui-programming-cookbook?utm_source=github&utm_medium=repository&utm_campaign=9781788831000) 41 | 42 | ### Suggestions and Feedback 43 | [Click here](https://docs.google.com/forms/d/e/1FAIpQLSe5qwunkGf6PUvzPirPDtuy1Du5Rlzew23UBp2S-P3wB-GcwQ/viewform) if you have any feedback or suggestions. 44 | ### Download a free PDF 45 | 46 | If you have already purchased a print or Kindle version of this book, you can get a DRM-free PDF version at no cost.
Simply click on the link to claim your free PDF.
47 |

https://packt.link/free-ebook/9781788295826

48 | --------------------------------------------------------------------------------