├── site.yml ├── files └── artifacts-placeholder ├── roles └── common │ ├── meta │ └── main.yml │ ├── files │ └── sample.txt │ ├── tasks │ ├── redhat.yml │ ├── main.yml │ ├── amazon.yml │ └── ubuntu.yml │ ├── handlers │ └── main.yml │ └── vars │ └── main.yml ├── TODO ├── ssh_config ├── group_vars ├── all └── packages ├── playbooks ├── plbk-system.yml └── plbk-sample-service-go.yml ├── Makefile ├── inventories ├── dev-sample ├── prd-sample ├── stg-sample ├── ec2.ini └── ec2.py ├── Dockerfile ├── README.md └── max /site.yml: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /files/artifacts-placeholder: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /roles/common/meta/main.yml: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /roles/common/files/sample.txt: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /roles/common/tasks/redhat.yml: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /TODO: -------------------------------------------------------------------------------- 1 | # TODO LIST 2 | - ADD service sample 3 | - ADD service status 4 | - ADD service logs 5 | - ADD fetch service repo 6 | - ADD build service 7 | - ADD push to registry 8 | -------------------------------------------------------------------------------- /roles/common/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # file: #roles/common/handlers/main.yml 3 | 4 | - 
name: restart-ntp 5 | service: name=ntp state=restarted 6 | tags: restart-ntp 7 | -------------------------------------------------------------------------------- /ssh_config: -------------------------------------------------------------------------------- 1 | RSAAuthentication yes 2 | ForwardAgent yes 3 | Compression yes 4 | Port 22 5 | ConnectTimeout 60 6 | 7 | # Example 8 | #Host 172.* 9 | # ProxyCommand ssh -l ubuntu 172.22.6.70 -W %h:%p 10 | # #IdentityFile ~/.ssh/... 11 | -------------------------------------------------------------------------------- /roles/common/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # file: #roles/common/tasks/main.yml 3 | 4 | - name: SYSTEM / redhat 5 | include: redhat.yml 6 | when: ansible_distribution == "Redhat" 7 | 8 | - name: SYSTEM / amazon 9 | include: amazon.yml 10 | when: ansible_distribution == "Amazon" 11 | 12 | - name: SYSTEM / ubuntu 13 | include: ubuntu.yml 14 | when: ansible_distribution == 'Ubuntu' 15 | -------------------------------------------------------------------------------- /group_vars/all: -------------------------------------------------------------------------------- 1 | --- 2 | # file: group_vars/all 3 | 4 | # Core Project Settings 5 | 6 | project_name: your_name_here 7 | project_group: "{{ project_name }}" 8 | project_src_root: /DATA/src 9 | project_etc_root: /DATA/etc 10 | docker_root: /DATA/DOCKER 11 | 12 | git_root: "https://github.com/include" 13 | git_branch: master 14 | 15 | docker_registry: include 16 | 17 | # Service vars 18 | 19 | sample_service_go: 20 | HOST_PORT: 8000 21 | CONTAINER_PORT: 8080 22 | -------------------------------------------------------------------------------- /roles/common/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | system_pkgs: 4 | - jq 5 | - git 6 | - vim 7 | - zip 8 | - ntp 9 | - mtr 10 | - ntp 11 | - curl 12 | - tmux 13 | - lsof 14 | - 
dstat 15 | - iotop 16 | - telnet 17 | - ntpdate 18 | - sysstat 19 | - vim-tiny 20 | - traceroute 21 | - python-simplejson 22 | - build-essential 23 | - libssl-dev 24 | - libxml2-dev 25 | - libgnutls-dev 26 | - libexpat1-dev 27 | - libghc-zlib-dev 28 | - libcurl4-gnutls-dev 29 | -------------------------------------------------------------------------------- /playbooks/plbk-system.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # file: #playbooks/plbk-servicetemplate.yml 3 | 4 | - hosts: system 5 | strategy: linear 6 | 7 | vars: 8 | max_action: "{{ lookup('env', 'MAX_ACTION')}}" 9 | max_what: "{{ lookup('env', 'MAX_WHAT')}}" 10 | max_env: "{{ lookup('env', 'MAX_ENV')}}" 11 | max_who: "{{ lookup('env', 'MAX_WHO')}}" 12 | max_sudo: "{{ lookup('env', 'MAX_SUDO')}}" 13 | 14 | vars_files: 15 | - ../group_vars/all 16 | - ../group_vars/packages 17 | 18 | roles: 19 | - { role: ../roles/common/ } 20 | 21 | tasks: 22 | 23 | - name: ping 24 | ping: 25 | register: result 26 | when: max_action == "ping" 27 | tags: ping 28 | 29 | - debug: msg="{{ result }}" 30 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | DOCKER_REGISTRY ?= include 2 | DOCKER_TAG ?= latest 3 | 4 | all: build 5 | 6 | build: 7 | docker build -t $(DOCKER_REGISTRY)/ansible:$(DOCKER_TAG) . 
8 | 9 | push: 10 | docker push $(DOCKER_REGISTRY)/ansible:$(DOCKER_TAG) 11 | 12 | run: 13 | docker run -e AWS_ACCESS_KEY_ID="$(AWS_ACCESS_KEY_ID)" \ 14 | -e AWS_SECRET_ACCESS_KEY="$(AWS_SECRET_ACCESS_KEY)" \ 15 | -e EC2_REGION="$(EC2_REGION)" \ 16 | -e ANSIBLE_HOSTS="./inventories/ec2.py" \ 17 | -e EC2_INIT_PATH="./inventories/ec2.ini" \ 18 | -v ~/.ssh:/root/.ssh \ 19 | -ti $(DOCKER_REGISTRY)/ansible:$(DOCKER_TAG) /bin/bash 20 | -------------------------------------------------------------------------------- /inventories/dev-sample: -------------------------------------------------------------------------------- 1 | [localhost] 2 | localhost ansible_connection=local 3 | 4 | [dockerhost] 5 | dockerhost ansible_host=192.168.33.10 6 | [dockerhost:vars] 7 | ansible_user=include 8 | ansible_ssh_private_key_file=~/.ssh/include_rsa.pub 9 | 10 | [container-cluster-A-template] 11 | srv-1 ansible_host=10.0.0.1 12 | srv-2 ansible_host=10.0.0.2 13 | srv-3 ansible_host=10.0.0.3 14 | srv-4 ansible_host=10.0.0.4 15 | [container-cluster-A-template:vars] 16 | ansible_user=include 17 | ansible_ssh_private_key_file=~/.ssh/include_rsa.pub 18 | 19 | [container-cluster-B-template] 20 | srv-11 ansible_host=10.0.0.11 21 | srv-22 ansible_host=10.0.0.22 22 | srv-33 ansible_host=10.0.0.33 23 | srv-44 ansible_host=10.0.0.44 24 | [container-cluster-B-template:vars] 25 | ansible_user=include 26 | ansible_ssh_private_key_file=~/.ssh/include_rsa.pub 27 | -------------------------------------------------------------------------------- /inventories/prd-sample: -------------------------------------------------------------------------------- 1 | [localhost] 2 | localhost ansible_connection=local 3 | 4 | [dockerhost] 5 | dockerhost ansible_host=192.168.33.10 6 | [dockerhost:vars] 7 | ansible_user=include 8 | ansible_ssh_private_key_file=~/.ssh/include_rsa.pub 9 | 10 | [container-cluster-A-template] 11 | srv-1 ansible_host=10.0.0.1 12 | srv-2 ansible_host=10.0.0.2 13 | srv-3 ansible_host=10.0.0.3 14 | 
srv-4 ansible_host=10.0.0.4 15 | [container-cluster-A-template:vars] 16 | ansible_user=include 17 | ansible_ssh_private_key_file=~/.ssh/include_rsa.pub 18 | 19 | [container-cluster-B-template] 20 | srv-11 ansible_host=10.0.0.11 21 | srv-22 ansible_host=10.0.0.22 22 | srv-33 ansible_host=10.0.0.33 23 | srv-44 ansible_host=10.0.0.44 24 | [container-cluster-B-template:vars] 25 | ansible_user=include 26 | ansible_ssh_private_key_file=~/.ssh/include_rsa.pub 27 | -------------------------------------------------------------------------------- /inventories/stg-sample: -------------------------------------------------------------------------------- 1 | [localhost] 2 | localhost ansible_connection=local 3 | 4 | [dockerhost] 5 | dockerhost ansible_host=192.168.33.10 6 | [dockerhost:vars] 7 | ansible_user=include 8 | ansible_ssh_private_key_file=~/.ssh/include_rsa.pub 9 | 10 | [container-cluster-A-template] 11 | srv-1 ansible_host=10.0.0.1 12 | srv-2 ansible_host=10.0.0.2 13 | srv-3 ansible_host=10.0.0.3 14 | srv-4 ansible_host=10.0.0.4 15 | [container-cluster-A-template:vars] 16 | ansible_user=include 17 | ansible_ssh_private_key_file=~/.ssh/include_rsa.pub 18 | 19 | [container-cluster-B-template] 20 | srv-11 ansible_host=10.0.0.11 21 | srv-22 ansible_host=10.0.0.22 22 | srv-33 ansible_host=10.0.0.33 23 | srv-44 ansible_host=10.0.0.44 24 | [container-cluster-B-template:vars] 25 | ansible_user=include 26 | ansible_ssh_private_key_file=~/.ssh/include_rsa.pub 27 | -------------------------------------------------------------------------------- /group_vars/packages: -------------------------------------------------------------------------------- 1 | --- 2 | # file: group_vars/packages 3 | 4 | # DRY, packages with same name on both systems 5 | common_pkgs: 6 | - git 7 | - vim 8 | - zip 9 | - ntp 10 | - mtr 11 | - ntp 12 | - curl 13 | - tmux 14 | - lsof 15 | - dstat 16 | - iotop 17 | - telnet 18 | - ntpdate 19 | - sysstat 20 | - traceroute 21 | - python-simplejson 22 | 23 | # 
amazon packages 24 | amazon_pkgs: 25 | - docker 26 | - redhat-lsb-core 27 | - epel-release 28 | - zlib-devel 29 | - expat-devel 30 | - openssl-devel 31 | - libxml2-devel 32 | - libcurl-devel 33 | - libpng-devel 34 | - libjpeg-turbo-devel 35 | 36 | amazon_group_pkgs: 37 | - "@Console internet tools" 38 | - "@Development tools" 39 | - "@Networking Tools" 40 | - "@Perl Support" 41 | - "@System Tools" 42 | 43 | # ubuntu packages 44 | ubuntu_pkgs: 45 | - build-essential 46 | - libssl-dev 47 | - libxml2-dev 48 | - libgnutls-dev 49 | - libexpat1-dev 50 | - libghc-zlib-dev 51 | - libcurl4-gnutls-dev -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ubuntu:latest 2 | MAINTAINER include 3 | 4 | ENV DEBIAN_FRONTEND noninteractive 5 | ENV LANGUAGE en_US.UTF-8 6 | ENV LANG en_US.UTF-8 7 | ENV LC_ALL en_US.UTF-8 8 | 9 | RUN apt-get update && apt-get upgrade -y && apt-get install -y \ 10 | language-pack-en && \ 11 | locale-gen en_US.UTF-8 && dpkg-reconfigure locales && \ 12 | apt-get install -y \ 13 | build-essential \ 14 | gcc \ 15 | make \ 16 | software-properties-common && \ 17 | apt-add-repository ppa:ansible/ansible -y && \ 18 | apt-get update && apt-get install -y \ 19 | ansible \ 20 | python-dev \ 21 | python-software-properties \ 22 | python-simplejson \ 23 | python-boto3 \ 24 | python3-boto3 \ 25 | awscli \ 26 | wget \ 27 | curl \ 28 | git \ 29 | s3cmd && \ 30 | apt-get clean 31 | 32 | RUN curl -O https://bootstrap.pypa.io/get-pip.py && \ 33 | python get-pip.py 34 | 35 | WORKDIR /ansible 36 | 37 | ADD ./files ./files 38 | ADD ./group_vars ./group_vars 39 | ADD ./inventories ./inventories 40 | ADD ./playbooks ./playbooks 41 | ADD ./roles ./roles 42 | ADD ./ansible.cfg ./ 43 | ADD ./site.yml ./ 44 | ADD ./ssh_config ./ 45 | -------------------------------------------------------------------------------- /roles/common/tasks/amazon.yml: 
-------------------------------------------------------------------------------- 1 | 2 | # file: #roles/common/tasks/amazon.yml 3 | 4 | # System configs 5 | 6 | - name: common / creates /DATA/etc 7 | file: 8 | path="{{ project_etc_root }}" 9 | mode=0644 10 | owner=root 11 | group=root 12 | state=directory 13 | become: True 14 | when: max_action == "setup" and max_what == "system" 15 | tags: setup 16 | 17 | - name: common / creates /DATA/src 18 | file: 19 | path="{{ project_etc_root }}" 20 | mode=0644 21 | owner=root 22 | group=root 23 | state=directory 24 | become: True 25 | when: max_action == "setup" and max_what == "system" 26 | tags: setup 27 | 28 | 29 | # packages 30 | 31 | - name: install amazon base packages 1 32 | yum: pkg={{ item }} state=latest 33 | with_items: "{{ amazon_pkgs }}" 34 | become: True 35 | when: max_action == "setup" and max_what == "system" 36 | tags: setup 37 | 38 | - name: install amazon group packages 2 39 | yum: pkg="{{ item }}" state=latest 40 | with_items: "{{ amazon_group_pkgs }}" 41 | become: True 42 | when: max_action == "setup" and max_what == "system" 43 | tags: setup 44 | 45 | - name: install amazon group packages 3 46 | yum: pkg={{ item }} state=latest 47 | with_items: "{{ common_pkgs }}" 48 | become: True 49 | tags: setup 50 | 51 | - name: yum clean all 52 | command: yum clean all 53 | become: True 54 | when: max_action == "setup" and max_what == "system" 55 | tags: setup 56 | 57 | 58 | # setup daemons 59 | 60 | - service: name=docker state=started enabled=True 61 | become: True 62 | when: max_action == "setup" and max_what == "system" 63 | tags: setup 64 | -------------------------------------------------------------------------------- /roles/common/tasks/ubuntu.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # file: #roles/common/tasks/main.yml 3 | 4 | # System configs 5 | 6 | # Prepare System Layout to Receive Configurations 7 | - name: common / creates /DATA/etc 8 | file: 9 
| path="{{ project_etc_root }}" 10 | mode=0644 11 | owner=root 12 | group=root 13 | state=directory 14 | when: max_action == "sync" and max_what == "system" 15 | tags: sync 16 | 17 | - name: common / reates /DATA/src 18 | file: 19 | path="{{ project_etc_root }}" 20 | mode=0644 21 | owner=root 22 | group=root 23 | state=directory 24 | when: max_action == "sync" and max_what == "system" 25 | tags: sync 26 | 27 | # packages 28 | 29 | - name: common / apt-get update 30 | apt: update_cache=yes cache_valid_time=3600 31 | when: max_action == "sync" and max_env == "prd" and max_what == "system" 32 | tags: pkgs 33 | 34 | - name: common / install ubuntu packages 35 | apt: pkg={{ item }} state=latest 36 | with_items: system_pkgs 37 | when: max_action == "sync" and max_env == "prd" and max_what == "system" 38 | tags: pkgs 39 | 40 | - name: common / apt-get autoclean 41 | command: apt-get autoclean 42 | ignore_errors: true 43 | when: max_action == "sync" and max_env == "prd" and max_what == "system" 44 | tags: pkgs 45 | 46 | - name: common / apt-get clean 47 | command: apt-get clean 48 | ignore_errors: true 49 | when: max_action == "sync" and max_env == "prd" and max_what == "system" 50 | tags: pkgs 51 | 52 | - name: fix /etc/timezone 53 | command: > 54 | echo "Europe/Lisbon" >> /etc/timezone && 55 | dpkg-reconfigure --frontend noninteractive tzdata 56 | when: max_action == "sync" and max_env == "prd" 57 | notify: restart-ntp 58 | tags: sync 59 | -------------------------------------------------------------------------------- /playbooks/plbk-sample-service-go.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # file: #playbooks/plbk-sample-service-go.yml 3 | 4 | - hosts: all 5 | strategy: linear 6 | 7 | vars: 8 | max_action: "{{ lookup('env', 'MAX_ACTION')}}" 9 | max_what: "{{ lookup('env', 'MAX_WHAT')}}" 10 | max_env: "{{ lookup('env', 'MAX_ENV')}}" 11 | max_who: "{{ lookup('env', 'MAX_WHO')}}" 12 | max_sudo: "{{ lookup('env', 
'MAX_SUDO')}}" 13 | max_version: "{{ lookup('env', 'MAX_VERSION')}}" 14 | 15 | vars_files: 16 | - ../group_vars/all 17 | 18 | tasks: 19 | 20 | - name: fetch repository 21 | local_action: git repo={{ git_root }}/{{ max_what }}.git 22 | accept_hostkey=yes 23 | key_file=~/.ssh/id_rsa 24 | dest=/tmp/{{ max_what }} 25 | version={{ git_branch }} 26 | run_once: True 27 | when: max_action == "fetch" and inventory_hostname in groups['{{ max_what }}'] 28 | tags: fetch 29 | 30 | - name: build container image 31 | local_action: shell cd /tmp/{{ max_what }} && VERSION={{ max_version }} make 32 | run_once: True 33 | when: max_action == "build" 34 | tags: build 35 | 36 | - name: test service 37 | local_action: shell cd /tmp/{{ max_what }} && make test 38 | run_once: True 39 | register: result 40 | when: max_action == "test" 41 | tags: test 42 | #- debug: msg="{{ result }}" 43 | 44 | - name: push image to registry 45 | local_action: shell docker push {{ docker_registry }}/{{ max_what }}:{{ max_version }} 46 | #sudo docker push {{ docker_registry }}/{{ max_what }}:{{ max_version }} 47 | run_once: True 48 | when: max_action == "push" 49 | tags: push 50 | 51 | - name: deploy container to server 52 | command: > 53 | docker run -d --restart=always --name {{ max_what }} 54 | -p {{ sample_service_go.HOST_PORT }}:{{ sample_service_go.CONTAINER_PORT }} 55 | {{ docker_registry }}/{{ max_what }}:{{ max_version }} 56 | become: true 57 | when: max_action == "deploy" and inventory_hostname in groups['{{ max_what }}'] 58 | tags: deploy 59 | 60 | - name: undeploy container from server 61 | shell: docker stop {{ max_what}} && docker rm -f {{ max_what }} 62 | when: max_action == "undeploy" and inventory_hostname in groups['{{ max_what }}'] 63 | become: true 64 | tags: undeploy 65 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Ansible 2 | 3 | ## This is a simple 
Ansible setup to work with AWS. 4 | 5 | The current directory layout is the one I use everyday. 6 | I don't have any plans to share the playbooks I use here which, mostly, 7 | are fetched from Ansible Galaxy and customized. 8 | 9 | https://registry.hub.docker.com/u/include/ansible/ 10 | 11 | ## MAX 12 | 13 | Max is a private joke still it is a simple wrapper around some ugly `ansible-playbook`commands, to help anyone using it easily every day without thinking too much. Just pick-up a service name for your playbook ```[WHAT]```, a task ```[ACTION]``` and apply it to a destination ```[ENVIRONMENT]```. Thats it. 14 | 15 | Lets take a look... 16 | 17 | ```sh 18 | ./max 19 | 20 | Usage: 21 | ./max.sh [WHAT] [ACTION] [ENVIRONMENT] 22 | 23 | ie: 24 | ./max.sh sample-service-go fetch prd 25 | 26 | list of services: 27 | 28 | playbooks/plbk-sample-service-go.yml 29 | TASK TAGS: [build, deploy, fetch, push, test, undeploy] 30 | playbooks/plbk-system.yml 31 | TASK TAGS: [docker, ping] 32 | ``` 33 | 34 | Playbooks are stored in its default Ansible location, ```playbooks/``` but I have prefixed mine with ```plbk-```. If you want to change this just edit ```max``` bash script and change ```PLBK_PREFIX="plbk-"``` to anything else to your taste or even leave it blank. 35 | 36 | ## Inventory 37 | 38 | Dive into ```inventories/stg-sample``` but jump into Ansible manual if you need help on this. 39 | 40 | I have a set of server which belong to a cluster ```[services-cluster01]```; and ```[sample-service-go]```, our use case here is child of that same cluster. Notice I've configured some variables specific to that service, ```[sample-servive-go:vars]```. 
41 | 42 | ```ini 43 | [localhost] 44 | localhost ansible_connection=local 45 | 46 | [services-cluster01] 47 | a ansible_host=172.21.70.188 48 | b ansible_host=172.21.70.189 49 | 50 | [system:children] 51 | services-cluster01 52 | 53 | [sample-service-go:children] 54 | services-cluster01 55 | 56 | [sample-service-go:vars] 57 | HOST_PORT=8080 58 | CONTAINER_PORT=8080 59 | 60 | [all:children] 61 | services-cluster01 62 | system 63 | sample-service-go 64 | 65 | [all:vars] 66 | ansible_user=ec2-user 67 | ansible_private_key_file=~/.ssh/lust-stg-master-key.pem 68 | ``` 69 | 70 | ## Playbooks and sample task 71 | 72 | ```yaml 73 | ... 74 | - name: deploy container to server 75 | command: > 76 | sudo docker run -d --restart=always --name {{ max_what }} 77 | -p {{ HOST_PORT }}:{{ CONTAINER_PORT }} 78 | {{ docker_registry }}/{{ max_what }}:{{ max_version }} 79 | when: max_action == "deploy" and inventory_hostname in groups['{{ max_what }}'] 80 | tags: deploy 81 | ... 82 | ``` 83 | -------------------------------------------------------------------------------- /max: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | SCRIPTNAME=`basename $0` 4 | ROOTDIR="../../../" 5 | CURDIR=`pwd` 6 | 7 | ANSIBLE_OPTS="-v" 8 | ANSIBLE_PLAYBOOK="`which ansible-playbook` ${ANSIBLE_OPTS}" 9 | 10 | PLBK_PREFIX="plbk-" 11 | 12 | export MAX_WHAT=$1; shift 13 | export MAX_ACTION=$1; shift 14 | export MAX_ENV=$1; shift 15 | 16 | # shift can result in error, so we move this over here and test MAX_ENV before 17 | # doing anything 18 | set -e 19 | 20 | # Extracts image version (tag) from cmdline like "sample-service-go:onbuild" 21 | # if blank, defaults to 'latest' 22 | export MAX_VERSION=$(echo "${MAX_WHAT}" | awk -F':' '{print $2}') 23 | 24 | if [ "${MAX_VERSION}" == "" ]; then 25 | export MAX_VERSION="latest" 26 | fi 27 | 28 | export MAX_WHAT=$(echo $MAX_WHAT | awk -F':' '{print $1}') 29 | 30 | # Reads current user running max.sh and 
passes that variable into playbooks. 31 | # If this playboook is going to be targeted localy, local user is used, instead 32 | # root is used (on remote servers). 33 | if [[ "${MAX_ENV}" == "dev" ]]; then 34 | export MAX_WHO=$(whoami) 35 | export MAX_SUDO="no" 36 | else 37 | export MAX_WHO="" 38 | export MAX_SUDO="" 39 | fi 40 | 41 | # Loads different inventories based on ENV variable captured via cmdline. 42 | ANSIBLE_HOSTS="" 43 | function getEnvironment() { 44 | if [ -f "inventories/${MAX_ENV}" ]; then 45 | export ANSIBLE_HOSTS="inventories/${MAX_ENV}" 46 | elif [ -f "inventories/${MAX_ENV}.ini" ]; then 47 | export ANSIBLE_HOSTS="inventories/${MAX_ENV}.ini" 48 | else 49 | printf "ERROR: unknown Environment!\n" 50 | exit 1; 51 | fi 52 | } 53 | 54 | # Prepares the command line statement to be executed by Ansible. 55 | function runPlay() { 56 | getEnvironment 57 | printf "${MAX_ACTION}'ing ${MAX_WHAT} in ${MAX_ENV}...\n" 58 | CMD="${ANSIBLE_PLAYBOOK} -i ${ANSIBLE_HOSTS} ./playbooks/${PLBK_PREFIX}${MAX_WHAT}.yml" 59 | echo ${CMD} $@ 60 | [ ! 
-z "$DEBUG" ] || exec ${CMD} $@ 61 | } 62 | 63 | function getListofServices { 64 | for i in playbooks/"${PLBK_PREFIX}"*.yml 65 | do 66 | printf " "; basename $i | sed -e "s/${PLBK_PREFIX}//" -e 's/\.ya?ml//' 67 | printf " "; ansible-playbook --list-tags -i inventories/prd-sample $i | grep "TASK TAGS:" | sed -e 's/TASK TAGS://' 68 | done 69 | } 70 | 71 | function myhelp() { 72 | cat << EOH 73 | Usage: 74 | ./max.sh [WHAT] [ACTION] [ENVIRONMENT] 75 | 76 | ie: 77 | ./max.sh sample-service-go fetch prd 78 | 79 | list of services: 80 | EOH 81 | getListofServices 82 | } 83 | 84 | # user passed in all params or we throw him the help in his face 85 | if [ -z "$MAX_ENV" ]; then 86 | myhelp 87 | exit 1 88 | fi 89 | 90 | case "${MAX_ACTION}" in 91 | "help" | "" ) # show the help 92 | myhelp 93 | ;; 94 | 95 | 'list') # list services 96 | getListofServices 97 | ;; 98 | 99 | *) # default stuff 100 | runPlay $@ 101 | ;; 102 | esac 103 | 104 | exit 0; 105 | -------------------------------------------------------------------------------- /inventories/ec2.ini: -------------------------------------------------------------------------------- 1 | # Ansible EC2 external inventory script settings 2 | # 3 | 4 | [ec2] 5 | 6 | # to talk to a private eucalyptus instance uncomment these lines 7 | # and edit edit eucalyptus_host to be the host name of your cloud controller 8 | #eucalyptus = True 9 | #eucalyptus_host = clc.cloud.domain.org 10 | 11 | # AWS regions to make calls to. Set this to 'all' to make request to all regions 12 | # in AWS and merge the results together. Alternatively, set this to a comma 13 | # separated list of regions. E.g. 'us-east-1,us-west-1,us-west-2' and do not 14 | # provide the 'regions_exclude' option. If this is set to 'auto', AWS_REGION or 15 | # AWS_DEFAULT_REGION environment variable will be read to determine the region. 
16 | regions = all 17 | regions_exclude = us-gov-west-1, cn-north-1 18 | 19 | # When generating inventory, Ansible needs to know how to address a server. 20 | # Each EC2 instance has a lot of variables associated with it. Here is the list: 21 | # http://docs.pythonboto.org/en/latest/ref/ec2.html#module-boto.ec2.instance 22 | # Below are 2 variables that are used as the address of a server: 23 | # - destination_variable 24 | # - vpc_destination_variable 25 | 26 | # This is the normal destination variable to use. If you are running Ansible 27 | # from outside EC2, then 'public_dns_name' makes the most sense. If you are 28 | # running Ansible from within EC2, then perhaps you want to use the internal 29 | # address, and should set this to 'private_dns_name'. The key of an EC2 tag 30 | # may optionally be used; however the boto instance variables hold precedence 31 | # in the event of a collision. 32 | destination_variable = public_dns_name 33 | 34 | # This allows you to override the inventory_name with an ec2 variable, instead 35 | # of using the destination_variable above. Addressing (aka ansible_ssh_host) 36 | # will still use destination_variable. Tags should be written as 'tag_TAGNAME'. 37 | #hostname_variable = tag_Name 38 | 39 | # For server inside a VPC, using DNS names may not make sense. When an instance 40 | # has 'subnet_id' set, this variable is used. If the subnet is public, setting 41 | # this to 'ip_address' will return the public IP address. For instances in a 42 | # private subnet, this should be set to 'private_ip_address', and Ansible must 43 | # be run from within EC2. The key of an EC2 tag may optionally be used; however 44 | # the boto instance variables hold precedence in the event of a collision. 
45 | # WARNING: - instances that are in the private vpc, _without_ public ip address 46 | # will not be listed in the inventory until You set: 47 | vpc_destination_variable = private_ip_address 48 | # vpc_destination_variable = ip_address 49 | 50 | # The following two settings allow flexible ansible host naming based on a 51 | # python format string and a comma-separated list of ec2 tags. Note that: 52 | # 53 | # 1) If the tags referenced are not present for some instances, empty strings 54 | # will be substituted in the format string. 55 | # 2) This overrides both destination_variable and vpc_destination_variable. 56 | # 57 | #destination_format = {0}.{1}.example.com 58 | #destination_format_tags = Name,environment 59 | 60 | # To tag instances on EC2 with the resource records that point to them from 61 | # Route53, set 'route53' to True. 62 | route53 = False 63 | 64 | # To use Route53 records as the inventory hostnames, uncomment and set 65 | # to equal the domain name you wish to use. You must also have 'route53' (above) 66 | # set to True. 67 | route53_hostnames = .example.com 68 | 69 | # To exclude RDS instances from the inventory, uncomment and set to False. 70 | rds = False 71 | 72 | # To exclude ElastiCache instances from the inventory, uncomment and set to False. 73 | elasticache = False 74 | 75 | # Additionally, you can specify the list of zones to exclude looking up in 76 | # 'route53_excluded_zones' as a comma-separated list. 77 | # route53_excluded_zones = samplezone1.com, samplezone2.com 78 | 79 | # By default, only EC2 instances in the 'running' state are returned. Set 80 | # 'all_instances' to True to return all instances regardless of state. 81 | all_instances = False 82 | 83 | # By default, only EC2 instances in the 'running' state are returned. Specify 84 | # EC2 instance states to return as a comma-separated list. This 85 | # option is overridden when 'all_instances' is True. 
86 | # instance_states = pending, running, shutting-down, terminated, stopping, stopped 87 | 88 | # By default, only RDS instances in the 'available' state are returned. Set 89 | # 'all_rds_instances' to True return all RDS instances regardless of state. 90 | all_rds_instances = False 91 | 92 | # Include RDS cluster information (Aurora etc.) 93 | include_rds_clusters = False 94 | 95 | # By default, only ElastiCache clusters and nodes in the 'available' state 96 | # are returned. Set 'all_elasticache_clusters' and/or 'all_elastic_nodes' 97 | # to True return all ElastiCache clusters and nodes, regardless of state. 98 | # 99 | # Note that all_elasticache_nodes only applies to listed clusters. That means 100 | # if you set all_elastic_clusters to false, no node will be return from 101 | # unavailable clusters, regardless of the state and to what you set for 102 | # all_elasticache_nodes. 103 | all_elasticache_replication_groups = False 104 | all_elasticache_clusters = False 105 | all_elasticache_nodes = False 106 | 107 | # API calls to EC2 are slow. For this reason, we cache the results of an API 108 | # call. Set this to the path you want cache files to be written to. Two files 109 | # will be written to this directory: 110 | # - ansible-ec2.cache 111 | # - ansible-ec2.index 112 | cache_path = ~/.ansible/tmp 113 | 114 | # The number of seconds a cache file is considered valid. After this many 115 | # seconds, a new API call will be made, and the cache file will be updated. 116 | # To disable the cache, set this value to 0 117 | cache_max_age = 300 118 | 119 | # Organize groups into a nested/hierarchy instead of a flat namespace. 120 | nested_groups = False 121 | 122 | # Replace - tags when creating groups to avoid issues with ansible 123 | replace_dash_in_groups = True 124 | 125 | # If set to true, any tag of the form "a,b,c" is expanded into a list 126 | # and the results are used to create additional tag_* inventory groups. 
127 | expand_csv_tags = False 128 | 129 | # The EC2 inventory output can become very large. To manage its size, 130 | # configure which groups should be created. 131 | group_by_instance_id = True 132 | group_by_region = True 133 | group_by_availability_zone = True 134 | group_by_aws_account = False 135 | group_by_ami_id = True 136 | group_by_instance_type = True 137 | group_by_instance_state = False 138 | group_by_key_pair = True 139 | group_by_vpc_id = True 140 | group_by_security_group = True 141 | group_by_tag_keys = True 142 | group_by_tag_none = True 143 | group_by_route53_names = True 144 | group_by_rds_engine = True 145 | group_by_rds_parameter_group = True 146 | group_by_elasticache_engine = True 147 | group_by_elasticache_cluster = True 148 | group_by_elasticache_parameter_group = True 149 | group_by_elasticache_replication_group = True 150 | 151 | # If you only want to include hosts that match a certain regular expression 152 | # pattern_include = staging-* 153 | 154 | # If you want to exclude any hosts that match a certain regular expression 155 | # pattern_exclude = staging-* 156 | 157 | # Instance filters can be used to control which instances are retrieved for 158 | # inventory. For the full list of possible filters, please read the EC2 API 159 | # docs: http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-DescribeInstances.html#query-DescribeInstances-filters 160 | # Filters are key/value pairs separated by '=', to list multiple filters use 161 | # a list separated by commas. See examples below. 162 | 163 | # If you want to apply multiple filters simultaneously, set stack_filters to 164 | # True. Default behaviour is to combine the results of all filters. Stacking 165 | # allows the use of multiple conditions to filter down, for example by 166 | # environment and type of host. 
167 | stack_filters = False 168 | 169 | # Retrieve only instances with (key=value) env=staging tag 170 | # instance_filters = tag:env=staging 171 | instance_filters = tag:Environment=prd 172 | 173 | # Retrieve only instances with role=webservers OR role=dbservers tag 174 | # instance_filters = tag:role=webservers,tag:role=dbservers 175 | 176 | # Retrieve only t1.micro instances OR instances with tag env=staging 177 | # instance_filters = instance-type=t1.micro,tag:env=staging 178 | 179 | # You can use wildcards in filter values also. Below will list instances which 180 | # tag Name value matches webservers1* 181 | # (ex. webservers15, webservers1a, webservers123 etc) 182 | # instance_filters = tag:Name=webservers1* 183 | 184 | # An IAM role can be assumed, so all requests are run as that role. 185 | # This can be useful for connecting across different accounts, or to limit user 186 | # access 187 | # iam_role = role-arn 188 | 189 | # A boto configuration profile may be used to separate out credentials 190 | # see http://boto.readthedocs.org/en/latest/boto_config_tut.html 191 | # boto_profile = some-boto-profile-name 192 | 193 | 194 | [credentials] 195 | 196 | # The AWS credentials can optionally be specified here. Credentials specified 197 | # here are ignored if the environment variable AWS_ACCESS_KEY_ID or 198 | # AWS_PROFILE is set, or if the boto_profile property above is set. 199 | # 200 | # Supplying AWS credentials here is not recommended, as it introduces 201 | # non-trivial security concerns. When going down this route, please make sure 202 | # to set access permissions for this file correctly, e.g. handle it the same 203 | # way as you would a private SSH key. 204 | # 205 | # Unlike the boto and AWS configure files, this section does not support 206 | # profiles. 
207 | # 208 | # aws_access_key_id = AXXXXXXXXXXXXXX 209 | # aws_secret_access_key = XXXXXXXXXXXXXXXXXXX 210 | # aws_security_token = XXXXXXXXXXXXXXXXXXXXXXXXXXXX -------------------------------------------------------------------------------- /inventories/ec2.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | ''' 4 | EC2 external inventory script 5 | ================================= 6 | 7 | Generates inventory that Ansible can understand by making API request to 8 | AWS EC2 using the Boto library. 9 | 10 | NOTE: This script assumes Ansible is being executed where the environment 11 | variables needed for Boto have already been set: 12 | export AWS_ACCESS_KEY_ID='AK123' 13 | export AWS_SECRET_ACCESS_KEY='abc123' 14 | 15 | optional region environment variable if region is 'auto' 16 | 17 | This script also assumes there is an ec2.ini file alongside it. To specify a 18 | different path to ec2.ini, define the EC2_INI_PATH environment variable: 19 | 20 | export EC2_INI_PATH=/path/to/my_ec2.ini 21 | 22 | If you're using eucalyptus you need to set the above variables and 23 | you need to define: 24 | 25 | export EC2_URL=http://hostname_of_your_cc:port/services/Eucalyptus 26 | 27 | If you're using boto profiles (requires boto>=2.24.0) you can choose a profile 28 | using the --boto-profile command line argument (e.g. 
ec2.py --boto-profile prod) or using 29 | the AWS_PROFILE variable: 30 | 31 | AWS_PROFILE=prod ansible-playbook -i ec2.py myplaybook.yml 32 | 33 | For more details, see: http://docs.pythonboto.org/en/latest/boto_config_tut.html 34 | 35 | When run against a specific host, this script returns the following variables: 36 | - ec2_ami_launch_index 37 | - ec2_architecture 38 | - ec2_association 39 | - ec2_attachTime 40 | - ec2_attachment 41 | - ec2_attachmentId 42 | - ec2_block_devices 43 | - ec2_client_token 44 | - ec2_deleteOnTermination 45 | - ec2_description 46 | - ec2_deviceIndex 47 | - ec2_dns_name 48 | - ec2_eventsSet 49 | - ec2_group_name 50 | - ec2_hypervisor 51 | - ec2_id 52 | - ec2_image_id 53 | - ec2_instanceState 54 | - ec2_instance_type 55 | - ec2_ipOwnerId 56 | - ec2_ip_address 57 | - ec2_item 58 | - ec2_kernel 59 | - ec2_key_name 60 | - ec2_launch_time 61 | - ec2_monitored 62 | - ec2_monitoring 63 | - ec2_networkInterfaceId 64 | - ec2_ownerId 65 | - ec2_persistent 66 | - ec2_placement 67 | - ec2_platform 68 | - ec2_previous_state 69 | - ec2_private_dns_name 70 | - ec2_private_ip_address 71 | - ec2_publicIp 72 | - ec2_public_dns_name 73 | - ec2_ramdisk 74 | - ec2_reason 75 | - ec2_region 76 | - ec2_requester_id 77 | - ec2_root_device_name 78 | - ec2_root_device_type 79 | - ec2_security_group_ids 80 | - ec2_security_group_names 81 | - ec2_shutdown_state 82 | - ec2_sourceDestCheck 83 | - ec2_spot_instance_request_id 84 | - ec2_state 85 | - ec2_state_code 86 | - ec2_state_reason 87 | - ec2_status 88 | - ec2_subnet_id 89 | - ec2_tenancy 90 | - ec2_virtualization_type 91 | - ec2_vpc_id 92 | 93 | These variables are pulled out of a boto.ec2.instance object. There is a lack of 94 | consistency with variable spellings (camelCase and underscores) since this 95 | just loops through all variables the object exposes. It is preferred to use the 96 | ones with underscores when multiple exist. 
97 | 98 | In addition, if an instance has AWS Tags associated with it, each tag is a new 99 | variable named: 100 | - ec2_tag_[Key] = [Value] 101 | 102 | Security groups are comma-separated in 'ec2_security_group_ids' and 103 | 'ec2_security_group_names'. 104 | ''' 105 | 106 | # (c) 2012, Peter Sankauskas 107 | # 108 | # This file is part of Ansible, 109 | # 110 | # Ansible is free software: you can redistribute it and/or modify 111 | # it under the terms of the GNU General Public License as published by 112 | # the Free Software Foundation, either version 3 of the License, or 113 | # (at your option) any later version. 114 | # 115 | # Ansible is distributed in the hope that it will be useful, 116 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 117 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 118 | # GNU General Public License for more details. 119 | # 120 | # You should have received a copy of the GNU General Public License 121 | # along with Ansible. If not, see . 
122 | 123 | ###################################################################### 124 | 125 | import sys 126 | import os 127 | import argparse 128 | import re 129 | from time import time 130 | import boto 131 | from boto import ec2 132 | from boto import rds 133 | from boto import elasticache 134 | from boto import route53 135 | from boto import sts 136 | import six 137 | 138 | from ansible.module_utils import ec2 as ec2_utils 139 | 140 | HAS_BOTO3 = False 141 | try: 142 | import boto3 143 | HAS_BOTO3 = True 144 | except ImportError: 145 | pass 146 | 147 | from six.moves import configparser 148 | from collections import defaultdict 149 | 150 | try: 151 | import json 152 | except ImportError: 153 | import simplejson as json 154 | 155 | 156 | class Ec2Inventory(object): 157 | 158 | def _empty_inventory(self): 159 | return {"_meta": {"hostvars": {}}} 160 | 161 | def __init__(self): 162 | ''' Main execution path ''' 163 | 164 | # Inventory grouped by instance IDs, tags, security groups, regions, 165 | # and availability zones 166 | self.inventory = self._empty_inventory() 167 | 168 | self.aws_account_id = None 169 | 170 | # Index of hostname (address) to instance ID 171 | self.index = {} 172 | 173 | # Boto profile to use (if any) 174 | self.boto_profile = None 175 | 176 | # AWS credentials. 
177 | self.credentials = {} 178 | 179 | # Read settings and parse CLI arguments 180 | self.parse_cli_args() 181 | self.read_settings() 182 | 183 | # Make sure that profile_name is not passed at all if not set 184 | # as pre 2.24 boto will fall over otherwise 185 | if self.boto_profile: 186 | if not hasattr(boto.ec2.EC2Connection, 'profile_name'): 187 | self.fail_with_error("boto version must be >= 2.24 to use profile") 188 | 189 | # Cache 190 | if self.args.refresh_cache: 191 | self.do_api_calls_update_cache() 192 | elif not self.is_cache_valid(): 193 | self.do_api_calls_update_cache() 194 | 195 | # Data to print 196 | if self.args.host: 197 | data_to_print = self.get_host_info() 198 | 199 | elif self.args.list: 200 | # Display list of instances for inventory 201 | if self.inventory == self._empty_inventory(): 202 | data_to_print = self.get_inventory_from_cache() 203 | else: 204 | data_to_print = self.json_format_dict(self.inventory, True) 205 | 206 | print(data_to_print) 207 | 208 | def is_cache_valid(self): 209 | ''' Determines if the cache files have expired, or if it is still valid ''' 210 | 211 | if os.path.isfile(self.cache_path_cache): 212 | mod_time = os.path.getmtime(self.cache_path_cache) 213 | current_time = time() 214 | if (mod_time + self.cache_max_age) > current_time: 215 | if os.path.isfile(self.cache_path_index): 216 | return True 217 | 218 | return False 219 | 220 | def read_settings(self): 221 | ''' Reads the settings from the ec2.ini file ''' 222 | 223 | scriptbasename = __file__ 224 | scriptbasename = os.path.basename(scriptbasename) 225 | scriptbasename = scriptbasename.replace('.py', '') 226 | 227 | defaults = { 228 | 'ec2': { 229 | 'ini_fallback': os.path.join(os.path.dirname(__file__), 'ec2.ini'), 230 | 'ini_path': os.path.join(os.path.dirname(__file__), '%s.ini' % scriptbasename) 231 | } 232 | } 233 | 234 | if six.PY3: 235 | config = configparser.ConfigParser() 236 | else: 237 | config = configparser.SafeConfigParser() 238 | ec2_ini_path = 
os.environ.get('EC2_INI_PATH', defaults['ec2']['ini_path']) 239 | ec2_ini_path = os.path.expanduser(os.path.expandvars(ec2_ini_path)) 240 | 241 | if not os.path.isfile(ec2_ini_path): 242 | ec2_ini_path = os.path.expanduser(defaults['ec2']['ini_fallback']) 243 | 244 | config.read(ec2_ini_path) 245 | 246 | # is eucalyptus? 247 | self.eucalyptus_host = None 248 | self.eucalyptus = False 249 | if config.has_option('ec2', 'eucalyptus'): 250 | self.eucalyptus = config.getboolean('ec2', 'eucalyptus') 251 | if self.eucalyptus and config.has_option('ec2', 'eucalyptus_host'): 252 | self.eucalyptus_host = config.get('ec2', 'eucalyptus_host') 253 | 254 | # Regions 255 | self.regions = [] 256 | configRegions = config.get('ec2', 'regions') 257 | if (configRegions == 'all'): 258 | if self.eucalyptus_host: 259 | self.regions.append(boto.connect_euca(host=self.eucalyptus_host).region.name, **self.credentials) 260 | else: 261 | configRegions_exclude = config.get('ec2', 'regions_exclude') 262 | for regionInfo in ec2.regions(): 263 | if regionInfo.name not in configRegions_exclude: 264 | self.regions.append(regionInfo.name) 265 | else: 266 | self.regions = configRegions.split(",") 267 | if 'auto' in self.regions: 268 | env_region = os.environ.get('AWS_REGION') 269 | if env_region is None: 270 | env_region = os.environ.get('AWS_DEFAULT_REGION') 271 | self.regions = [env_region] 272 | 273 | # Destination addresses 274 | self.destination_variable = config.get('ec2', 'destination_variable') 275 | self.vpc_destination_variable = config.get('ec2', 'vpc_destination_variable') 276 | 277 | if config.has_option('ec2', 'hostname_variable'): 278 | self.hostname_variable = config.get('ec2', 'hostname_variable') 279 | else: 280 | self.hostname_variable = None 281 | 282 | if config.has_option('ec2', 'destination_format') and \ 283 | config.has_option('ec2', 'destination_format_tags'): 284 | self.destination_format = config.get('ec2', 'destination_format') 285 | self.destination_format_tags = 
config.get('ec2', 'destination_format_tags').split(',') 286 | else: 287 | self.destination_format = None 288 | self.destination_format_tags = None 289 | 290 | # Route53 291 | self.route53_enabled = config.getboolean('ec2', 'route53') 292 | if config.has_option('ec2', 'route53_hostnames'): 293 | self.route53_hostnames = config.get('ec2', 'route53_hostnames') 294 | else: 295 | self.route53_hostnames = None 296 | self.route53_excluded_zones = [] 297 | if config.has_option('ec2', 'route53_excluded_zones'): 298 | self.route53_excluded_zones.extend( 299 | config.get('ec2', 'route53_excluded_zones', '').split(',')) 300 | 301 | # Include RDS instances? 302 | self.rds_enabled = True 303 | if config.has_option('ec2', 'rds'): 304 | self.rds_enabled = config.getboolean('ec2', 'rds') 305 | 306 | # Include RDS cluster instances? 307 | if config.has_option('ec2', 'include_rds_clusters'): 308 | self.include_rds_clusters = config.getboolean('ec2', 'include_rds_clusters') 309 | else: 310 | self.include_rds_clusters = False 311 | 312 | # Include ElastiCache instances? 313 | self.elasticache_enabled = True 314 | if config.has_option('ec2', 'elasticache'): 315 | self.elasticache_enabled = config.getboolean('ec2', 'elasticache') 316 | 317 | # Return all EC2 instances? 318 | if config.has_option('ec2', 'all_instances'): 319 | self.all_instances = config.getboolean('ec2', 'all_instances') 320 | else: 321 | self.all_instances = False 322 | 323 | # Instance states to be gathered in inventory. Default is 'running'. 324 | # Setting 'all_instances' to 'yes' overrides this option. 
325 | ec2_valid_instance_states = [ 326 | 'pending', 327 | 'running', 328 | 'shutting-down', 329 | 'terminated', 330 | 'stopping', 331 | 'stopped' 332 | ] 333 | self.ec2_instance_states = [] 334 | if self.all_instances: 335 | self.ec2_instance_states = ec2_valid_instance_states 336 | elif config.has_option('ec2', 'instance_states'): 337 | for instance_state in config.get('ec2', 'instance_states').split(','): 338 | instance_state = instance_state.strip() 339 | if instance_state not in ec2_valid_instance_states: 340 | continue 341 | self.ec2_instance_states.append(instance_state) 342 | else: 343 | self.ec2_instance_states = ['running'] 344 | 345 | # Return all RDS instances? (if RDS is enabled) 346 | if config.has_option('ec2', 'all_rds_instances') and self.rds_enabled: 347 | self.all_rds_instances = config.getboolean('ec2', 'all_rds_instances') 348 | else: 349 | self.all_rds_instances = False 350 | 351 | # Return all ElastiCache replication groups? (if ElastiCache is enabled) 352 | if config.has_option('ec2', 'all_elasticache_replication_groups') and self.elasticache_enabled: 353 | self.all_elasticache_replication_groups = config.getboolean('ec2', 'all_elasticache_replication_groups') 354 | else: 355 | self.all_elasticache_replication_groups = False 356 | 357 | # Return all ElastiCache clusters? (if ElastiCache is enabled) 358 | if config.has_option('ec2', 'all_elasticache_clusters') and self.elasticache_enabled: 359 | self.all_elasticache_clusters = config.getboolean('ec2', 'all_elasticache_clusters') 360 | else: 361 | self.all_elasticache_clusters = False 362 | 363 | # Return all ElastiCache nodes? 
(if ElastiCache is enabled) 364 | if config.has_option('ec2', 'all_elasticache_nodes') and self.elasticache_enabled: 365 | self.all_elasticache_nodes = config.getboolean('ec2', 'all_elasticache_nodes') 366 | else: 367 | self.all_elasticache_nodes = False 368 | 369 | # boto configuration profile (prefer CLI argument then environment variables then config file) 370 | self.boto_profile = self.args.boto_profile or os.environ.get('AWS_PROFILE') 371 | if config.has_option('ec2', 'boto_profile') and not self.boto_profile: 372 | self.boto_profile = config.get('ec2', 'boto_profile') 373 | 374 | # AWS credentials (prefer environment variables) 375 | if not (self.boto_profile or os.environ.get('AWS_ACCESS_KEY_ID') or 376 | os.environ.get('AWS_PROFILE')): 377 | if config.has_option('credentials', 'aws_access_key_id'): 378 | aws_access_key_id = config.get('credentials', 'aws_access_key_id') 379 | else: 380 | aws_access_key_id = None 381 | if config.has_option('credentials', 'aws_secret_access_key'): 382 | aws_secret_access_key = config.get('credentials', 'aws_secret_access_key') 383 | else: 384 | aws_secret_access_key = None 385 | if config.has_option('credentials', 'aws_security_token'): 386 | aws_security_token = config.get('credentials', 'aws_security_token') 387 | else: 388 | aws_security_token = None 389 | if aws_access_key_id: 390 | self.credentials = { 391 | 'aws_access_key_id': aws_access_key_id, 392 | 'aws_secret_access_key': aws_secret_access_key 393 | } 394 | if aws_security_token: 395 | self.credentials['security_token'] = aws_security_token 396 | 397 | # Cache related 398 | cache_dir = os.path.expanduser(config.get('ec2', 'cache_path')) 399 | if self.boto_profile: 400 | cache_dir = os.path.join(cache_dir, 'profile_' + self.boto_profile) 401 | if not os.path.exists(cache_dir): 402 | os.makedirs(cache_dir) 403 | 404 | cache_name = 'ansible-ec2' 405 | cache_id = self.boto_profile or os.environ.get('AWS_ACCESS_KEY_ID', self.credentials.get('aws_access_key_id')) 406 | 
if cache_id: 407 | cache_name = '%s-%s' % (cache_name, cache_id) 408 | self.cache_path_cache = os.path.join(cache_dir, "%s.cache" % cache_name) 409 | self.cache_path_index = os.path.join(cache_dir, "%s.index" % cache_name) 410 | self.cache_max_age = config.getint('ec2', 'cache_max_age') 411 | 412 | if config.has_option('ec2', 'expand_csv_tags'): 413 | self.expand_csv_tags = config.getboolean('ec2', 'expand_csv_tags') 414 | else: 415 | self.expand_csv_tags = False 416 | 417 | # Configure nested groups instead of flat namespace. 418 | if config.has_option('ec2', 'nested_groups'): 419 | self.nested_groups = config.getboolean('ec2', 'nested_groups') 420 | else: 421 | self.nested_groups = False 422 | 423 | # Replace dash or not in group names 424 | if config.has_option('ec2', 'replace_dash_in_groups'): 425 | self.replace_dash_in_groups = config.getboolean('ec2', 'replace_dash_in_groups') 426 | else: 427 | self.replace_dash_in_groups = True 428 | 429 | # IAM role to assume for connection 430 | if config.has_option('ec2', 'iam_role'): 431 | self.iam_role = config.get('ec2', 'iam_role') 432 | else: 433 | self.iam_role = None 434 | 435 | # Configure which groups should be created. 
436 | group_by_options = [ 437 | 'group_by_instance_id', 438 | 'group_by_region', 439 | 'group_by_availability_zone', 440 | 'group_by_ami_id', 441 | 'group_by_instance_type', 442 | 'group_by_instance_state', 443 | 'group_by_key_pair', 444 | 'group_by_vpc_id', 445 | 'group_by_security_group', 446 | 'group_by_tag_keys', 447 | 'group_by_tag_none', 448 | 'group_by_route53_names', 449 | 'group_by_rds_engine', 450 | 'group_by_rds_parameter_group', 451 | 'group_by_elasticache_engine', 452 | 'group_by_elasticache_cluster', 453 | 'group_by_elasticache_parameter_group', 454 | 'group_by_elasticache_replication_group', 455 | 'group_by_aws_account', 456 | ] 457 | for option in group_by_options: 458 | if config.has_option('ec2', option): 459 | setattr(self, option, config.getboolean('ec2', option)) 460 | else: 461 | setattr(self, option, True) 462 | 463 | # Do we need to just include hosts that match a pattern? 464 | try: 465 | pattern_include = config.get('ec2', 'pattern_include') 466 | if pattern_include and len(pattern_include) > 0: 467 | self.pattern_include = re.compile(pattern_include) 468 | else: 469 | self.pattern_include = None 470 | except configparser.NoOptionError: 471 | self.pattern_include = None 472 | 473 | # Do we need to exclude hosts that match a pattern? 474 | try: 475 | pattern_exclude = config.get('ec2', 'pattern_exclude') 476 | if pattern_exclude and len(pattern_exclude) > 0: 477 | self.pattern_exclude = re.compile(pattern_exclude) 478 | else: 479 | self.pattern_exclude = None 480 | except configparser.NoOptionError: 481 | self.pattern_exclude = None 482 | 483 | # Do we want to stack multiple filters? 484 | if config.has_option('ec2', 'stack_filters'): 485 | self.stack_filters = config.getboolean('ec2', 'stack_filters') 486 | else: 487 | self.stack_filters = False 488 | 489 | # Instance filters (see boto and EC2 API docs). Ignore invalid filters. 
self.ec2_instance_filters = defaultdict(list)
        if config.has_option('ec2', 'instance_filters'):

            # Drop empty entries produced by stray commas.
            filters = [f for f in config.get('ec2', 'instance_filters').split(',') if f]

            for instance_filter in filters:
                instance_filter = instance_filter.strip()
                # Silently skip malformed entries (no '=' or empty key).
                if not instance_filter or '=' not in instance_filter:
                    continue
                # Split only on the first '=' so filter values may themselves contain '='.
                filter_key, filter_value = [x.strip() for x in instance_filter.split('=', 1)]
                if not filter_key:
                    continue
                self.ec2_instance_filters[filter_key].append(filter_value)

    def parse_cli_args(self):
        ''' Command line argument processing '''

        parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on EC2')
        # --list is the default mode; --host switches to per-host variables.
        parser.add_argument('--list', action='store_true', default=True,
                            help='List instances (default: True)')
        parser.add_argument('--host', action='store',
                            help='Get all the variables about a specific instance')
        parser.add_argument('--refresh-cache', action='store_true', default=False,
                            help='Force refresh of cache by making API requests to EC2 (default: False - use cache files)')
        parser.add_argument('--profile', '--boto-profile', action='store', dest='boto_profile',
                            help='Use boto profile for connections to EC2')
        self.args = parser.parse_args()

    def do_api_calls_update_cache(self):
        ''' Do API calls to each region, and save data in cache files '''

        if self.route53_enabled:
            self.get_route53_records()

        # Gather every enabled resource type for each configured region.
        for region in self.regions:
            self.get_instances_by_region(region)
            if self.rds_enabled:
                self.get_rds_instances_by_region(region)
            if self.elasticache_enabled:
                self.get_elasticache_clusters_by_region(region)
                self.get_elasticache_replication_groups_by_region(region)
            if self.include_rds_clusters:
                self.include_rds_clusters_by_region(region)

        self.write_to_cache(self.inventory,
self.cache_path_cache) 535 | self.write_to_cache(self.index, self.cache_path_index) 536 | 537 | def connect(self, region): 538 | ''' create connection to api server''' 539 | if self.eucalyptus: 540 | conn = boto.connect_euca(host=self.eucalyptus_host, **self.credentials) 541 | conn.APIVersion = '2010-08-31' 542 | else: 543 | conn = self.connect_to_aws(ec2, region) 544 | return conn 545 | 546 | def boto_fix_security_token_in_profile(self, connect_args): 547 | ''' monkey patch for boto issue boto/boto#2100 ''' 548 | profile = 'profile ' + self.boto_profile 549 | if boto.config.has_option(profile, 'aws_security_token'): 550 | connect_args['security_token'] = boto.config.get(profile, 'aws_security_token') 551 | return connect_args 552 | 553 | def connect_to_aws(self, module, region): 554 | connect_args = self.credentials 555 | 556 | # only pass the profile name if it's set (as it is not supported by older boto versions) 557 | if self.boto_profile: 558 | connect_args['profile_name'] = self.boto_profile 559 | self.boto_fix_security_token_in_profile(connect_args) 560 | 561 | if self.iam_role: 562 | sts_conn = sts.connect_to_region(region, **connect_args) 563 | role = sts_conn.assume_role(self.iam_role, 'ansible_dynamic_inventory') 564 | connect_args['aws_access_key_id'] = role.credentials.access_key 565 | connect_args['aws_secret_access_key'] = role.credentials.secret_key 566 | connect_args['security_token'] = role.credentials.session_token 567 | 568 | conn = module.connect_to_region(region, **connect_args) 569 | # connect_to_region will fail "silently" by returning None if the region name is wrong or not supported 570 | if conn is None: 571 | self.fail_with_error("region name: %s likely not supported, or AWS is down. connection to region failed." 
% region)
        return conn

    def get_instances_by_region(self, region):
        ''' Makes an AWS EC2 API call to the list of instances in a particular
        region '''

        try:
            conn = self.connect(region)
            reservations = []
            if self.ec2_instance_filters:
                if self.stack_filters:
                    # Stacked: apply all filters in one request (conditions combined).
                    filters_dict = {}
                    for filter_key, filter_values in self.ec2_instance_filters.items():
                        filters_dict[filter_key] = filter_values
                    reservations.extend(conn.get_all_instances(filters=filters_dict))
                else:
                    # Unstacked: one request per filter key; results are unioned.
                    for filter_key, filter_values in self.ec2_instance_filters.items():
                        reservations.extend(conn.get_all_instances(filters={filter_key: filter_values}))
            else:
                reservations = conn.get_all_instances()

            # Pull the tags back in a second step
            # AWS are on record as saying that the tags fetched in the first `get_all_instances` request are not
            # reliable and may be missing, and the only way to guarantee they are there is by calling `get_all_tags`
            instance_ids = []
            for reservation in reservations:
                instance_ids.extend([instance.id for instance in reservation.instances])

            # Batch the tag lookups. NOTE(review): 199 ids per request is
            # presumably to stay below an AWS per-request filter-value cap — confirm.
            max_filter_value = 199
            tags = []
            for i in range(0, len(instance_ids), max_filter_value):
                tags.extend(conn.get_all_tags(filters={'resource-type': 'instance', 'resource-id': instance_ids[i:i + max_filter_value]}))

            tags_by_instance_id = defaultdict(dict)
            for tag in tags:
                tags_by_instance_id[tag.res_id][tag.name] = tag.value

            # Remember the owning account id from the first reservation seen.
            if (not self.aws_account_id) and reservations:
                self.aws_account_id = reservations[0].owner_id

            for reservation in reservations:
                for instance in reservation.instances:
                    instance.tags = tags_by_instance_id[instance.id]
                    self.add_instance(instance, region)

        except boto.exception.BotoServerError as e:
            if e.error_code == 'AuthFailure':
                error = self.get_auth_error_message()
else: 621 | backend = 'Eucalyptus' if self.eucalyptus else 'AWS' 622 | error = "Error connecting to %s backend.\n%s" % (backend, e.message) 623 | self.fail_with_error(error, 'getting EC2 instances') 624 | 625 | def get_rds_instances_by_region(self, region): 626 | ''' Makes an AWS API call to the list of RDS instances in a particular 627 | region ''' 628 | 629 | if not HAS_BOTO3: 630 | self.fail_with_error("Working with RDS instances requires boto3 - please install boto3 and try again", 631 | "getting RDS instances") 632 | 633 | client = ec2_utils.boto3_inventory_conn('client', 'rds', region, **self.credentials) 634 | db_instances = client.describe_db_instances() 635 | 636 | try: 637 | conn = self.connect_to_aws(rds, region) 638 | if conn: 639 | marker = None 640 | while True: 641 | instances = conn.get_all_dbinstances(marker=marker) 642 | marker = instances.marker 643 | for index, instance in enumerate(instances): 644 | # Add tags to instances. 645 | instance.arn = db_instances['DBInstances'][index]['DBInstanceArn'] 646 | tags = client.list_tags_for_resource(ResourceName=instance.arn)['TagList'] 647 | instance.tags = {} 648 | for tag in tags: 649 | instance.tags[tag['Key']] = tag['Value'] 650 | 651 | self.add_rds_instance(instance, region) 652 | if not marker: 653 | break 654 | except boto.exception.BotoServerError as e: 655 | error = e.reason 656 | 657 | if e.error_code == 'AuthFailure': 658 | error = self.get_auth_error_message() 659 | elif e.error_code == "OptInRequired": 660 | error = "RDS hasn't been enabled for this account yet. 
" \ 661 | "You must either log in to the RDS service through the AWS console to enable it, " \ 662 | "or set 'rds = False' in ec2.ini" 663 | elif not e.reason == "Forbidden": 664 | error = "Looks like AWS RDS is down:\n%s" % e.message 665 | self.fail_with_error(error, 'getting RDS instances') 666 | 667 | def include_rds_clusters_by_region(self, region): 668 | if not HAS_BOTO3: 669 | self.fail_with_error("Working with RDS clusters requires boto3 - please install boto3 and try again", 670 | "getting RDS clusters") 671 | 672 | client = ec2_utils.boto3_inventory_conn('client', 'rds', region, **self.credentials) 673 | 674 | marker, clusters = '', [] 675 | while marker is not None: 676 | resp = client.describe_db_clusters(Marker=marker) 677 | clusters.extend(resp["DBClusters"]) 678 | marker = resp.get('Marker', None) 679 | 680 | account_id = boto.connect_iam().get_user().arn.split(':')[4] 681 | c_dict = {} 682 | for c in clusters: 683 | # remove these datetime objects as there is no serialisation to json 684 | # currently in place and we don't need the data yet 685 | if 'EarliestRestorableTime' in c: 686 | del c['EarliestRestorableTime'] 687 | if 'LatestRestorableTime' in c: 688 | del c['LatestRestorableTime'] 689 | 690 | if self.ec2_instance_filters == {}: 691 | matches_filter = True 692 | else: 693 | matches_filter = False 694 | 695 | try: 696 | # arn:aws:rds:::: 697 | tags = client.list_tags_for_resource( 698 | ResourceName='arn:aws:rds:' + region + ':' + account_id + ':cluster:' + c['DBClusterIdentifier']) 699 | c['Tags'] = tags['TagList'] 700 | 701 | if self.ec2_instance_filters: 702 | for filter_key, filter_values in self.ec2_instance_filters.items(): 703 | # get AWS tag key e.g. 
tag:env will be 'env' 704 | tag_name = filter_key.split(":", 1)[1] 705 | # Filter values is a list (if you put multiple values for the same tag name) 706 | matches_filter = any(d['Key'] == tag_name and d['Value'] in filter_values for d in c['Tags']) 707 | 708 | if matches_filter: 709 | # it matches a filter, so stop looking for further matches 710 | break 711 | 712 | except Exception as e: 713 | if e.message.find('DBInstanceNotFound') >= 0: 714 | # AWS RDS bug (2016-01-06) means deletion does not fully complete and leave an 'empty' cluster. 715 | # Ignore errors when trying to find tags for these 716 | pass 717 | 718 | # ignore empty clusters caused by AWS bug 719 | if len(c['DBClusterMembers']) == 0: 720 | continue 721 | elif matches_filter: 722 | c_dict[c['DBClusterIdentifier']] = c 723 | 724 | self.inventory['db_clusters'] = c_dict 725 | 726 | def get_elasticache_clusters_by_region(self, region): 727 | ''' Makes an AWS API call to the list of ElastiCache clusters (with 728 | nodes' info) in a particular region.''' 729 | 730 | # ElastiCache boto module doesn't provide a get_all_instances method, 731 | # that's why we need to call describe directly (it would be called by 732 | # the shorthand method anyway...) 733 | try: 734 | conn = self.connect_to_aws(elasticache, region) 735 | if conn: 736 | # show_cache_node_info = True 737 | # because we also want nodes' information 738 | response = conn.describe_cache_clusters(None, None, None, True) 739 | 740 | except boto.exception.BotoServerError as e: 741 | error = e.reason 742 | 743 | if e.error_code == 'AuthFailure': 744 | error = self.get_auth_error_message() 745 | elif e.error_code == "OptInRequired": 746 | error = "ElastiCache hasn't been enabled for this account yet. 
" \ 747 | "You must either log in to the ElastiCache service through the AWS console to enable it, " \ 748 | "or set 'elasticache = False' in ec2.ini" 749 | elif not e.reason == "Forbidden": 750 | error = "Looks like AWS ElastiCache is down:\n%s" % e.message 751 | self.fail_with_error(error, 'getting ElastiCache clusters') 752 | 753 | try: 754 | # Boto also doesn't provide wrapper classes to CacheClusters or 755 | # CacheNodes. Because of that we can't make use of the get_list 756 | # method in the AWSQueryConnection. Let's do the work manually 757 | clusters = response['DescribeCacheClustersResponse']['DescribeCacheClustersResult']['CacheClusters'] 758 | 759 | except KeyError as e: 760 | error = "ElastiCache query to AWS failed (unexpected format)." 761 | self.fail_with_error(error, 'getting ElastiCache clusters') 762 | 763 | for cluster in clusters: 764 | self.add_elasticache_cluster(cluster, region) 765 | 766 | def get_elasticache_replication_groups_by_region(self, region): 767 | ''' Makes an AWS API call to the list of ElastiCache replication groups 768 | in a particular region.''' 769 | 770 | # ElastiCache boto module doesn't provide a get_all_instances method, 771 | # that's why we need to call describe directly (it would be called by 772 | # the shorthand method anyway...) 773 | try: 774 | conn = self.connect_to_aws(elasticache, region) 775 | if conn: 776 | response = conn.describe_replication_groups() 777 | 778 | except boto.exception.BotoServerError as e: 779 | error = e.reason 780 | 781 | if e.error_code == 'AuthFailure': 782 | error = self.get_auth_error_message() 783 | if not e.reason == "Forbidden": 784 | error = "Looks like AWS ElastiCache [Replication Groups] is down:\n%s" % e.message 785 | self.fail_with_error(error, 'getting ElastiCache clusters') 786 | 787 | try: 788 | # Boto also doesn't provide wrapper classes to ReplicationGroups 789 | # Because of that we can't make use of the get_list method in the 790 | # AWSQueryConnection. 
Let's do the work manually
            replication_groups = response['DescribeReplicationGroupsResponse']['DescribeReplicationGroupsResult']['ReplicationGroups']

        except KeyError as e:
            error = "ElastiCache [Replication Groups] query to AWS failed (unexpected format)."
            self.fail_with_error(error, 'getting ElastiCache clusters')

        for replication_group in replication_groups:
            self.add_elasticache_replication_group(replication_group, region)

    def get_auth_error_message(self):
        ''' create an informative error message if there is an issue authenticating'''
        errors = ["Authentication error retrieving ec2 inventory."]
        if None in [os.environ.get('AWS_ACCESS_KEY_ID'), os.environ.get('AWS_SECRET_ACCESS_KEY')]:
            errors.append(' - No AWS_ACCESS_KEY_ID or AWS_SECRET_ACCESS_KEY environment vars found')
        else:
            errors.append(' - AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment vars found but may not be correct')

        # Point at the standard boto credential file locations as a hint.
        boto_paths = ['/etc/boto.cfg', '~/.boto', '~/.aws/credentials']
        boto_config_found = list(p for p in boto_paths if os.path.isfile(os.path.expanduser(p)))
        if len(boto_config_found) > 0:
            errors.append(" - Boto configs found at '%s', but the credentials contained may not be correct" % ', '.join(boto_config_found))
        else:
            errors.append(" - No Boto config found at any expected location '%s'" % ', '.join(boto_paths))

        return '\n'.join(errors)

    def fail_with_error(self, err_msg, err_operation=None):
        '''log an error to std err for ansible-playbook to consume and exit'''
        if err_operation:
            err_msg = 'ERROR: "{err_msg}", while: {err_operation}'.format(
                err_msg=err_msg, err_operation=err_operation)
        sys.stderr.write(err_msg)
        # Exit non-zero so the calling ansible run treats inventory as failed.
        sys.exit(1)

    def get_instance(self, region, instance_id):
        ''' Return the boto instance object for instance_id in region
        (None if no reservation contains it). '''
        conn = self.connect(region)

        reservations = conn.get_all_instances([instance_id])
        for
reservation in reservations: 830 | for instance in reservation.instances: 831 | return instance 832 | 833 | def add_instance(self, instance, region): 834 | ''' Adds an instance to the inventory and index, as long as it is 835 | addressable ''' 836 | 837 | # Only return instances with desired instance states 838 | if instance.state not in self.ec2_instance_states: 839 | return 840 | 841 | # Select the best destination address 842 | if self.destination_format and self.destination_format_tags: 843 | dest = self.destination_format.format(*[getattr(instance, 'tags').get(tag, '') for tag in self.destination_format_tags]) 844 | elif instance.subnet_id: 845 | dest = getattr(instance, self.vpc_destination_variable, None) 846 | if dest is None: 847 | dest = getattr(instance, 'tags').get(self.vpc_destination_variable, None) 848 | else: 849 | dest = getattr(instance, self.destination_variable, None) 850 | if dest is None: 851 | dest = getattr(instance, 'tags').get(self.destination_variable, None) 852 | 853 | if not dest: 854 | # Skip instances we cannot address (e.g. 
private VPC subnet) 855 | return 856 | 857 | # Set the inventory name 858 | hostname = None 859 | if self.hostname_variable: 860 | if self.hostname_variable.startswith('tag_'): 861 | hostname = instance.tags.get(self.hostname_variable[4:], None) 862 | else: 863 | hostname = getattr(instance, self.hostname_variable) 864 | 865 | # set the hostname from route53 866 | if self.route53_enabled and self.route53_hostnames: 867 | route53_names = self.get_instance_route53_names(instance) 868 | for name in route53_names: 869 | if name.endswith(self.route53_hostnames): 870 | hostname = name 871 | 872 | # If we can't get a nice hostname, use the destination address 873 | if not hostname: 874 | hostname = dest 875 | # to_safe strips hostname characters like dots, so don't strip route53 hostnames 876 | elif self.route53_enabled and self.route53_hostnames and hostname.endswith(self.route53_hostnames): 877 | hostname = hostname.lower() 878 | else: 879 | hostname = self.to_safe(hostname).lower() 880 | 881 | # if we only want to include hosts that match a pattern, skip those that don't 882 | if self.pattern_include and not self.pattern_include.match(hostname): 883 | return 884 | 885 | # if we need to exclude hosts that match a pattern, skip those 886 | if self.pattern_exclude and self.pattern_exclude.match(hostname): 887 | return 888 | 889 | # Add to index 890 | self.index[hostname] = [region, instance.id] 891 | 892 | # Inventory: Group by instance ID (always a group of 1) 893 | if self.group_by_instance_id: 894 | self.inventory[instance.id] = [hostname] 895 | if self.nested_groups: 896 | self.push_group(self.inventory, 'instances', instance.id) 897 | 898 | # Inventory: Group by region 899 | if self.group_by_region: 900 | self.push(self.inventory, region, hostname) 901 | if self.nested_groups: 902 | self.push_group(self.inventory, 'regions', region) 903 | 904 | # Inventory: Group by availability zone 905 | if self.group_by_availability_zone: 906 | self.push(self.inventory, 
instance.placement, hostname) 907 | if self.nested_groups: 908 | if self.group_by_region: 909 | self.push_group(self.inventory, region, instance.placement) 910 | self.push_group(self.inventory, 'zones', instance.placement) 911 | 912 | # Inventory: Group by Amazon Machine Image (AMI) ID 913 | if self.group_by_ami_id: 914 | ami_id = self.to_safe(instance.image_id) 915 | self.push(self.inventory, ami_id, hostname) 916 | if self.nested_groups: 917 | self.push_group(self.inventory, 'images', ami_id) 918 | 919 | # Inventory: Group by instance type 920 | if self.group_by_instance_type: 921 | type_name = self.to_safe('type_' + instance.instance_type) 922 | self.push(self.inventory, type_name, hostname) 923 | if self.nested_groups: 924 | self.push_group(self.inventory, 'types', type_name) 925 | 926 | # Inventory: Group by instance state 927 | if self.group_by_instance_state: 928 | state_name = self.to_safe('instance_state_' + instance.state) 929 | self.push(self.inventory, state_name, hostname) 930 | if self.nested_groups: 931 | self.push_group(self.inventory, 'instance_states', state_name) 932 | 933 | # Inventory: Group by key pair 934 | if self.group_by_key_pair and instance.key_name: 935 | key_name = self.to_safe('key_' + instance.key_name) 936 | self.push(self.inventory, key_name, hostname) 937 | if self.nested_groups: 938 | self.push_group(self.inventory, 'keys', key_name) 939 | 940 | # Inventory: Group by VPC 941 | if self.group_by_vpc_id and instance.vpc_id: 942 | vpc_id_name = self.to_safe('vpc_id_' + instance.vpc_id) 943 | self.push(self.inventory, vpc_id_name, hostname) 944 | if self.nested_groups: 945 | self.push_group(self.inventory, 'vpcs', vpc_id_name) 946 | 947 | # Inventory: Group by security group 948 | if self.group_by_security_group: 949 | try: 950 | for group in instance.groups: 951 | key = self.to_safe("security_group_" + group.name) 952 | self.push(self.inventory, key, hostname) 953 | if self.nested_groups: 954 | self.push_group(self.inventory, 
'security_groups', key) 955 | except AttributeError: 956 | self.fail_with_error('\n'.join(['Package boto seems a bit older.', 957 | 'Please upgrade boto >= 2.3.0.'])) 958 | 959 | # Inventory: Group by AWS account ID 960 | if self.group_by_aws_account: 961 | self.push(self.inventory, self.aws_account_id, dest) 962 | if self.nested_groups: 963 | self.push_group(self.inventory, 'accounts', self.aws_account_id) 964 | 965 | # Inventory: Group by tag keys 966 | if self.group_by_tag_keys: 967 | for k, v in instance.tags.items(): 968 | if self.expand_csv_tags and v and ',' in v: 969 | values = map(lambda x: x.strip(), v.split(',')) 970 | else: 971 | values = [v] 972 | 973 | for v in values: 974 | if v: 975 | key = self.to_safe("tag_" + k + "=" + v) 976 | else: 977 | key = self.to_safe("tag_" + k) 978 | self.push(self.inventory, key, hostname) 979 | if self.nested_groups: 980 | self.push_group(self.inventory, 'tags', self.to_safe("tag_" + k)) 981 | if v: 982 | self.push_group(self.inventory, self.to_safe("tag_" + k), key) 983 | 984 | # Inventory: Group by Route53 domain names if enabled 985 | if self.route53_enabled and self.group_by_route53_names: 986 | route53_names = self.get_instance_route53_names(instance) 987 | for name in route53_names: 988 | self.push(self.inventory, name, hostname) 989 | if self.nested_groups: 990 | self.push_group(self.inventory, 'route53', name) 991 | 992 | # Global Tag: instances without tags 993 | if self.group_by_tag_none and len(instance.tags) == 0: 994 | self.push(self.inventory, 'tag_none', hostname) 995 | if self.nested_groups: 996 | self.push_group(self.inventory, 'tags', 'tag_none') 997 | 998 | # Global Tag: tag all EC2 instances 999 | self.push(self.inventory, 'ec2', hostname) 1000 | 1001 | self.inventory["_meta"]["hostvars"][hostname] = self.get_host_info_dict_from_instance(instance) 1002 | self.inventory["_meta"]["hostvars"][hostname]['ansible_host'] = dest 1003 | 1004 | def add_rds_instance(self, instance, region): 1005 | ''' Adds an 
RDS instance to the inventory and index, as long as it is 1006 | addressable ''' 1007 | 1008 | # Only want available instances unless all_rds_instances is True 1009 | if not self.all_rds_instances and instance.status != 'available': 1010 | return 1011 | 1012 | # Select the best destination address 1013 | dest = instance.endpoint[0] 1014 | 1015 | if not dest: 1016 | # Skip instances we cannot address (e.g. private VPC subnet) 1017 | return 1018 | 1019 | # Set the inventory name 1020 | hostname = None 1021 | if self.hostname_variable: 1022 | if self.hostname_variable.startswith('tag_'): 1023 | hostname = instance.tags.get(self.hostname_variable[4:], None) 1024 | else: 1025 | hostname = getattr(instance, self.hostname_variable) 1026 | 1027 | # If we can't get a nice hostname, use the destination address 1028 | if not hostname: 1029 | hostname = dest 1030 | 1031 | hostname = self.to_safe(hostname).lower() 1032 | 1033 | # Add to index 1034 | self.index[hostname] = [region, instance.id] 1035 | 1036 | # Inventory: Group by instance ID (always a group of 1) 1037 | if self.group_by_instance_id: 1038 | self.inventory[instance.id] = [hostname] 1039 | if self.nested_groups: 1040 | self.push_group(self.inventory, 'instances', instance.id) 1041 | 1042 | # Inventory: Group by region 1043 | if self.group_by_region: 1044 | self.push(self.inventory, region, hostname) 1045 | if self.nested_groups: 1046 | self.push_group(self.inventory, 'regions', region) 1047 | 1048 | # Inventory: Group by availability zone 1049 | if self.group_by_availability_zone: 1050 | self.push(self.inventory, instance.availability_zone, hostname) 1051 | if self.nested_groups: 1052 | if self.group_by_region: 1053 | self.push_group(self.inventory, region, instance.availability_zone) 1054 | self.push_group(self.inventory, 'zones', instance.availability_zone) 1055 | 1056 | # Inventory: Group by instance type 1057 | if self.group_by_instance_type: 1058 | type_name = self.to_safe('type_' + instance.instance_class) 
1059 | self.push(self.inventory, type_name, hostname) 1060 | if self.nested_groups: 1061 | self.push_group(self.inventory, 'types', type_name) 1062 | 1063 | # Inventory: Group by VPC 1064 | if self.group_by_vpc_id and instance.subnet_group and instance.subnet_group.vpc_id: 1065 | vpc_id_name = self.to_safe('vpc_id_' + instance.subnet_group.vpc_id) 1066 | self.push(self.inventory, vpc_id_name, hostname) 1067 | if self.nested_groups: 1068 | self.push_group(self.inventory, 'vpcs', vpc_id_name) 1069 | 1070 | # Inventory: Group by security group 1071 | if self.group_by_security_group: 1072 | try: 1073 | if instance.security_group: 1074 | key = self.to_safe("security_group_" + instance.security_group.name) 1075 | self.push(self.inventory, key, hostname) 1076 | if self.nested_groups: 1077 | self.push_group(self.inventory, 'security_groups', key) 1078 | 1079 | except AttributeError: 1080 | self.fail_with_error('\n'.join(['Package boto seems a bit older.', 1081 | 'Please upgrade boto >= 2.3.0.'])) 1082 | 1083 | # Inventory: Group by engine 1084 | if self.group_by_rds_engine: 1085 | self.push(self.inventory, self.to_safe("rds_" + instance.engine), hostname) 1086 | if self.nested_groups: 1087 | self.push_group(self.inventory, 'rds_engines', self.to_safe("rds_" + instance.engine)) 1088 | 1089 | # Inventory: Group by parameter group 1090 | if self.group_by_rds_parameter_group: 1091 | self.push(self.inventory, self.to_safe("rds_parameter_group_" + instance.parameter_group.name), hostname) 1092 | if self.nested_groups: 1093 | self.push_group(self.inventory, 'rds_parameter_groups', self.to_safe("rds_parameter_group_" + instance.parameter_group.name)) 1094 | 1095 | # Global Tag: all RDS instances 1096 | self.push(self.inventory, 'rds', hostname) 1097 | 1098 | self.inventory["_meta"]["hostvars"][hostname] = self.get_host_info_dict_from_instance(instance) 1099 | self.inventory["_meta"]["hostvars"][hostname]['ansible_host'] = dest 1100 | 1101 | def add_elasticache_cluster(self, 
cluster, region): 1102 | ''' Adds an ElastiCache cluster to the inventory and index, as long as 1103 | it's nodes are addressable ''' 1104 | 1105 | # Only want available clusters unless all_elasticache_clusters is True 1106 | if not self.all_elasticache_clusters and cluster['CacheClusterStatus'] != 'available': 1107 | return 1108 | 1109 | # Select the best destination address 1110 | if 'ConfigurationEndpoint' in cluster and cluster['ConfigurationEndpoint']: 1111 | # Memcached cluster 1112 | dest = cluster['ConfigurationEndpoint']['Address'] 1113 | is_redis = False 1114 | else: 1115 | # Redis sigle node cluster 1116 | # Because all Redis clusters are single nodes, we'll merge the 1117 | # info from the cluster with info about the node 1118 | dest = cluster['CacheNodes'][0]['Endpoint']['Address'] 1119 | is_redis = True 1120 | 1121 | if not dest: 1122 | # Skip clusters we cannot address (e.g. private VPC subnet) 1123 | return 1124 | 1125 | # Add to index 1126 | self.index[dest] = [region, cluster['CacheClusterId']] 1127 | 1128 | # Inventory: Group by instance ID (always a group of 1) 1129 | if self.group_by_instance_id: 1130 | self.inventory[cluster['CacheClusterId']] = [dest] 1131 | if self.nested_groups: 1132 | self.push_group(self.inventory, 'instances', cluster['CacheClusterId']) 1133 | 1134 | # Inventory: Group by region 1135 | if self.group_by_region and not is_redis: 1136 | self.push(self.inventory, region, dest) 1137 | if self.nested_groups: 1138 | self.push_group(self.inventory, 'regions', region) 1139 | 1140 | # Inventory: Group by availability zone 1141 | if self.group_by_availability_zone and not is_redis: 1142 | self.push(self.inventory, cluster['PreferredAvailabilityZone'], dest) 1143 | if self.nested_groups: 1144 | if self.group_by_region: 1145 | self.push_group(self.inventory, region, cluster['PreferredAvailabilityZone']) 1146 | self.push_group(self.inventory, 'zones', cluster['PreferredAvailabilityZone']) 1147 | 1148 | # Inventory: Group by node type 
1149 | if self.group_by_instance_type and not is_redis: 1150 | type_name = self.to_safe('type_' + cluster['CacheNodeType']) 1151 | self.push(self.inventory, type_name, dest) 1152 | if self.nested_groups: 1153 | self.push_group(self.inventory, 'types', type_name) 1154 | 1155 | # Inventory: Group by VPC (information not available in the current 1156 | # AWS API version for ElastiCache) 1157 | 1158 | # Inventory: Group by security group 1159 | if self.group_by_security_group and not is_redis: 1160 | 1161 | # Check for the existence of the 'SecurityGroups' key and also if 1162 | # this key has some value. When the cluster is not placed in a SG 1163 | # the query can return None here and cause an error. 1164 | if 'SecurityGroups' in cluster and cluster['SecurityGroups'] is not None: 1165 | for security_group in cluster['SecurityGroups']: 1166 | key = self.to_safe("security_group_" + security_group['SecurityGroupId']) 1167 | self.push(self.inventory, key, dest) 1168 | if self.nested_groups: 1169 | self.push_group(self.inventory, 'security_groups', key) 1170 | 1171 | # Inventory: Group by engine 1172 | if self.group_by_elasticache_engine and not is_redis: 1173 | self.push(self.inventory, self.to_safe("elasticache_" + cluster['Engine']), dest) 1174 | if self.nested_groups: 1175 | self.push_group(self.inventory, 'elasticache_engines', self.to_safe(cluster['Engine'])) 1176 | 1177 | # Inventory: Group by parameter group 1178 | if self.group_by_elasticache_parameter_group: 1179 | self.push(self.inventory, self.to_safe("elasticache_parameter_group_" + cluster['CacheParameterGroup']['CacheParameterGroupName']), dest) 1180 | if self.nested_groups: 1181 | self.push_group(self.inventory, 'elasticache_parameter_groups', self.to_safe(cluster['CacheParameterGroup']['CacheParameterGroupName'])) 1182 | 1183 | # Inventory: Group by replication group 1184 | if self.group_by_elasticache_replication_group and 'ReplicationGroupId' in cluster and cluster['ReplicationGroupId']: 1185 | 
self.push(self.inventory, self.to_safe("elasticache_replication_group_" + cluster['ReplicationGroupId']), dest) 1186 | if self.nested_groups: 1187 | self.push_group(self.inventory, 'elasticache_replication_groups', self.to_safe(cluster['ReplicationGroupId'])) 1188 | 1189 | # Global Tag: all ElastiCache clusters 1190 | self.push(self.inventory, 'elasticache_clusters', cluster['CacheClusterId']) 1191 | 1192 | host_info = self.get_host_info_dict_from_describe_dict(cluster) 1193 | 1194 | self.inventory["_meta"]["hostvars"][dest] = host_info 1195 | 1196 | # Add the nodes 1197 | for node in cluster['CacheNodes']: 1198 | self.add_elasticache_node(node, cluster, region) 1199 | 1200 | def add_elasticache_node(self, node, cluster, region): 1201 | ''' Adds an ElastiCache node to the inventory and index, as long as 1202 | it is addressable ''' 1203 | 1204 | # Only want available nodes unless all_elasticache_nodes is True 1205 | if not self.all_elasticache_nodes and node['CacheNodeStatus'] != 'available': 1206 | return 1207 | 1208 | # Select the best destination address 1209 | dest = node['Endpoint']['Address'] 1210 | 1211 | if not dest: 1212 | # Skip nodes we cannot address (e.g. 
private VPC subnet) 1213 | return 1214 | 1215 | node_id = self.to_safe(cluster['CacheClusterId'] + '_' + node['CacheNodeId']) 1216 | 1217 | # Add to index 1218 | self.index[dest] = [region, node_id] 1219 | 1220 | # Inventory: Group by node ID (always a group of 1) 1221 | if self.group_by_instance_id: 1222 | self.inventory[node_id] = [dest] 1223 | if self.nested_groups: 1224 | self.push_group(self.inventory, 'instances', node_id) 1225 | 1226 | # Inventory: Group by region 1227 | if self.group_by_region: 1228 | self.push(self.inventory, region, dest) 1229 | if self.nested_groups: 1230 | self.push_group(self.inventory, 'regions', region) 1231 | 1232 | # Inventory: Group by availability zone 1233 | if self.group_by_availability_zone: 1234 | self.push(self.inventory, cluster['PreferredAvailabilityZone'], dest) 1235 | if self.nested_groups: 1236 | if self.group_by_region: 1237 | self.push_group(self.inventory, region, cluster['PreferredAvailabilityZone']) 1238 | self.push_group(self.inventory, 'zones', cluster['PreferredAvailabilityZone']) 1239 | 1240 | # Inventory: Group by node type 1241 | if self.group_by_instance_type: 1242 | type_name = self.to_safe('type_' + cluster['CacheNodeType']) 1243 | self.push(self.inventory, type_name, dest) 1244 | if self.nested_groups: 1245 | self.push_group(self.inventory, 'types', type_name) 1246 | 1247 | # Inventory: Group by VPC (information not available in the current 1248 | # AWS API version for ElastiCache) 1249 | 1250 | # Inventory: Group by security group 1251 | if self.group_by_security_group: 1252 | 1253 | # Check for the existence of the 'SecurityGroups' key and also if 1254 | # this key has some value. When the cluster is not placed in a SG 1255 | # the query can return None here and cause an error. 
1256 | if 'SecurityGroups' in cluster and cluster['SecurityGroups'] is not None: 1257 | for security_group in cluster['SecurityGroups']: 1258 | key = self.to_safe("security_group_" + security_group['SecurityGroupId']) 1259 | self.push(self.inventory, key, dest) 1260 | if self.nested_groups: 1261 | self.push_group(self.inventory, 'security_groups', key) 1262 | 1263 | # Inventory: Group by engine 1264 | if self.group_by_elasticache_engine: 1265 | self.push(self.inventory, self.to_safe("elasticache_" + cluster['Engine']), dest) 1266 | if self.nested_groups: 1267 | self.push_group(self.inventory, 'elasticache_engines', self.to_safe("elasticache_" + cluster['Engine'])) 1268 | 1269 | # Inventory: Group by parameter group (done at cluster level) 1270 | 1271 | # Inventory: Group by replication group (done at cluster level) 1272 | 1273 | # Inventory: Group by ElastiCache Cluster 1274 | if self.group_by_elasticache_cluster: 1275 | self.push(self.inventory, self.to_safe("elasticache_cluster_" + cluster['CacheClusterId']), dest) 1276 | 1277 | # Global Tag: all ElastiCache nodes 1278 | self.push(self.inventory, 'elasticache_nodes', dest) 1279 | 1280 | host_info = self.get_host_info_dict_from_describe_dict(node) 1281 | 1282 | if dest in self.inventory["_meta"]["hostvars"]: 1283 | self.inventory["_meta"]["hostvars"][dest].update(host_info) 1284 | else: 1285 | self.inventory["_meta"]["hostvars"][dest] = host_info 1286 | 1287 | def add_elasticache_replication_group(self, replication_group, region): 1288 | ''' Adds an ElastiCache replication group to the inventory and index ''' 1289 | 1290 | # Only want available clusters unless all_elasticache_replication_groups is True 1291 | if not self.all_elasticache_replication_groups and replication_group['Status'] != 'available': 1292 | return 1293 | 1294 | # Skip clusters we cannot address (e.g. 
private VPC subnet or clustered redis) 1295 | if replication_group['NodeGroups'][0]['PrimaryEndpoint'] is None or \ 1296 | replication_group['NodeGroups'][0]['PrimaryEndpoint']['Address'] is None: 1297 | return 1298 | 1299 | # Select the best destination address (PrimaryEndpoint) 1300 | dest = replication_group['NodeGroups'][0]['PrimaryEndpoint']['Address'] 1301 | 1302 | # Add to index 1303 | self.index[dest] = [region, replication_group['ReplicationGroupId']] 1304 | 1305 | # Inventory: Group by ID (always a group of 1) 1306 | if self.group_by_instance_id: 1307 | self.inventory[replication_group['ReplicationGroupId']] = [dest] 1308 | if self.nested_groups: 1309 | self.push_group(self.inventory, 'instances', replication_group['ReplicationGroupId']) 1310 | 1311 | # Inventory: Group by region 1312 | if self.group_by_region: 1313 | self.push(self.inventory, region, dest) 1314 | if self.nested_groups: 1315 | self.push_group(self.inventory, 'regions', region) 1316 | 1317 | # Inventory: Group by availability zone (doesn't apply to replication groups) 1318 | 1319 | # Inventory: Group by node type (doesn't apply to replication groups) 1320 | 1321 | # Inventory: Group by VPC (information not available in the current 1322 | # AWS API version for replication groups 1323 | 1324 | # Inventory: Group by security group (doesn't apply to replication groups) 1325 | # Check this value in cluster level 1326 | 1327 | # Inventory: Group by engine (replication groups are always Redis) 1328 | if self.group_by_elasticache_engine: 1329 | self.push(self.inventory, 'elasticache_redis', dest) 1330 | if self.nested_groups: 1331 | self.push_group(self.inventory, 'elasticache_engines', 'redis') 1332 | 1333 | # Global Tag: all ElastiCache clusters 1334 | self.push(self.inventory, 'elasticache_replication_groups', replication_group['ReplicationGroupId']) 1335 | 1336 | host_info = self.get_host_info_dict_from_describe_dict(replication_group) 1337 | 1338 | self.inventory["_meta"]["hostvars"][dest] = 
host_info 1339 | 1340 | def get_route53_records(self): 1341 | ''' Get and store the map of resource records to domain names that 1342 | point to them. ''' 1343 | 1344 | if self.boto_profile: 1345 | r53_conn = route53.Route53Connection(profile_name=self.boto_profile) 1346 | else: 1347 | r53_conn = route53.Route53Connection() 1348 | all_zones = r53_conn.get_zones() 1349 | 1350 | route53_zones = [zone for zone in all_zones if zone.name[:-1] not in self.route53_excluded_zones] 1351 | 1352 | self.route53_records = {} 1353 | 1354 | for zone in route53_zones: 1355 | rrsets = r53_conn.get_all_rrsets(zone.id) 1356 | 1357 | for record_set in rrsets: 1358 | record_name = record_set.name 1359 | 1360 | if record_name.endswith('.'): 1361 | record_name = record_name[:-1] 1362 | 1363 | for resource in record_set.resource_records: 1364 | self.route53_records.setdefault(resource, set()) 1365 | self.route53_records[resource].add(record_name) 1366 | 1367 | def get_instance_route53_names(self, instance): 1368 | ''' Check if an instance is referenced in the records we have from 1369 | Route53. If it is, return the list of domain names pointing to said 1370 | instance. If nothing points to it, return an empty list. 
''' 1371 | 1372 | instance_attributes = ['public_dns_name', 'private_dns_name', 1373 | 'ip_address', 'private_ip_address'] 1374 | 1375 | name_list = set() 1376 | 1377 | for attrib in instance_attributes: 1378 | try: 1379 | value = getattr(instance, attrib) 1380 | except AttributeError: 1381 | continue 1382 | 1383 | if value in self.route53_records: 1384 | name_list.update(self.route53_records[value]) 1385 | 1386 | return list(name_list) 1387 | 1388 | def get_host_info_dict_from_instance(self, instance): 1389 | instance_vars = {} 1390 | for key in vars(instance): 1391 | value = getattr(instance, key) 1392 | key = self.to_safe('ec2_' + key) 1393 | 1394 | # Handle complex types 1395 | # state/previous_state changed to properties in boto in https://github.com/boto/boto/commit/a23c379837f698212252720d2af8dec0325c9518 1396 | if key == 'ec2__state': 1397 | instance_vars['ec2_state'] = instance.state or '' 1398 | instance_vars['ec2_state_code'] = instance.state_code 1399 | elif key == 'ec2__previous_state': 1400 | instance_vars['ec2_previous_state'] = instance.previous_state or '' 1401 | instance_vars['ec2_previous_state_code'] = instance.previous_state_code 1402 | elif isinstance(value, (int, bool)): 1403 | instance_vars[key] = value 1404 | elif isinstance(value, six.string_types): 1405 | instance_vars[key] = value.strip() 1406 | elif value is None: 1407 | instance_vars[key] = '' 1408 | elif key == 'ec2_region': 1409 | instance_vars[key] = value.name 1410 | elif key == 'ec2__placement': 1411 | instance_vars['ec2_placement'] = value.zone 1412 | elif key == 'ec2_tags': 1413 | for k, v in value.items(): 1414 | if self.expand_csv_tags and ',' in v: 1415 | v = list(map(lambda x: x.strip(), v.split(','))) 1416 | key = self.to_safe('ec2_tag_' + k) 1417 | instance_vars[key] = v 1418 | elif key == 'ec2_groups': 1419 | group_ids = [] 1420 | group_names = [] 1421 | for group in value: 1422 | group_ids.append(group.id) 1423 | group_names.append(group.name) 1424 | 
instance_vars["ec2_security_group_ids"] = ','.join([str(i) for i in group_ids]) 1425 | instance_vars["ec2_security_group_names"] = ','.join([str(i) for i in group_names]) 1426 | elif key == 'ec2_block_device_mapping': 1427 | instance_vars["ec2_block_devices"] = {} 1428 | for k, v in value.items(): 1429 | instance_vars["ec2_block_devices"][os.path.basename(k)] = v.volume_id 1430 | else: 1431 | pass 1432 | # TODO Product codes if someone finds them useful 1433 | # print key 1434 | # print type(value) 1435 | # print value 1436 | 1437 | instance_vars[self.to_safe('ec2_account_id')] = self.aws_account_id 1438 | 1439 | return instance_vars 1440 | 1441 | def get_host_info_dict_from_describe_dict(self, describe_dict): 1442 | ''' Parses the dictionary returned by the API call into a flat list 1443 | of parameters. This method should be used only when 'describe' is 1444 | used directly because Boto doesn't provide specific classes. ''' 1445 | 1446 | # I really don't agree with prefixing everything with 'ec2' 1447 | # because EC2, RDS and ElastiCache are different services. 1448 | # I'm just following the pattern used until now to not break any 1449 | # compatibility. 
1450 | 1451 | host_info = {} 1452 | for key in describe_dict: 1453 | value = describe_dict[key] 1454 | key = self.to_safe('ec2_' + self.uncammelize(key)) 1455 | 1456 | # Handle complex types 1457 | 1458 | # Target: Memcached Cache Clusters 1459 | if key == 'ec2_configuration_endpoint' and value: 1460 | host_info['ec2_configuration_endpoint_address'] = value['Address'] 1461 | host_info['ec2_configuration_endpoint_port'] = value['Port'] 1462 | 1463 | # Target: Cache Nodes and Redis Cache Clusters (single node) 1464 | if key == 'ec2_endpoint' and value: 1465 | host_info['ec2_endpoint_address'] = value['Address'] 1466 | host_info['ec2_endpoint_port'] = value['Port'] 1467 | 1468 | # Target: Redis Replication Groups 1469 | if key == 'ec2_node_groups' and value: 1470 | host_info['ec2_endpoint_address'] = value[0]['PrimaryEndpoint']['Address'] 1471 | host_info['ec2_endpoint_port'] = value[0]['PrimaryEndpoint']['Port'] 1472 | replica_count = 0 1473 | for node in value[0]['NodeGroupMembers']: 1474 | if node['CurrentRole'] == 'primary': 1475 | host_info['ec2_primary_cluster_address'] = node['ReadEndpoint']['Address'] 1476 | host_info['ec2_primary_cluster_port'] = node['ReadEndpoint']['Port'] 1477 | host_info['ec2_primary_cluster_id'] = node['CacheClusterId'] 1478 | elif node['CurrentRole'] == 'replica': 1479 | host_info['ec2_replica_cluster_address_' + str(replica_count)] = node['ReadEndpoint']['Address'] 1480 | host_info['ec2_replica_cluster_port_' + str(replica_count)] = node['ReadEndpoint']['Port'] 1481 | host_info['ec2_replica_cluster_id_' + str(replica_count)] = node['CacheClusterId'] 1482 | replica_count += 1 1483 | 1484 | # Target: Redis Replication Groups 1485 | if key == 'ec2_member_clusters' and value: 1486 | host_info['ec2_member_clusters'] = ','.join([str(i) for i in value]) 1487 | 1488 | # Target: All Cache Clusters 1489 | elif key == 'ec2_cache_parameter_group': 1490 | host_info["ec2_cache_node_ids_to_reboot"] = ','.join([str(i) for i in 
value['CacheNodeIdsToReboot']]) 1491 | host_info['ec2_cache_parameter_group_name'] = value['CacheParameterGroupName'] 1492 | host_info['ec2_cache_parameter_apply_status'] = value['ParameterApplyStatus'] 1493 | 1494 | # Target: Almost everything 1495 | elif key == 'ec2_security_groups': 1496 | 1497 | # Skip if SecurityGroups is None 1498 | # (it is possible to have the key defined but no value in it). 1499 | if value is not None: 1500 | sg_ids = [] 1501 | for sg in value: 1502 | sg_ids.append(sg['SecurityGroupId']) 1503 | host_info["ec2_security_group_ids"] = ','.join([str(i) for i in sg_ids]) 1504 | 1505 | # Target: Everything 1506 | # Preserve booleans and integers 1507 | elif isinstance(value, (int, bool)): 1508 | host_info[key] = value 1509 | 1510 | # Target: Everything 1511 | # Sanitize string values 1512 | elif isinstance(value, six.string_types): 1513 | host_info[key] = value.strip() 1514 | 1515 | # Target: Everything 1516 | # Replace None by an empty string 1517 | elif value is None: 1518 | host_info[key] = '' 1519 | 1520 | else: 1521 | # Remove non-processed complex types 1522 | pass 1523 | 1524 | return host_info 1525 | 1526 | def get_host_info(self): 1527 | ''' Get variables about a specific host ''' 1528 | 1529 | if len(self.index) == 0: 1530 | # Need to load index from cache 1531 | self.load_index_from_cache() 1532 | 1533 | if self.args.host not in self.index: 1534 | # try updating the cache 1535 | self.do_api_calls_update_cache() 1536 | if self.args.host not in self.index: 1537 | # host might not exist anymore 1538 | return self.json_format_dict({}, True) 1539 | 1540 | (region, instance_id) = self.index[self.args.host] 1541 | 1542 | instance = self.get_instance(region, instance_id) 1543 | return self.json_format_dict(self.get_host_info_dict_from_instance(instance), True) 1544 | 1545 | def push(self, my_dict, key, element): 1546 | ''' Push an element onto an array that may not have been defined in 1547 | the dict ''' 1548 | group_info = 
my_dict.setdefault(key, []) 1549 | if isinstance(group_info, dict): 1550 | host_list = group_info.setdefault('hosts', []) 1551 | host_list.append(element) 1552 | else: 1553 | group_info.append(element) 1554 | 1555 | def push_group(self, my_dict, key, element): 1556 | ''' Push a group as a child of another group. ''' 1557 | parent_group = my_dict.setdefault(key, {}) 1558 | if not isinstance(parent_group, dict): 1559 | parent_group = my_dict[key] = {'hosts': parent_group} 1560 | child_groups = parent_group.setdefault('children', []) 1561 | if element not in child_groups: 1562 | child_groups.append(element) 1563 | 1564 | def get_inventory_from_cache(self): 1565 | ''' Reads the inventory from the cache file and returns it as a JSON 1566 | object ''' 1567 | 1568 | with open(self.cache_path_cache, 'r') as f: 1569 | json_inventory = f.read() 1570 | return json_inventory 1571 | 1572 | def load_index_from_cache(self): 1573 | ''' Reads the index from the cache file sets self.index ''' 1574 | 1575 | with open(self.cache_path_index, 'rb') as f: 1576 | self.index = json.load(f) 1577 | 1578 | def write_to_cache(self, data, filename): 1579 | ''' Writes data in JSON format to a file ''' 1580 | 1581 | json_data = self.json_format_dict(data, True) 1582 | with open(filename, 'w') as f: 1583 | f.write(json_data) 1584 | 1585 | def uncammelize(self, key): 1586 | temp = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', key) 1587 | return re.sub('([a-z0-9])([A-Z])', r'\1_\2', temp).lower() 1588 | 1589 | def to_safe(self, word): 1590 | ''' Converts 'bad' characters in a string to underscores so they can be used as Ansible groups ''' 1591 | regex = "[^A-Za-z0-9\_" 1592 | if not self.replace_dash_in_groups: 1593 | regex += "\-" 1594 | return re.sub(regex + "]", "_", word) 1595 | 1596 | def json_format_dict(self, data, pretty=False): 1597 | ''' Converts a dict to a JSON object and dumps it as a formatted 1598 | string ''' 1599 | 1600 | if pretty: 1601 | return json.dumps(data, sort_keys=True, indent=2) 
1602 | else: 1603 | return json.dumps(data) 1604 | 1605 | 1606 | if __name__ == '__main__': 1607 | # Run the script 1608 | Ec2Inventory() --------------------------------------------------------------------------------