├── .gitignore ├── LICENSE ├── Pipfile ├── Pipfile.lock ├── README.md ├── ansible.cfg ├── aurora.yml ├── elasticsearch.yml ├── group_vars ├── all.yml ├── aurora.cluster.yml ├── elasticsearch.yml └── project.ansibled.yml ├── host_vars ├── big.elasticsearch.ansibled.yml ├── cluster.aurora.ansibled.yml ├── dba.aurora.ansibled.yml ├── dbb.aurora.ansibled.yml ├── small.elasticsearch.ansibled.yml └── vpc.ansibled.yml ├── hosts.inventory ├── tasks ├── aurora │ ├── files │ │ ├── create-db-cluster.json.j2 │ │ └── create-db-instance.json.j2 │ ├── setup.cluster.yml │ ├── setup.db.yml │ ├── setup.parameter-groups.yml │ └── setup.subnet-group.yml ├── elasticsearch │ ├── files │ │ ├── create-elasticsearch-domain.json.j2 │ │ └── elasticsearch-access-policy.json.j2 │ ├── setup.cluster.yml │ ├── setup.role.yml │ └── setup.yml ├── logentries │ ├── log.create.yml │ ├── log.lookup.yml │ └── main.yml └── vpc │ ├── facts.yml │ ├── setup.gateways.yml │ └── setup.vpc.yml └── vpc.yml /.gitignore: -------------------------------------------------------------------------------- 1 | # ansible 2 | *.retry 3 | *.vault 4 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2018 tomwwright 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 
14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /Pipfile: -------------------------------------------------------------------------------- 1 | [[source]] 2 | 3 | url = "https://pypi.python.org/simple" 4 | verify_ssl = true 5 | name = "pypi" 6 | 7 | 8 | [dev-packages] 9 | 10 | 11 | 12 | [packages] 13 | 14 | ansible = "==2.4.3.0" 15 | awscli = "==1.14.58" 16 | "boto3" = "==1.6.11" 17 | boto = "==2.48.0" 18 | 19 | 20 | [requires] 21 | 22 | python_version = "2.7" 23 | -------------------------------------------------------------------------------- /Pipfile.lock: -------------------------------------------------------------------------------- 1 | { 2 | "_meta": { 3 | "hash": { 4 | "sha256": "8e469844e923ea9233bda09f73121adf1ed7c42b484bc9c4cfdf2ce3d6305d90" 5 | }, 6 | "pipfile-spec": 6, 7 | "requires": { 8 | "python_version": "2.7" 9 | }, 10 | "sources": [ 11 | { 12 | "name": "pypi", 13 | "url": "https://pypi.python.org/simple", 14 | "verify_ssl": true 15 | } 16 | ] 17 | }, 18 | "default": { 19 | "ansible": { 20 | "hashes": [ 21 | "sha256:0e98b3a56928d03979d5f8e7ae5d8e326939111b298729b03f00b3ad8f998a3d" 22 | ], 23 | "index": "pypi", 24 | "version": "==2.4.3.0" 25 | }, 26 | "asn1crypto": { 27 | "hashes": [ 28 | "sha256:2f1adbb7546ed199e3c90ef23ec95c5cf3585bac7d11fb7eb562a3fe89c64e87", 29 | "sha256:9d5c20441baf0cb60a4ac34cc447c6c189024b6b4c6cd7877034f4965c464e49" 30 | ], 31 | "version": "==0.24.0" 32 | }, 33 | "awscli": { 34 | "hashes": [ 35 | 
"sha256:5753c4b1ef06cd27d3a683af4da199b6ac0243316577e51f18bf16e7e19b5136", 36 | "sha256:6b06744dc01d464fdf001a2185ab05dbe20bdc79ad7bef91ac113d9717979206" 37 | ], 38 | "index": "pypi", 39 | "version": "==1.14.58" 40 | }, 41 | "bcrypt": { 42 | "hashes": [ 43 | "sha256:01477981abf74e306e8ee31629a940a5e9138de000c6b0898f7f850461c4a0a5", 44 | "sha256:054d6e0acaea429e6da3613fcd12d05ee29a531794d96f6ab959f29a39f33391", 45 | "sha256:0872eeecdf9a429c1420158500eedb323a132bc5bf3339475151c52414729e70", 46 | "sha256:09a3b8c258b815eadb611bad04ca15ec77d86aa9ce56070e1af0d5932f17642a", 47 | "sha256:0f317e4ffbdd15c3c0f8ab5fbd86aa9aabc7bea18b5cc5951b456fe39e9f738c", 48 | "sha256:2788c32673a2ad0062bea850ab73cffc0dba874db10d7a3682b6f2f280553f20", 49 | "sha256:321d4d48be25b8d77594d8324c0585c80ae91ac214f62db9098734e5e7fb280f", 50 | "sha256:346d6f84ff0b493dbc90c6b77136df83e81f903f0b95525ee80e5e6d5e4eef84", 51 | "sha256:34dd60b90b0f6de94a89e71fcd19913a30e83091c8468d0923a93a0cccbfbbff", 52 | "sha256:3b4c23300c4eded8895442c003ae9b14328ae69309ac5867e7530de8bdd7875d", 53 | "sha256:43d1960e7db14042319c46925892d5fa99b08ff21d57482e6f5328a1aca03588", 54 | "sha256:49e96267cd9be55a349fd74f9852eb9ae2c427cd7f6455d0f1765d7332292832", 55 | "sha256:67ed1a374c9155ec0840214ce804616de49c3df9c5bc66740687c1c9b1cd9e8d", 56 | "sha256:6efd9ca20aefbaf2e7e6817a2c6ed4a50ff6900fafdea1bcb1d0e9471743b144", 57 | "sha256:8569844a5d8e1fdde4d7712a05ab2e6061343ac34af6e7e3d7935b2bd1907bfd", 58 | "sha256:8629ea6a8a59f865add1d6a87464c3c676e60101b8d16ef404d0a031424a8491", 59 | "sha256:988cac675e25133d01a78f2286189c1f01974470817a33eaf4cfee573cfb72a5", 60 | "sha256:9a6fedda73aba1568962f7543a1f586051c54febbc74e87769bad6a4b8587c39", 61 | "sha256:9eced8962ce3b7124fe20fd358cf8c7470706437fa064b9874f849ad4c5866fc", 62 | "sha256:a005ed6163490988711ff732386b08effcbf8df62ae93dd1e5bda0714fad8afb", 63 | "sha256:ae35dbcb6b011af6c840893b32399252d81ff57d52c13e12422e16b5fea1d0fb", 64 | 
"sha256:b1e8491c6740f21b37cca77bc64677696a3fb9f32360794d57fa8477b7329eda", 65 | "sha256:c906bdb482162e9ef48eea9f8c0d967acceb5c84f2d25574c7d2a58d04861df1", 66 | "sha256:cb18ffdc861dbb244f14be32c47ab69604d0aca415bee53485fcea4f8e93d5ef", 67 | "sha256:d86da365dda59010ba0d1ac45aa78390f56bf7f992e65f70b3b081d5e5257b09", 68 | "sha256:e22f0997622e1ceec834fd25947dc2ee2962c2133ea693d61805bc867abaf7ea", 69 | "sha256:f2fe545d27a619a552396533cddf70d83cecd880a611cdfdbb87ca6aec52f66b", 70 | "sha256:f7fd3ed3745fe6e81e28dc3b3d76cce31525a91f32a387e1febd6b982caf8cdb", 71 | "sha256:f9210820ee4818d84658ed7df16a7f30c9fba7d8b139959950acef91745cc0f7" 72 | ], 73 | "version": "==3.1.4" 74 | }, 75 | "boto": { 76 | "hashes": [ 77 | "sha256:13be844158d1bd80a94c972c806ec8381b9ea72035aa06123c5db6bc6a6f3ead", 78 | "sha256:deb8925b734b109679e3de65856018996338758f4b916ff4fe7bb62b6d7000d1" 79 | ], 80 | "index": "pypi", 81 | "version": "==2.48.0" 82 | }, 83 | "boto3": { 84 | "hashes": [ 85 | "sha256:474e1333f17bb2e361555c9f98c5db57581de32f68713bfb1f8bbbb647580ecc", 86 | "sha256:8cac0e97b9c7bf78a933644f2c801f049c5acce8f2a0105104811fab3829712e" 87 | ], 88 | "index": "pypi", 89 | "version": "==1.6.11" 90 | }, 91 | "botocore": { 92 | "hashes": [ 93 | "sha256:38471b40c32eb20cbac8ac13b85d82d2a313f06a6a89e26fbe72160d81cb3d66", 94 | "sha256:5a79b44c6fa1f3befae37519add0b02307cd3425308ff8fd6793fe7a349f0c54" 95 | ], 96 | "version": "==1.9.11" 97 | }, 98 | "cffi": { 99 | "hashes": [ 100 | "sha256:151b7eefd035c56b2b2e1eb9963c90c6302dc15fbd8c1c0a83a163ff2c7d7743", 101 | "sha256:1553d1e99f035ace1c0544050622b7bc963374a00c467edafac50ad7bd276aef", 102 | "sha256:1b0493c091a1898f1136e3f4f991a784437fac3673780ff9de3bcf46c80b6b50", 103 | "sha256:2ba8a45822b7aee805ab49abfe7eec16b90587f7f26df20c71dd89e45a97076f", 104 | "sha256:3c85641778460581c42924384f5e68076d724ceac0f267d66c757f7535069c93", 105 | "sha256:3eb6434197633b7748cea30bf0ba9f66727cdce45117a712b29a443943733257", 106 | 
"sha256:4c91af6e967c2015729d3e69c2e51d92f9898c330d6a851bf8f121236f3defd3", 107 | "sha256:770f3782b31f50b68627e22f91cb182c48c47c02eb405fd689472aa7b7aa16dc", 108 | "sha256:79f9b6f7c46ae1f8ded75f68cf8ad50e5729ed4d590c74840471fc2823457d04", 109 | "sha256:7a33145e04d44ce95bcd71e522b478d282ad0eafaf34fe1ec5bbd73e662f22b6", 110 | "sha256:857959354ae3a6fa3da6651b966d13b0a8bed6bbc87a0de7b38a549db1d2a359", 111 | "sha256:87f37fe5130574ff76c17cab61e7d2538a16f843bb7bca8ebbc4b12de3078596", 112 | "sha256:95d5251e4b5ca00061f9d9f3d6fe537247e145a8524ae9fd30a2f8fbce993b5b", 113 | "sha256:9d1d3e63a4afdc29bd76ce6aa9d58c771cd1599fbba8cf5057e7860b203710dd", 114 | "sha256:a36c5c154f9d42ec176e6e620cb0dd275744aa1d804786a71ac37dc3661a5e95", 115 | "sha256:ae5e35a2c189d397b91034642cb0eab0e346f776ec2eb44a49a459e6615d6e2e", 116 | "sha256:b0f7d4a3df8f06cf49f9f121bead236e328074de6449866515cea4907bbc63d6", 117 | "sha256:b75110fb114fa366b29a027d0c9be3709579602ae111ff61674d28c93606acca", 118 | "sha256:ba5e697569f84b13640c9e193170e89c13c6244c24400fc57e88724ef610cd31", 119 | "sha256:be2a9b390f77fd7676d80bc3cdc4f8edb940d8c198ed2d8c0be1319018c778e1", 120 | "sha256:d5d8555d9bfc3f02385c1c37e9f998e2011f0db4f90e250e5bc0c0a85a813085", 121 | "sha256:e55e22ac0a30023426564b1059b035973ec82186ddddbac867078435801c7801", 122 | "sha256:e90f17980e6ab0f3c2f3730e56d1fe9bcba1891eeea58966e89d352492cc74f4", 123 | "sha256:ecbb7b01409e9b782df5ded849c178a0aa7c906cf8c5a67368047daab282b184", 124 | "sha256:ed01918d545a38998bfa5902c7c00e0fee90e957ce036a4000a88e3fe2264917", 125 | "sha256:edabd457cd23a02965166026fd9bfd196f4324fe6032e866d0f3bd0301cd486f", 126 | "sha256:fdf1c1dc5bafc32bc5d08b054f94d659422b05aba244d6be4ddc1c72d9aa70fb" 127 | ], 128 | "version": "==1.11.5" 129 | }, 130 | "colorama": { 131 | "hashes": [ 132 | "sha256:a4c0f5bc358a62849653471e309dcc991223cf86abafbec17cd8f41327279e89", 133 | "sha256:e043c8d32527607223652021ff648fbb394d5e19cba9f1a698670b338c9d782b", 134 | 
"sha256:f4945bf52ae49da0728fe730a33c18744803752fc948f154f29dc0c4f9f2f9cc" 135 | ], 136 | "version": "==0.3.7" 137 | }, 138 | "cryptography": { 139 | "hashes": [ 140 | "sha256:0222f19fa29c609b4be4bc260db6ab9bfabca1b2626ebf97875cca21ac60d968", 141 | "sha256:064e820797b6992104041e74a32f912b4e4279da4e7821daa31b580de1fa910c", 142 | "sha256:0e426fcb6e6f9100b3e3373458888cc6deb5934e6c4a26996ad720de35bce276", 143 | "sha256:0f11c46e22bb4c2f6811ae408fb72e262116e864cf1e75d9503bd6a5ced04fb4", 144 | "sha256:252185cbe85c057796458f365425d45d7cd7f748ca53dbc906359a22e156cfd2", 145 | "sha256:2ec7cc10a65b6ea9efd46e9c6f247e01c707c92074d2ba0be5c2641defe858f7", 146 | "sha256:30427c764aa0fcc6983af78bcbd540e10a87a094cbe428006329c6666ce00db8", 147 | "sha256:527c096af06aa0620d3d361b17e6d314e9d4800ce53c2ad841d9fe5a82488acd", 148 | "sha256:8c0f5b4001fcaf742f9d74b483249d4675de2f837146baf8f2e4a7999993fcb4", 149 | "sha256:a0d0f1a7aebeb9a4145ee09a4667a7510caf97bd127c4b5d6332d013050a7567", 150 | "sha256:aead0332e00ae18045f3d4a8eea3891be095aa5bb3a74ea0affa49fe80c40ecd", 151 | "sha256:b323325ea2dcacfdf3ff8f82a1069ab9e65353cc433625c4ebe54ed70ced4137", 152 | "sha256:cc5a53061d65bc8f80b08645b32c814071630e763a897b0db72fbb0e170fc93f", 153 | "sha256:ee37235d837c9b6bdd921d396017b65df67c4c16befc1772be5266304fdaf427", 154 | "sha256:f1d2d8e808523bac32737d167f3b7370429a9e575d156e887779310e57e41b5d", 155 | "sha256:f5a0279e362c37e2150a32fe35ec20226e9237b6c9927fce8d53ef8e49e64f48", 156 | "sha256:f76e27b5a57337352b59b79a342264b9a8557dc11174e6ec222d0b5e266b132f" 157 | ], 158 | "version": "==2.2" 159 | }, 160 | "docutils": { 161 | "hashes": [ 162 | "sha256:02aec4bd92ab067f6ff27a38a38a41173bf01bed8f89157768c1573f53e474a6", 163 | "sha256:51e64ef2ebfb29cae1faa133b3710143496eca21c530f3f71424d77687764274", 164 | "sha256:7a4bd47eaf6596e1295ecb11361139febe29b084a87bf005bf899f9a42edc3c6" 165 | ], 166 | "version": "==0.14" 167 | }, 168 | "enum34": { 169 | "hashes": [ 170 | 
"sha256:2d81cbbe0e73112bdfe6ef8576f2238f2ba27dd0d55752a776c41d38b7da2850", 171 | "sha256:644837f692e5f550741432dd3f223bbb9852018674981b1664e5dc339387588a", 172 | "sha256:6bd0f6ad48ec2aa117d3d141940d484deccda84d4fcd884f5c3d93c23ecd8c79", 173 | "sha256:8ad8c4783bf61ded74527bffb48ed9b54166685e4230386a9ed9b1279e2df5b1" 174 | ], 175 | "markers": "python_version < '3'", 176 | "version": "==1.1.6" 177 | }, 178 | "futures": { 179 | "hashes": [ 180 | "sha256:9ec02aa7d674acb8618afb127e27fde7fc68994c0437ad759fa094a574adb265", 181 | "sha256:ec0a6cb848cc212002b9828c3e34c675e0c9ff6741dc445cab6fdd4e1085d1f1" 182 | ], 183 | "markers": "python_version == '2.6' or python_version == '2.7'", 184 | "version": "==3.2.0" 185 | }, 186 | "idna": { 187 | "hashes": [ 188 | "sha256:2c6a5de3089009e3da7c5dde64a141dbc8551d5b7f6cf4ed7c2568d0cc520a8f", 189 | "sha256:8c7309c718f94b3a625cb648ace320157ad16ff131ae0af362c9f21b80ef6ec4" 190 | ], 191 | "version": "==2.6" 192 | }, 193 | "ipaddress": { 194 | "hashes": [ 195 | "sha256:200d8686011d470b5e4de207d803445deee427455cd0cb7c982b68cf82524f81" 196 | ], 197 | "markers": "python_version < '3'", 198 | "version": "==1.0.19" 199 | }, 200 | "jinja2": { 201 | "hashes": [ 202 | "sha256:74c935a1b8bb9a3947c50a54766a969d4846290e1e788ea44c1392163723c3bd", 203 | "sha256:f84be1bb0040caca4cea721fcbbbbd61f9be9464ca236387158b0feea01914a4" 204 | ], 205 | "version": "==2.10" 206 | }, 207 | "jmespath": { 208 | "hashes": [ 209 | "sha256:6a81d4c9aa62caf061cb517b4d9ad1dd300374cd4706997aff9cd6aedd61fc64", 210 | "sha256:f11b4461f425740a1d908e9a3f7365c3d2e569f6ca68a2ff8bc5bcd9676edd63" 211 | ], 212 | "version": "==0.9.3" 213 | }, 214 | "markupsafe": { 215 | "hashes": [ 216 | "sha256:a6be69091dac236ea9c6bc7d012beab42010fa914c459791d627dad4910eb665" 217 | ], 218 | "version": "==1.0" 219 | }, 220 | "paramiko": { 221 | "hashes": [ 222 | "sha256:24fb31c947de85fbdeca09e222d41206781581fb0bdf118d2ef18f6e414cd388", 223 | 
"sha256:33e36775a6c71790ba7692a73f948b329cf9295a72b0102144b031114bd2a4f3" 224 | ], 225 | "version": "==2.4.1" 226 | }, 227 | "pyasn1": { 228 | "hashes": [ 229 | "sha256:0d7f6e959fe53f3960a23d73f35e1fce61348b30915b6664309ca756de7c1f89", 230 | "sha256:5a0db897b311d265cde49615cf783f1c78613138605cdd0f907ecfa5b2aba3ee", 231 | "sha256:758cb50abddc03e4563fd9e7f03db56e3e87b58c0bd01247360326e5c0c7ffa5", 232 | "sha256:7d626683e3d792cccc608da02498aff37ab4f3dafd8905d6bf755d11f9b26b43", 233 | "sha256:a7efe807c4b83a859e2735c692b92ed7b567cfddc4163763412920041d876c2b", 234 | "sha256:b5a9ca48055b9a20f6d1b3d68e38692e5431c86a0f99ea602e61294e891fee5b", 235 | "sha256:c07d6e587b2f928366b1f67c09bda026a3e6fcc99e80a744dc67f8fca3895626", 236 | "sha256:d258b0a71994f7770599835249cece1caef3c70def868c4915e6e5ca49b67d15", 237 | "sha256:d5cd6ed995dba16fad0c521cfe31cd2d68400b53fcc2bce93326829be73ab6d1", 238 | "sha256:d84c2aea3cf43780e9e6a19f4e4dddee9f6976519020e64e47c57e5c7a8c3dd2", 239 | "sha256:e85895087905c65b5b594eb91f7522664c85545b147d5f4d4e7b1b07da8dcbdc", 240 | "sha256:f81c96761fca60d64b1c9b79ec2e40cf9495a745cf570613079ef324aeb9672b" 241 | ], 242 | "version": "==0.4.2" 243 | }, 244 | "pycparser": { 245 | "hashes": [ 246 | "sha256:99a8ca03e29851d96616ad0404b4aad7d9ee16f25c9f9708a11faf2810f7b226" 247 | ], 248 | "version": "==2.18" 249 | }, 250 | "pynacl": { 251 | "hashes": [ 252 | "sha256:04e30e5bdeeb2d5b34107f28cd2f5bbfdc6c616f3be88fc6f53582ff1669eeca", 253 | "sha256:0bfa0d94d2be6874e40f896e0a67e290749151e7de767c5aefbad1121cad7512", 254 | "sha256:11aa4e141b2456ce5cecc19c130e970793fa3a2c2e6fbb8ad65b28f35aa9e6b6", 255 | "sha256:13bdc1fe084ff9ac7653ae5a924cae03bf4bb07c6667c9eb5b6eb3c570220776", 256 | "sha256:14339dc233e7a9dda80a3800e64e7ff89d0878ba23360eea24f1af1b13772cac", 257 | "sha256:1d33e775fab3f383167afb20b9927aaf4961b953d76eeb271a5703a6d756b65b", 258 | "sha256:2a42b2399d0428619e58dac7734838102d35f6dcdee149e0088823629bf99fbb", 259 | 
"sha256:2dce05ac8b3c37b9e2f65eab56c544885607394753e9613fd159d5e2045c2d98", 260 | "sha256:6453b0dae593163ffc6db6f9c9c1597d35c650598e2c39c0590d1757207a1ac2", 261 | "sha256:73a5a96fb5fbf2215beee2353a128d382dbca83f5341f0d3c750877a236569ef", 262 | "sha256:8abb4ef79161a5f58848b30ab6fb98d8c466da21fdd65558ce1d7afc02c70b5f", 263 | "sha256:8ac1167195b32a8755de06efd5b2d2fe76fc864517dab66aaf65662cc59e1988", 264 | "sha256:8f505f42f659012794414fa57c498404e64db78f1d98dfd40e318c569f3c783b", 265 | "sha256:be71cd5fce04061e1f3d39597f93619c80cdd3558a6c9ba99a546f144a8d8101", 266 | "sha256:cf6877124ae6a0698404e169b3ba534542cfbc43f939d46b927d956daf0a373a", 267 | "sha256:d0eb5b2795b7ee2cbcfcadacbe95a13afbda048a262bd369da9904fecb568975", 268 | "sha256:d795f506bcc9463efb5ebb0f65ed77921dcc9e0a50499dedd89f208445de9ecb", 269 | "sha256:d8aaf7e5d6b0e0ef7d6dbf7abeb75085713d0100b4eb1a4e4e857de76d77ac45", 270 | "sha256:e0d38fa0a75f65f556fb912f2c6790d1fa29b7dd27a1d9cc5591b281321eaaa9", 271 | "sha256:eb2acabbd487a46b38540a819ef67e477a674481f84a82a7ba2234b9ba46f752", 272 | "sha256:eeee629828d0eb4f6d98ac41e9a3a6461d114d1d0aa111a8931c049359298da0", 273 | "sha256:f5ce9e26d25eb0b2d96f3ef0ad70e1d3ae89b5d60255c462252a3e456a48c053", 274 | "sha256:fabf73d5d0286f9e078774f3435601d2735c94ce9e514ac4fb945701edead7e4" 275 | ], 276 | "version": "==1.2.1" 277 | }, 278 | "python-dateutil": { 279 | "hashes": [ 280 | "sha256:891c38b2a02f5bb1be3e4793866c8df49c7d19baabf9c1bad62547e0b4866aca", 281 | "sha256:95511bae634d69bc7329ba55e646499a842bc4ec342ad54a8cdb65645a0aad3c" 282 | ], 283 | "version": "==2.6.1" 284 | }, 285 | "pyyaml": { 286 | "hashes": [ 287 | "sha256:0c507b7f74b3d2dd4d1322ec8a94794927305ab4cebbe89cc47fe5e81541e6e8", 288 | "sha256:16b20e970597e051997d90dc2cddc713a2876c47e3d92d59ee198700c5427736", 289 | "sha256:3262c96a1ca437e7e4763e2843746588a965426550f3797a79fca9c6199c431f", 290 | "sha256:326420cbb492172dec84b0f65c80942de6cedb5233c413dd824483989c000608", 291 | 
"sha256:4474f8ea030b5127225b8894d626bb66c01cda098d47a2b0d3429b6700af9fd8", 292 | "sha256:592766c6303207a20efc445587778322d7f73b161bd994f227adaa341ba212ab", 293 | "sha256:5ac82e411044fb129bae5cfbeb3ba626acb2af31a8d17d175004b70862a741a7", 294 | "sha256:5f84523c076ad14ff5e6c037fe1c89a7f73a3e04cf0377cb4d017014976433f3", 295 | "sha256:827dc04b8fa7d07c44de11fabbc888e627fa8293b695e0f99cb544fdfa1bf0d1", 296 | "sha256:b4c423ab23291d3945ac61346feeb9a0dc4184999ede5e7c43e1ffb975130ae6", 297 | "sha256:bc6bced57f826ca7cb5125a10b23fd0f2fff3b7c4701d64c439a300ce665fff8", 298 | "sha256:c01b880ec30b5a6e6aa67b09a2fe3fb30473008c85cd6a67359a1b15ed6d83a4", 299 | "sha256:ca233c64c6e40eaa6c66ef97058cdc80e8d0157a443655baa1b2966e812807ca", 300 | "sha256:e863072cdf4c72eebf179342c94e6989c67185842d9997960b3e69290b2fa269" 301 | ], 302 | "version": "==3.12" 303 | }, 304 | "rsa": { 305 | "hashes": [ 306 | "sha256:25df4e10c263fb88b5ace923dd84bf9aa7f5019687b5e55382ffcdb8bede9db5", 307 | "sha256:43f682fea81c452c98d09fc316aae12de6d30c4b5c84226642cf8f8fd1c93abd" 308 | ], 309 | "version": "==3.4.2" 310 | }, 311 | "s3transfer": { 312 | "hashes": [ 313 | "sha256:90dc18e028989c609146e241ea153250be451e05ecc0c2832565231dacdf59c1", 314 | "sha256:c7a9ec356982d5e9ab2d4b46391a7d6a950e2b04c472419f5fdec70cc0ada72f" 315 | ], 316 | "version": "==0.1.13" 317 | }, 318 | "six": { 319 | "hashes": [ 320 | "sha256:70e8a77beed4562e7f14fe23a786b54f6296e34344c23bc42f07b15018ff98e9", 321 | "sha256:832dc0e10feb1aa2c68dcc57dbb658f1c7e65b9b61af69048abc87a2db00a0eb" 322 | ], 323 | "version": "==1.11.0" 324 | } 325 | }, 326 | "develop": {} 327 | } 328 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Ansibled 2 | This repository supports the "Automation with Ansible" series by Tom Wright on Medium. 
3 | 4 | https://medium.com/@tomwwright 5 | 6 | ### Articles 7 | 8 | - [Automation with Ansible: Introduction](https://medium.com/@tomwwright/automation-with-ansible-introduction-ccfa1baf8f5c) 9 | - [Automation with Ansible: Building a VPC](https://medium.com/@tomwwright/automating-with-ansible-building-a-vpc-c252944d3d2e) 10 | - [Automation with Ansible: AWS Elasticsearch](https://medium.com/@tomwwright/automation-with-ansible-aws-elasticsearch-service-8d862cdb4a68) 11 | - [Automation with Ansible: Aurora RDS Clusters](https://medium.com/@tomwwright/automating-with-ansible-aurora-clusters-7272364777dd) 12 | - [Automation with Ansible: Logentries](https://medium.com/@tomwwright/automation-with-ansible-logentries-53cf595c2002) 13 | -------------------------------------------------------------------------------- /ansible.cfg: -------------------------------------------------------------------------------- 1 | [defaults] 2 | stdout_callback = debug 3 | vault_password_file = ~/ansibled.vault 4 | inventory = hosts.inventory 5 | -------------------------------------------------------------------------------- /aurora.yml: -------------------------------------------------------------------------------- 1 | # aurora.yml 2 | # --- 3 | # playbook that builds Aurora clusters and Aurora DB instances 4 | 5 | # first run a play for any cluster hosts to create them 6 | - hosts: aurora.cluster 7 | environment: 8 | AWS_ACCESS_KEY_ID: "{{ aws_access_key }}" 9 | AWS_SECRET_ACCESS_KEY: "{{ aws_secret_key }}" 10 | AWS_REGION: "{{ aws_region }}" 11 | tasks: 12 | 13 | - import_tasks: tasks/vpc/facts.yml 14 | - import_tasks: tasks/aurora/setup.subnet-group.yml 15 | - import_tasks: tasks/aurora/setup.parameter-groups.yml 16 | - import_tasks: tasks/aurora/setup.cluster.yml 17 | 18 | # then run a play for all aurora.db hosts, to add them to the clusters 19 | - hosts: aurora.db 20 | environment: 21 | AWS_ACCESS_KEY_ID: "{{ aws_access_key }}" 22 | AWS_SECRET_ACCESS_KEY: "{{ aws_secret_key }}" 23 
| AWS_REGION: "{{ aws_region }}" 24 | tasks: 25 | 26 | - import_tasks: tasks/aurora/setup.db.yml 27 | -------------------------------------------------------------------------------- /elasticsearch.yml: -------------------------------------------------------------------------------- 1 | # elasticsearch.yml 2 | # --- 3 | # playbook that builds elasticsearch clusters: run our facts tasks for the VPC 4 | # to define some necessary details, then run the setup tasks 5 | 6 | - hosts: elasticsearch 7 | environment: 8 | AWS_ACCESS_KEY_ID: "{{ aws_access_key }}" 9 | AWS_SECRET_ACCESS_KEY: "{{ aws_secret_key }}" 10 | AWS_REGION: "{{ aws_region }}" 11 | tasks: 12 | 13 | # discover required facts about our VPC before building Elasticsearch 14 | - import_tasks: tasks/vpc/facts.yml 15 | 16 | # build Elasticsearch domain 17 | - import_tasks: tasks/elasticsearch/setup.yml 18 | -------------------------------------------------------------------------------- /group_vars/all.yml: -------------------------------------------------------------------------------- 1 | # group_vars/all.yml 2 | 3 | # specify to run Ansible for hosts locally by default, not over SSH 4 | ansible_connection: local 5 | -------------------------------------------------------------------------------- /group_vars/aurora.cluster.yml: -------------------------------------------------------------------------------- 1 | # group_vars/aurora.cluster.yml 2 | # --- 3 | # default variables for hosts in the group for Aurora clusters 4 | 5 | aurora_cluster_port: 3306 6 | 7 | aurora_cluster_security_group_ids: 8 | - "{{ vpc_security_group_ids['vpc'] }}" 9 | 10 | aurora_cluster_availability_zones: 11 | - "{{ aws_region }}a" 12 | - "{{ aws_region }}b" 13 | - "{{ aws_region }}c" 14 | 15 | aurora_cluster_subnet_ids: 16 | - "{{ vpc_subnet_ids['private-a'] }}" 17 | - "{{ vpc_subnet_ids['private-b'] }}" 18 | - "{{ vpc_subnet_ids['private-c'] }}" 19 | -------------------------------------------------------------------------------- 
/group_vars/elasticsearch.yml: -------------------------------------------------------------------------------- 1 | # group_vars/elasticsearch.yml 2 | # --- 3 | # specify defaults for our Elasticsearch clusters 4 | 5 | elasticsearch_version: 6.0 6 | elasticsearch_instance_type: r4.large 7 | elasticsearch_instance_count: 3 8 | elasticsearch_dedicated_masters_enabled: false 9 | -------------------------------------------------------------------------------- /group_vars/project.ansibled.yml: -------------------------------------------------------------------------------- 1 | # group_vars/project.ansibled.yml 2 | # --- 3 | # specify VPC details and AWS credentials 4 | 5 | # general details about our VPC 6 | vpc_name: ansibled-vpc 7 | vpc_key: ansibled-key 8 | vpc_dns_zone: ansibled 9 | vpc_use_nat: false 10 | 11 | # credentials for AWS (no, they aren't real...) 12 | aws_region: ap-southeast-2 13 | aws_account_id: !vault | 14 | $ANSIBLE_VAULT;1.1;AES256 15 | 36643034613434663432363131393632346165393638316236326232316230623738616263393334 16 | 6535363832303734343436323364633836386566353030370a323931623765393462383562646334 17 | 61386432306263303435366431363261323162303531623436323732373663663236633630373936 18 | 3138356535306363350a653734663066313366383132346638613464353861316430613239346664 19 | 6337 20 | aws_access_key: !vault | 21 | $ANSIBLE_VAULT;1.1;AES256 22 | 32356430353165383564323734366432386331363030306361663935646439633332353438636137 23 | 3533633666623339326435373864373236663430663365300a373563353639623634663961633863 24 | 63386463623963623331613962346630666237646661626332333162336334303537353139653030 25 | 6436643339343235620a656438633238343031323733646132316232353665626365636133633535 26 | 3032 27 | aws_secret_key: !vault | 28 | $ANSIBLE_VAULT;1.1;AES256 29 | 32356430353165383564323734366432386331363030306361663935646439633332353438636137 30 | 3533633666623339326435373864373236663430663365300a373563353639623634663961633863 31 | 
63386463623963623331613962346630666237646661626332333162336334303537353139653030 32 | 6436643339343235620a656438633238343031323733646132316232353665626365636133633535 33 | 3032 34 | 35 | # Logentries REST API key -- read/write 36 | logentries_api_key: !vault | 37 | $ANSIBLE_VAULT;1.1;AES256 38 | 62336430323434343665653736376339333661356535336132326634353934356634666338316333 39 | 6264376536343862303962383638303037366634343137350a346364663835663964333136366330 40 | 37333564373232393966616164643539373534393761303332306438353562663434636466353431 41 | 3430373764336439330a386339636234323566653436393136313762356537343637326239356139 42 | 6332 43 | -------------------------------------------------------------------------------- /host_vars/big.elasticsearch.ansibled.yml: -------------------------------------------------------------------------------- 1 | # host_vars/big.elasticsearch.ansibled.yml 2 | 3 | elasticsearch_name: ansibled-big-cluster 4 | elasticsearch_domain_name: big.elasticsearch.ansibled 5 | elasticsearch_ebs_size_gb: 80 6 | elasticsearch_instance_count: 4 7 | elasticsearch_instance_type: r4.xlarge.elasticsearch 8 | elasticsearch_dedicated_masters_enabled: true 9 | elasticsearch_dedicated_masters_count: 2 10 | elasticsearch_dedicated_masters_type: m4.large.elasticsearch 11 | elasticsearch_domain_name: big.elasticsearch.ansibled 12 | -------------------------------------------------------------------------------- /host_vars/cluster.aurora.ansibled.yml: -------------------------------------------------------------------------------- 1 | # host_vars/cluster.aurora.ansibled.yml 2 | # --- 3 | # specific variables for the "auroradb.ansibled" host 4 | 5 | aurora_cluster_name: ansibled-aurora-cluster 6 | aurora_cluster_dns: auroradb.{{ vpc_dns_zone }} 7 | 8 | # credentials for the master user of our db cluster -- password (ansibled-db-password) encrypted with Ansible Vault 9 | aurora_cluster_username: ansibled_db_user 10 | aurora_cluster_password: !vault | 11 | 
$ANSIBLE_VAULT;1.1;AES256 12 | 63313164666335343165616234663938376539393163336233336535623831383135393731393938 13 | 6561346464306536303161303261363334303566616663390a336636623566333737363830316335 14 | 30636161396231646130353939373236353833313164353462646535646432623939343937346438 15 | 3635303438343963660a323534613431643465316630353735663437333430393232316636313364 16 | 36383330363532363963666432313262623134366465333532623865356139376161 17 | -------------------------------------------------------------------------------- /host_vars/dba.aurora.ansibled.yml: -------------------------------------------------------------------------------- 1 | # host_vars/dba.aurora.ansibled.yml 2 | # --- 3 | # specific configuration for one of our Aurora DBs 4 | 5 | aurora_cluster_name: ansibled-aurora-cluster 6 | aurora_db_name: ansibled-aurora-a 7 | 8 | aurora_instance_type: db.t2.small 9 | aurora_availability_zone: "{{ aws_region }}a" 10 | aurora_promotion_tier: 1 11 | -------------------------------------------------------------------------------- /host_vars/dbb.aurora.ansibled.yml: -------------------------------------------------------------------------------- 1 | # host_vars/dbb.aurora.ansibled.yml 2 | # --- 3 | # specific configuration for one of our Aurora DBs 4 | 5 | aurora_cluster_name: ansibled-aurora-cluster 6 | aurora_db_name: ansibled-aurora-b 7 | 8 | aurora_instance_type: db.t2.small 9 | aurora_availability_zone: "{{ aws_region }}b" 10 | aurora_promotion_tier: 2 11 | -------------------------------------------------------------------------------- /host_vars/small.elasticsearch.ansibled.yml: -------------------------------------------------------------------------------- 1 | # host_vars/small.elasticsearch.ansibled.yml 2 | 3 | elasticsearch_name: ansibled-small-cluster 4 | elasticsearch_ebs_size_gb: 20 5 | elasticsearch_instance_count: 2 6 | elasticsearch_instance_type: t2.medium.elasticsearch 7 | 
-------------------------------------------------------------------------------- /host_vars/vpc.ansibled.yml: -------------------------------------------------------------------------------- 1 | # host_vars/vpc.ansibled.yml 2 | 3 | vpc_cidr_block: 10.0.0.0/16 4 | 5 | vpc_subnets: 6 | private-a: 7 | cidr: 10.0.1.0/24 8 | az: "{{ aws_region }}a" 9 | public-a: 10 | cidr: 10.0.2.0/24 11 | az: "{{ aws_region }}a" 12 | private-b: 13 | cidr: 10.0.3.0/24 14 | az: "{{ aws_region }}b" 15 | public-b: 16 | cidr: 10.0.4.0/24 17 | az: "{{ aws_region }}b" 18 | private-c: 19 | cidr: 10.0.5.0/24 20 | az: "{{ aws_region }}c" 21 | public-c: 22 | cidr: 10.0.6.0/24 23 | az: "{{ aws_region }}c" 24 | 25 | vpc_security_groups: 26 | - name: vpc 27 | description: "Allow internal traffic in the VPC" 28 | rules: 29 | - proto: all 30 | group_name: vpc 31 | ports: all 32 | - name: allow-public-ssh 33 | description: "Allow public SSH" 34 | rules: 35 | - proto: tcp 36 | cidr_ip: 0.0.0.0/0 37 | ports: 38 | - 22 39 | - name: allow-public-http 40 | description: "Allow public web traffic" 41 | rules: 42 | - proto: tcp 43 | cidr_ip: 0.0.0.0/0 44 | ports: 45 | - 80 46 | - 8080 47 | - 443 48 | -------------------------------------------------------------------------------- /hosts.inventory: -------------------------------------------------------------------------------- 1 | # hosts.inventory 2 | # --- 3 | # the host and group list for this example "Ansibled" project 4 | 5 | [elasticsearch] 6 | big.elasticsearch.ansibled 7 | small.elasticsearch.ansibled 8 | 9 | [aurora.cluster] 10 | cluster.aurora.ansibled 11 | 12 | [aurora.db] 13 | dba.aurora.ansibled 14 | dbb.aurora.ansibled 15 | 16 | [vpc] 17 | vpc.ansibled 18 | 19 | [project.ansibled:children] 20 | vpc 21 | aurora.cluster 22 | aurora.db 23 | elasticsearch 24 | -------------------------------------------------------------------------------- /tasks/aurora/files/create-db-cluster.json.j2: 
-------------------------------------------------------------------------------- 1 | { 2 | "AvailabilityZones": [ 3 | "{{ aurora_cluster_availability_zones | join('", "') }}" 4 | ], 5 | "BackupRetentionPeriod": 7, 6 | "DBClusterIdentifier": "{{ aurora_cluster_name }}", 7 | "DBClusterParameterGroupName": "default-cluster-aurora-5-7", 8 | "VpcSecurityGroupIds": [ 9 | "{{ aurora_cluster_security_group_ids | join('", "') }}" 10 | ], 11 | "DBSubnetGroupName": "{{ aurora_cluster_name }}-subnets", 12 | "Engine": "aurora-mysql", 13 | "EngineVersion": "5.7.12", 14 | "Port": {{ aurora_cluster_port }}, 15 | "MasterUsername": "{{ aurora_cluster_username }}", 16 | "MasterUserPassword": "{{ aurora_cluster_password }}", 17 | "Tags": [], 18 | "StorageEncrypted": true 19 | } 20 | -------------------------------------------------------------------------------- /tasks/aurora/files/create-db-instance.json.j2: -------------------------------------------------------------------------------- 1 | { 2 | "DBInstanceIdentifier": "{{ aurora_db_name }}", 3 | "DBInstanceClass": "{{ aurora_instance_type }}", 4 | "Engine": "aurora-mysql", 5 | "AvailabilityZone": "{{ aurora_availability_zone }}", 6 | "DBSubnetGroupName": "{{ aurora_cluster_name }}-subnets", 7 | "PreferredMaintenanceWindow": "mon:11:00-mon:11:30", 8 | "MultiAZ": false, 9 | "AutoMinorVersionUpgrade": true, 10 | "LicenseModel": "general-public-license", 11 | "PubliclyAccessible": false, 12 | "Tags": [], 13 | "DBClusterIdentifier": "{{ aurora_cluster_name }}", 14 | "PromotionTier": {{ aurora_promotion_tier | default('1') }} 15 | } 16 | -------------------------------------------------------------------------------- /tasks/aurora/setup.cluster.yml: -------------------------------------------------------------------------------- 1 | # tasks/auroradb/setup.cluster.yml 2 | # --- 3 | # check if an Aurora DB cluster exists, and create it if it doesn't. 
Then update the VPC DNS 4 | 5 | # look for an existing Aurora DB cluster for this host using the AWS CLI 6 | - name: check for Aurora DB cluster 7 | command: aws rds describe-db-clusters --filters Name=db-cluster-id,Values={{ aurora_cluster_name }} --region {{ aws_region }} 8 | changed_when: false 9 | register: aurora_cluster_query 10 | 11 | - name: parse Aurora DB cluster query 12 | set_fact: 13 | aurora_cluster: "{{ aurora_cluster_query.stdout | from_json | json_query('DBClusters[0]')}}" 14 | 15 | # create the cluster if it doesn't exist -- passing config as JSON from a Jinja2 template 16 | - name: create Aurora DB cluster 17 | command: aws rds create-db-cluster --region {{ aws_region }} --cli-input-json '{{ lookup('template', 'files/create-db-cluster.json.j2') | to_json }}' 18 | when: aurora_cluster == '' 19 | register: aurora_cluster_create 20 | 21 | # parse JSON output of the AWS CLI create cluster command, if we ran it, to get our cluster details 22 | - name: parse Aurora DB cluster create 23 | set_fact: 24 | aurora_cluster: "{{ aurora_cluster_create.stdout | from_json | json_query('DBCluster') }}" 25 | when: aurora_cluster == '' 26 | 27 | - name: update VPC DNS for Aurora DB cluster 28 | route53: 29 | state: present 30 | zone: "{{ vpc_dns_zone }}" 31 | private_zone: true 32 | record: "{{ aurora_cluster_dns }}" 33 | type: CNAME 34 | overwrite: true 35 | ttl: 900 36 | value: "{{ aurora_cluster.Endpoint }}" 37 | -------------------------------------------------------------------------------- /tasks/aurora/setup.db.yml: -------------------------------------------------------------------------------- 1 | # look for an existing Aurora DB instance for this host using the AWS CLI 2 | - name: check for Aurora DB instance 3 | command: aws rds describe-db-instances --filters Name=db-instance-id,Values={{ aurora_db_name }} --region {{ aws_region }} 4 | changed_when: false 5 | register: aurora_instance_query 6 | 7 | - name: parse Aurora DB instance query 8 | set_fact: 9 
| aurora_instance: "{{ aurora_instance_query.stdout | from_json | json_query('DBInstances[0]')}}" 10 | 11 | # create the instance if it doesn't exist -- passing config as JSON from a Jinja2 template 12 | - name: create Aurora DB instance 13 | command: aws rds create-db-instance --region {{ aws_region }} --cli-input-json '{{ lookup('template', 'files/create-db-instance.json.j2') | to_json }}' 14 | when: aurora_instance == '' 15 | register: aurora_instance_create 16 | 17 | - name: parse Aurora DB instance query 18 | set_fact: 19 | aurora_instance: "{{ aurora_instance_create.stdout | from_json | json_query('DBInstance')}}" 20 | when: aurora_instance == '' 21 | -------------------------------------------------------------------------------- /tasks/aurora/setup.parameter-groups.yml: -------------------------------------------------------------------------------- 1 | # tasks/aurora/setup.parameter-group.yml 2 | # --- 3 | # use the AWS CLI to create a (default) parameter group for Aurora clusters 4 | 5 | # list existing cluster parameter groups using the AWS CLI 6 | - name: check for DB cluster parameter group 7 | command: > 8 | aws rds describe-db-cluster-parameter-groups 9 | --no-paginate 10 | --region {{ aws_region }} 11 | changed_when: false 12 | register: db_cluster_parameter_group_query 13 | 14 | # search through existing parameter groups to see if our's already exists (idempotence!) 
15 | - name: parse DB cluster parameter group query 16 | set_fact: 17 | db_cluster_parameter_group: "{{ db_cluster_parameter_group_query.stdout | from_json | json_query(query)}}" 18 | vars: 19 | query: DBClusterParameterGroups[?DBClusterParameterGroupName=='default-cluster-aurora-5-7'] | [0] 20 | 21 | # create the cluster paramter group with the AWS CLI if it doesn't already exist 22 | - name: create DB cluster parameter group 23 | command: > 24 | aws rds create-db-cluster-parameter-group 25 | --db-cluster-parameter-group-name default-cluster-aurora-5-7 26 | --db-parameter-group-family aurora-mysql5.7 27 | --description "Default Aurora MySQL 5.7 cluster parameter group created by Ansible" 28 | --region {{ aws_region }} 29 | when: db_cluster_parameter_group == '' 30 | -------------------------------------------------------------------------------- /tasks/aurora/setup.subnet-group.yml: -------------------------------------------------------------------------------- 1 | # tasks/aurora/setup.subnet-group.yml 2 | # --- 3 | # use an Ansible module to configure the subnet group for our cluster 4 | 5 | - name: configure DB subnet group 6 | rds_subnet_group: 7 | name: "{{ aurora_cluster_name }}-subnets" 8 | description: "Subnets used by {{ aurora_cluster_name }} Aurora cluster" 9 | subnets: "{{ aurora_cluster_subnet_ids }}" 10 | state: present 11 | -------------------------------------------------------------------------------- /tasks/elasticsearch/files/create-elasticsearch-domain.json.j2: -------------------------------------------------------------------------------- 1 | { 2 | "DomainName": "{{ elasticsearch_name }}", 3 | "ElasticsearchVersion": "{{ elasticsearch_version }}", 4 | "ElasticsearchClusterConfig": { 5 | "InstanceType": "{{ elasticsearch_instance_type }}", 6 | "InstanceCount": {{ elasticsearch_instance_count }}, 7 | "ZoneAwarenessEnabled": {{ (elasticsearch_instance_count == 1) | ternary('false', 'true') }}, 8 | "DedicatedMasterEnabled": {{ 
elasticsearch_dedicated_masters_enabled | lower }} 9 | {% if elasticsearch_dedicated_masters_enabled %} 10 | ,"DedicatedMasterType": "{{ elasticsearch_dedicated_masters_type }}", 11 | "DedicatedMasterCount": {{ elasticsearch_dedicated_masters_count }} 12 | {% endif %} 13 | }, 14 | "EBSOptions": { 15 | "EBSEnabled": true, 16 | "VolumeType": "gp2", 17 | "VolumeSize": {{ elasticsearch_ebs_size_gb }} 18 | }, 19 | "AccessPolicies": "{{ lookup('template', 'files/elasticsearch-access-policy.json.j2') | to_json | regex_replace('\"', '\\\"') }}", 20 | "SnapshotOptions": { 21 | "AutomatedSnapshotStartHour": 14 22 | }, 23 | "VPCOptions": { 24 | "SubnetIds": [ 25 | "{{ vpc_subnet_ids['private-a'] }}", 26 | "{{ vpc_subnet_ids['private-b'] }}" 27 | ], 28 | "SecurityGroupIds": [ 29 | "{{ vpc_security_group_ids['vpc'] }}" 30 | ] 31 | } 32 | } 33 | -------------------------------------------------------------------------------- /tasks/elasticsearch/files/elasticsearch-access-policy.json.j2: -------------------------------------------------------------------------------- 1 | { 2 | "Version": "2012-10-17", 3 | "Statement": [ 4 | { 5 | "Effect": "Allow", 6 | "Principal": { 7 | "AWS": "{{ aws_account_id }}" 8 | }, 9 | "Action": "es:*", 10 | "Resource": "arn:aws:es:{{ aws_region }}:{{ aws_account_id }}:domain/{{ elasticsearch_name }}/*" 11 | } 12 | ] 13 | } 14 | -------------------------------------------------------------------------------- /tasks/elasticsearch/setup.cluster.yml: -------------------------------------------------------------------------------- 1 | # tasks/elasticsearch/setup.cluster.yml 2 | # --- 3 | # look up our Elasticsearch cluster, create it if necessary, wait for it to be 4 | # available, then update the DNS record for it in Route 53 5 | 6 | # use the AWS CLI to query for details about this cluster, if it exists 7 | - name: check for existing Elasticsearch cluster 8 | command: aws es describe-elasticsearch-domains --region {{ aws_region }} --domain-names {{
elasticsearch_name }} 9 | changed_when: false 10 | register: elasticsearch_cluster_query 11 | 12 | # handle the output from the CLI 13 | - name: parse Elasticsearch cluster query 14 | set_fact: 15 | elasticsearch_cluster: "{{ (elasticsearch_cluster_query.stdout | from_json).DomainStatusList[0] }}" 16 | when: (elasticsearch_cluster_query.stdout | from_json).DomainStatusList[0] is defined 17 | 18 | # if the cluster doesn't exist (`when` clause), call the CLI to create it -- passing an evaluated template as the configuration 19 | - name: create Elasticsearch cluster 20 | command: aws es create-elasticsearch-domain --region {{ aws_region }} --cli-input-json '{{ lookup('template', 'files/create-elasticsearch-domain.json.j2') | to_json }}' 21 | when: elasticsearch_cluster is not defined 22 | register: elasticsearch_cluster_create 23 | 24 | # poll the AWS CLI using `until` to wait until the output shows our Elasticsearch endpoint has appeared 25 | - name: wait for Elasticsearch endpoint to be availabile 26 | command: aws es describe-elasticsearch-domains --region {{ aws_region }} --domain-names {{ elasticsearch_name }} 27 | changed_when: false 28 | register: elasticsearch_cluster_query 29 | until: (elasticsearch_cluster_query.stdout | from_json).DomainStatusList[0].Endpoints is defined 30 | retries: 15 31 | delay: 60 32 | 33 | # handle the output from the CLI a bit 34 | - name: parse Elasticsearch cluster query 35 | set_fact: 36 | elasticsearch_cluster: "{{ (elasticsearch_cluster_query.stdout | from_json).DomainStatusList[0] }}" 37 | 38 | # use an Ansible module to update the Route 53 record for our Elasticsearch host 39 | - name: update VPC DNS for Elasticsearch cluster 40 | route53: 41 | state: present 42 | zone: "{{ vpc_dns_zone }}" 43 | private_zone: true 44 | record: "{{ elasticsearch_domain_name }}" 45 | type: CNAME 46 | overwrite: true 47 | ttl: 300 48 | value: "{{ elasticsearch_cluster.Endpoints.vpc }}" 49 | when: elasticsearch_cluster.Endpoints is defined and 
elasticsearch_domain_name is defined 50 | -------------------------------------------------------------------------------- /tasks/elasticsearch/setup.role.yml: -------------------------------------------------------------------------------- 1 | # tasks/elasticsearch/setup.role.yml 2 | # --- 3 | # create the service-linked IAM role used by AWS Elasticsearch Service 4 | 5 | # use the AWS CLI to retrieve a list of our IAM roles, store it in a variable 6 | # using `register` 7 | - name: list existing IAM roles 8 | command: aws iam list-roles --no-paginate 9 | changed_when: false 10 | register: list_iam_roles 11 | 12 | # convert the output from the CLI from JSON and look for a role with a known description 13 | - name: find service-linked IAM role for Elasticsearch 14 | set_fact: 15 | iam_role: "{{ list_iam_roles.stdout | from_json | json_query(query) }}" 16 | vars: 17 | query: Roles[?Description=='ansibled-es-role'] | [0] 18 | 19 | # if we couldn't find our role (`when` clause), call the AWS CLI to create it 20 | - name: create service-linked IAM role for Elasticsearch 21 | command: aws iam create-service-linked-role --region {{ aws_region }} --aws-service-name es.amazonaws.com --description ansibled-es-role 22 | when: iam_role == '' 23 | -------------------------------------------------------------------------------- /tasks/elasticsearch/setup.yml: -------------------------------------------------------------------------------- 1 | # tasks/elasticsearch/setup.yml 2 | # --- 3 | # entrypoint for our Elasticsearch setup tasks, simply ties together the task 4 | # lists for the role and the cluster 5 | 6 | - import_tasks: setup.role.yml 7 | - import_tasks: setup.cluster.yml 8 | -------------------------------------------------------------------------------- /tasks/logentries/log.create.yml: -------------------------------------------------------------------------------- 1 | # POST to Logentries and create the new log, note: 2 | # - provide our API key in the `x-api-key` 
header 3 | # - Logentries API responds with a 201, so that is a successful response (status_code) 4 | # - provide our Logset id so that our new log will be created in it 5 | - name: create Log in Logentries if it doesn't exist - {{ item }} 6 | uri: 7 | url: https://rest.logentries.com/management/logs 8 | headers: 9 | x-api-key: "{{ logentries_api_key }}" 10 | method: POST 11 | body_format: json 12 | body: 13 | log: 14 | name: "{{ item | lower }}" 15 | logs_info: [] 16 | user_data: {} 17 | source_type: token 18 | logsets_info: 19 | - id: "{{ logentries_logset.id }}" 20 | status_code: 200,201 21 | return_content: yes 22 | register: create_log_info 23 | 24 | # scrape the Logentries response for the new log token -- for use later 25 | - name: save the Log token (new log) 26 | set_fact: 27 | logentries_log_tokens: "{{ logentries_log_tokens | default({}) | combine({ item: create_log_info.json.log.tokens[0] }) }}" 28 | 29 | # update our Log id dictionary 30 | - name: save the Log id (new log) 31 | set_fact: 32 | logentries_log_ids: "{{ logentries_log_ids | default({}) | combine({ item: create_log_info.json.log.id }) }}" 33 | -------------------------------------------------------------------------------- /tasks/logentries/log.lookup.yml: -------------------------------------------------------------------------------- 1 | # GET from Logentries for existing log details, note: 2 | # - provide our API key in the `x-api-key` header 3 | # - Logentries API responds with a 201, so that is a successful response (status_code) 4 | - name: look up Log in Logentries if it does exist - {{ item }} 5 | uri: 6 | url: https://rest.logentries.com/management/logs/{{ logentries_log_ids[item] }} 7 | headers: 8 | x-api-key: "{{ logentries_api_key }}" 9 | method: GET 10 | return_content: yes 11 | register: lookup_log_info 12 | when: logentries_log_ids[item] is defined 13 | 14 | # scrape the Logentries response for the log token -- for use later 15 | - name: save the Log token (existing log) 16 | 
set_fact: 17 | logentries_log_tokens: "{{ logentries_log_tokens | default({}) | combine({ item: lookup_log_info.json.log.tokens[0] }) }}" 18 | when: logentries_log_ids[item] is defined 19 | -------------------------------------------------------------------------------- /tasks/logentries/main.yml: -------------------------------------------------------------------------------- 1 | # pull down the current state of our Logentries logs from the REST API 2 | - name: retrieve Logsets info from Logentries 3 | uri: 4 | url: https://rest.logentries.com/management/logsets 5 | headers: 6 | x-api-key: "{{ logentries_api_key }}" 7 | method: GET 8 | return_content: yes 9 | register: logentries_logsets_info 10 | 11 | # --- 12 | 13 | # use json_query to pick through the log sets and see if the Logset for this 14 | # host already exists 15 | - name: check if Logset for this host already exists 16 | set_fact: 17 | logentries_logset: "{{ logentries_logsets_info.json | json_query(query) }}" 18 | vars: 19 | query: logsets[?name=='{{ logentries_logset_name }}'] | [0] 20 | 21 | # if it doesn't exist (when condition), POST to the REST API and create the Logset 22 | - name: create the Logset if it doesn't exist 23 | uri: 24 | url: https://rest.logentries.com/management/logsets 25 | headers: 26 | x-api-key: "{{ logentries_api_key }}" 27 | method: POST 28 | body_format: json 29 | body: 30 | logset: 31 | name: "{{ logentries_logset_name }}" 32 | logs_info: [] 33 | user_data: {} 34 | status_code: 200,201 35 | return_content: yes 36 | register: new_logset_info 37 | when: logentries_logset == '' 38 | 39 | # just a little parsing on the request response 40 | - name: set new Logset info 41 | set_fact: 42 | logentries_logset: "{{ new_logset_info.json.logset }}" 43 | when: logentries_logset == '' 44 | 45 | # --- 46 | 47 | # define an empty container to use in our next step 48 | - set_fact: 49 | logentries_log_ids: {} 50 | 51 | # iterate each log in our logset info from Logentries and add it to a 
dictionary 52 | # to give us a dictionary of `name` -> `id` 53 | - name: extract existing Log IDs from Log Sets info 54 | set_fact: 55 | logentries_log_ids: "{{ logentries_log_ids | combine({ (item.name | default('default') | regex_replace('[ -]', '_') | upper ): item.id }) }}" 56 | with_items: "{{ logentries_logset.logs_info }}" 57 | 58 | # for each of our defined logs for this host, if the id is in our dictionary, just do a lookup 59 | - name: lookup existing log 60 | include_tasks: log.lookup.yml 61 | with_items: "{{ logentries_log_names }}" 62 | when: logentries_log_ids[item] is defined 63 | 64 | # for each of our defined logs for this host, if the id isn't in our dictionary, create it 65 | - name: create log 66 | include_tasks: log.create.yml 67 | with_items: "{{ logentries_log_names }}" 68 | when: logentries_log_ids[item] is not defined 69 | -------------------------------------------------------------------------------- /tasks/vpc/facts.yml: -------------------------------------------------------------------------------- 1 | # tasks/vpc/facts.yml 2 | # --- 3 | # sets facts for some important IDs and IPs of our VPC 4 | 5 | # find the VPC by name 6 | - name: VPC facts 7 | ec2_vpc_net_facts: 8 | filters: 9 | "tag:Name": "{{ vpc_name }}" 10 | register: vpc_facts 11 | 12 | # parse the facts output and extract the VPC ID 13 | - name: "set fact: VPC ID" 14 | set_fact: 15 | vpc_id: "{{ vpc_facts.vpcs[0].id }}" 16 | 17 | # find our subnets by VPC ID that was just defined 18 | - name: VPC subnet facts 19 | ec2_vpc_subnet_facts: 20 | filters: 21 | vpc-id: "{{ vpc_id }}" 22 | register: vpc_subnet_facts 23 | 24 | # parse the facts output and extract the IDs with some fancy filter work: 25 | # - iterate each subnet found by the facts query 26 | # - for that subnet, define a "name: id" entry in the `vpc_subnet_ids` dictionary (or empty dictionary if it doesn't exist) 27 | - name: "set facts: VPC subnet IDs" 28 | set_fact: 29 | vpc_subnet_ids: "{{ vpc_subnet_ids | 
default({}) | combine({ (item.tags.Name | default('default')): item.id }) }}" 30 | with_items: "{{ vpc_subnet_facts.subnets }}" 31 | 32 | # find our security groups by VPC ID 33 | - name: VPC security group facts 34 | ec2_group_facts: 35 | filters: 36 | vpc-id: "{{ vpc_id }}" 37 | register: vpc_security_group_facts 38 | 39 | # parse the facts output and extract the IDs with some fancy filter work: 40 | # - iterate each security group found by the facts query 41 | # - for that group, define a "name: id" entry in the `vpc_security_group_ids` dictionary (or empty dictionary if it doesn't exist) 42 | - name: "set facts: VPC security group IDs" 43 | set_fact: 44 | vpc_security_group_ids: "{{ vpc_security_group_ids | default({}) | combine({ (item.group_name | default('default')): item.group_id }) }}" 45 | with_items: "{{ vpc_security_group_facts.security_groups }}" 46 | 47 | # find our NAT gateway by VPC ID 48 | - name: VPC NAT gateway facts 49 | ec2_vpc_nat_gateway_facts: 50 | filters: 51 | vpc-id: "{{ vpc_id }}" 52 | register: vpc_nat_gateway_facts 53 | when: vpc_use_nat 54 | 55 | # parse the facts output and extract the NAT gateway IP (nat_gateway_addresses is a list of address dicts -- take the first) 56 | - name: "set fact: VPC NAT gateway IP" 57 | set_fact: 58 | vpc_nat_gateway_ip: "{{ vpc_nat_gateway_facts.result[0].nat_gateway_addresses[0].public_ip }}" 59 | when: vpc_use_nat 60 | -------------------------------------------------------------------------------- /tasks/vpc/setup.gateways.yml: -------------------------------------------------------------------------------- 1 | # tasks/vpc/setup.gateways.yml 2 | # --- 3 | # creates the gateways for the VPC, and sets up routing for the subnets 4 | 5 | # create the internet gateway, saving the output to extract the ID later 6 | - name: create internet gateway 7 | ec2_vpc_igw: 8 | vpc_id: "{{ vpc_id }}" 9 | register: create_gateway 10 | 11 | # create the NAT gateway, looking up the subnet ID by the human readable name: "private-a" 12 | - name: create NAT gateway 13 | ec2_vpc_nat_gateway: 14
| subnet_id: "{{ vpc_subnet_ids['private-a'] }}" # NOTE(review): a NAT gateway needs a route to an internet gateway -- this should likely be the 'public-a' subnet; confirm before changing 15 | region: "{{ aws_region }}" 16 | wait: yes 17 | if_exist_do_not_create: true 18 | register: create_nat_gateway 19 | 20 | # parse the outputs of the Ansible modules for some important details referred to when setting up routing 21 | - name: "set facts: Gateway IDs and IP" 22 | set_fact: 23 | vpc_gateway_id: "{{ create_gateway.gateway_id }}" 24 | vpc_nat_gateway_id: "{{ create_nat_gateway.nat_gateway_id }}" 25 | vpc_nat_gateway_ip: "{{ create_nat_gateway.nat_gateway_addresses[0].public_ip }}" # nat_gateway_addresses is a list of address dicts -- take the first 26 | 27 | # update the VPCs DNS with the public IP of the new NAT gateway 28 | - name: update DNS with NAT gateway IP 29 | route53: 30 | state: present 31 | zone: "{{ vpc_dns_zone }}" 32 | private_zone: yes 33 | record: nat.{{ vpc_dns_zone }} 34 | type: A 35 | overwrite: true 36 | value: "{{ vpc_nat_gateway_ip }}" 37 | 38 | # private route table that routes through the NAT -- attach it to our three private subnets 39 | - name: create route table for private subnets 40 | ec2_vpc_route_table: 41 | vpc_id: "{{ vpc_id }}" 42 | tags: 43 | Name: "{{ vpc_name }}-private" 44 | subnets: 45 | - "{{ vpc_subnet_ids['private-a'] }}" 46 | - "{{ vpc_subnet_ids['private-b'] }}" 47 | - "{{ vpc_subnet_ids['private-c'] }}" 48 | routes: 49 | - dest: 0.0.0.0/0 50 | nat_gateway_id: "{{ vpc_nat_gateway_id }}" # routes through a NAT gateway use nat_gateway_id, not gateway_id 51 | 52 | # public route table that routes through the internet gateway -- attach it to our three public subnets 53 | - name: create route table for public subnets 54 | ec2_vpc_route_table: 55 | vpc_id: "{{ vpc_id }}" 56 | tags: 57 | Name: "{{ vpc_name }}-public" 58 | subnets: 59 | - "{{ vpc_subnet_ids['public-a'] }}" 60 | - "{{ vpc_subnet_ids['public-b'] }}" 61 | - "{{ vpc_subnet_ids['public-c'] }}" 62 | routes: 63 | - dest: 0.0.0.0/0 64 | gateway_id: "{{ vpc_gateway_id }}" 65 | -------------------------------------------------------------------------------- /tasks/vpc/setup.vpc.yml: -------------------------------------------------------------------------------- 1 | #
tasks/vpc/setup.vpc.yml 2 | # --- 3 | # creates a VPC, configures a list of defined subnets, configures a list of defined security groups 4 | 5 | # use the Ansible module to create our VPC, saving the output into `create_vpc` 6 | - name: create VPC 7 | ec2_vpc_net: 8 | name: "{{ vpc_name }}" 9 | cidr_block: "{{ vpc_cidr_block }}" 10 | region: "{{ aws_region }}" 11 | register: create_vpc 12 | 13 | # parse the output of creating the VPC to extract the VPC ID -- we need to specify this in the subsequent tasks 14 | - name: "set fact: VPC ID" 15 | set_fact: 16 | vpc_id: "{{ create_vpc.vpc.id }}" 17 | 18 | # iterate over our dictionary of subnets with `with_dict`, and create each one with the Ansible module 19 | - name: create VPC subnets 20 | ec2_vpc_subnet: 21 | vpc_id: "{{ vpc_id }}" 22 | cidr: "{{ item.value.cidr }}" 23 | az: "{{ item.value.az }}" 24 | tags: 25 | Name: "{{ item.key }}" 26 | with_dict: "{{ vpc_subnets }}" 27 | register: create_vpc_subnets 28 | 29 | # this is a tricky one, using some filters to: 30 | # - loop over the list of outputs from creating our subnets 31 | # - for that subnet, define a "name: id" entry in the `vpc_subnet_ids` dictionary (or empty dictionary if it doesn't exist) 32 | - name: "set fact: VPC subnet IDs" 33 | set_fact: 34 | vpc_subnet_ids: "{{ vpc_subnet_ids | default({}) | combine({ item.subnet.tags.Name: item.subnet.id }) }}" 35 | with_items: "{{ create_vpc_subnets.results }}" 36 | 37 | # iterate over our list of security groups and create each one with the Ansible module 38 | - name: create VPC security groups 39 | ec2_group: 40 | name: "{{ item.name }}" 41 | description: "{{ item.description }}" 42 | vpc_id: "{{ vpc_id }}" 43 | rules: "{{ item.rules }}" 44 | with_items: "{{ vpc_security_groups }}" 45 | 46 | # create an EC2 key -- pretty simple 47 | - name: create EC2 key 48 | ec2_key: 49 | name: "{{ vpc_key }}" 50 | 51 | # create a private hosted zone for the VPC with the Ansible module 52 | - name: create Route 53 private 
hosted zone 53 | route53_zone: 54 | zone: "{{ vpc_dns_zone }}" 55 | state: present 56 | vpc_id: "{{ vpc_id }}" 57 | vpc_region: "{{ aws_region }}" 58 | -------------------------------------------------------------------------------- /vpc.yml: -------------------------------------------------------------------------------- 1 | # vpc.yml 2 | # --- 3 | # playbook that runs our VPC tasks for any hosts in the `vpc` group, providing AWS credentials in the environment 4 | 5 | - hosts: vpc 6 | environment: 7 | AWS_ACCESS_KEY_ID: "{{ aws_access_key }}" 8 | AWS_SECRET_ACCESS_KEY: "{{ aws_secret_key }}" 9 | AWS_REGION: "{{ aws_region }}" 10 | tasks: 11 | 12 | - include_tasks: tasks/vpc/setup.vpc.yml 13 | - include_tasks: tasks/vpc/setup.gateways.yml 14 | - include_tasks: tasks/vpc/facts.yml 15 | --------------------------------------------------------------------------------