├── .gitignore ├── README.md ├── ansible ├── digital_ocean.ini ├── digital_ocean.py ├── docker.yml ├── roles │ ├── basic_server_setup │ │ ├── files │ │ │ └── sshd_config │ │ ├── tasks │ │ │ └── main.yml │ │ └── templates │ │ │ └── newuser_sudoer │ ├── blog_site │ │ ├── tasks │ │ │ └── main.yml │ │ └── templates │ │ │ └── s3cfg │ ├── docker │ │ └── tasks │ │ │ └── main.yml │ ├── docker_firewall │ │ └── tasks │ │ │ └── main.yml │ ├── docker_swarm │ │ └── tasks │ │ │ └── main.yml │ ├── reboot │ │ └── tasks │ │ │ └── main.yml │ └── swap │ │ └── tasks │ │ └── main.yml ├── server.yml ├── site_data │ ├── app_Dockerfile │ ├── app_default.conf │ ├── elasticsearch_Dockerfile │ ├── frontend_Dockerfile │ ├── frontend_default.conf │ ├── frontend_nginx.conf │ ├── logrotate_docker │ ├── mysql_Dockerfile │ ├── php_Dockerfile │ ├── site-backup │ ├── varnish_Dockerfile │ └── varnish_default.vcl ├── update.yml └── vars.yml ├── example.env ├── prod_build.sh ├── prod_update.sh ├── requirements.txt └── vagrant ├── Vagrantfile └── dev_up.sh /.gitignore: -------------------------------------------------------------------------------- 1 | venv/ 2 | .env 3 | .DS_Store 4 | wp_backup.sql 5 | ejosh/ 6 | *.key 7 | *.retry 8 | ejosh.co.crt 9 | zendfb/ 10 | .vagrant/ 11 | vagrant/*.log 12 | ejosh_site.bak.tar 13 | wp_backup.sql.tar 14 | ejosh_site.bak.tar.gz 15 | wp_backup.sql.tar.gz 16 | commands 17 | inventory 18 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Ansible and Docker 2 | This is the code I used to migrate my Wordpress blog to Docker. It has Ansible playbooks for setting up an Ubuntu server with Docker. It also includes the Dockerfiles for all the containers. 
3 | -------------------------------------------------------------------------------- /ansible/digital_ocean.ini: -------------------------------------------------------------------------------- 1 | # Ansible DigitalOcean external inventory script settings 2 | # 3 | 4 | [digital_ocean] 5 | 6 | # The module needs your DigitalOcean Client ID and API Key. 7 | # These may also be specified on the command line via --client-id and --api-key 8 | # or via the environment variables DO_CLIENT_ID and DO_API_KEY 9 | # 10 | #these are in the environment, check Trello for values 11 | 12 | 13 | # API calls to DigitalOcean may be slow. For this reason, we cache the results 14 | # of an API call. Set this to the path you want cache files to be written to. 15 | # One file will be written to this directory: 16 | # - ansible-digital_ocean.cache 17 | # 18 | cache_path = /tmp 19 | 20 | 21 | # The number of seconds a cache file is considered valid. After this many 22 | # seconds, a new API call will be made, and the cache file will be updated. 23 | # 24 | cache_max_age = 300 25 | -------------------------------------------------------------------------------- /ansible/digital_ocean.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | ''' 4 | DigitalOcean external inventory script 5 | ====================================== 6 | 7 | Generates Ansible inventory of DigitalOcean Droplets. 8 | 9 | In addition to the --list and --host options used by Ansible, there are options 10 | for generating JSON of other DigitalOcean data. This is useful when creating 11 | droplets. For example, --regions will return all the DigitalOcean Regions. 12 | This information can also be easily found in the cache file, whose default 13 | location is /tmp/ansible-digital_ocean.cache). 14 | 15 | The --pretty (-p) option pretty-prints the output for better human readability. 
16 | 17 | ---- 18 | Although the cache stores all the information received from DigitalOcean, 19 | the cache is not used for current droplet information (in --list, --host, 20 | --all, and --droplets). This is so that accurate droplet information is always 21 | found. You can force this script to use the cache with --force-cache. 22 | 23 | ---- 24 | Configuration is read from `digital_ocean.ini`, then from environment variables, 25 | and then from command-line arguments. 26 | 27 | Most notably, the DigitalOcean Client ID and API Key must be specified. They 28 | can be specified in the INI file or with the following environment variables: 29 | export DO_CLIENT_ID='DO123' DO_API_KEY='abc123' 30 | 31 | Alternatively, they can be passed on the command-line with --client-id and 32 | --api-key. 33 | 34 | If you specify DigitalOcean credentials in the INI file, a handy way to 35 | get them into your environment (e.g., to use the digital_ocean module) 36 | is to use the output of the --env option with export: 37 | export $(digital_ocean.py --env) 38 | 39 | ---- 40 | The following groups are generated from --list: 41 | - ID (droplet ID) 42 | - NAME (droplet NAME) 43 | - image_ID 44 | - image_NAME 45 | - distro_NAME (distribution NAME from image) 46 | - region_ID 47 | - region_NAME 48 | - size_ID 49 | - size_NAME 50 | - status_STATUS 51 | 52 | When run against a specific host, this script returns the following variables: 53 | - do_created_at 54 | - do_distro 55 | - do_id 56 | - do_image 57 | - do_image_id 58 | - do_ip_address 59 | - do_name 60 | - do_region 61 | - do_region_id 62 | - do_size 63 | - do_size_id 64 | - do_status 65 | 66 | ----- 67 | ``` 68 | usage: digital_ocean.py [-h] [--list] [--host HOST] [--all] 69 | [--droplets] [--regions] [--images] [--sizes] 70 | [--ssh-keys] [--domains] [--pretty] 71 | [--cache-path CACHE_PATH] 72 | [--cache-max_age CACHE_MAX_AGE] 73 | [--refresh-cache] [--client-id CLIENT_ID] 74 | [--api-key API_KEY] 75 | 76 | Produce an Ansible 
Inventory file based on DigitalOcean credentials 77 | 78 | optional arguments: 79 | -h, --help show this help message and exit 80 | --list List all active Droplets as Ansible inventory 81 | (default: True) 82 | --host HOST Get all Ansible inventory variables about a specific 83 | Droplet 84 | --all List all DigitalOcean information as JSON 85 | --droplets List Droplets as JSON 86 | --regions List Regions as JSON 87 | --images List Images as JSON 88 | --sizes List Sizes as JSON 89 | --ssh-keys List SSH keys as JSON 90 | --domains List Domains as JSON 91 | --pretty, -p Pretty-print results 92 | --cache-path CACHE_PATH 93 | Path to the cache files (default: .) 94 | --cache-max_age CACHE_MAX_AGE 95 | Maximum age of the cached items (default: 0) 96 | --refresh-cache Force refresh of cache by making API requests to 97 | DigitalOcean (default: False - use cache files) 98 | --client-id CLIENT_ID, -c CLIENT_ID 99 | DigitalOcean Client ID 100 | --api-key API_KEY, -a API_KEY 101 | DigitalOcean API Key 102 | ``` 103 | 104 | ''' 105 | 106 | # (c) 2013, Evan Wies 107 | # 108 | # Inspired by the EC2 inventory plugin: 109 | # https://github.com/ansible/ansible/blob/devel/plugins/inventory/ec2.py 110 | # 111 | # This file is part of Ansible, 112 | # 113 | # Ansible is free software: you can redistribute it and/or modify 114 | # it under the terms of the GNU General Public License as published by 115 | # the Free Software Foundation, either version 3 of the License, or 116 | # (at your option) any later version. 117 | # 118 | # Ansible is distributed in the hope that it will be useful, 119 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 120 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 121 | # GNU General Public License for more details. 122 | # 123 | # You should have received a copy of the GNU General Public License 124 | # along with Ansible. If not, see <http://www.gnu.org/licenses/>. 
125 | 126 | ###################################################################### 127 | 128 | import os 129 | import sys 130 | import re 131 | import argparse 132 | from time import time 133 | import ConfigParser 134 | 135 | try: 136 | import json 137 | except ImportError: 138 | import simplejson as json 139 | 140 | try: 141 | from dopy.manager import DoError, DoManager 142 | except ImportError, e: 143 | print "failed=True msg='`dopy` library required for this script'" 144 | sys.exit(1) 145 | 146 | 147 | 148 | class DigitalOceanInventory(object): 149 | 150 | ########################################################################### 151 | # Main execution path 152 | ########################################################################### 153 | 154 | def __init__(self): 155 | ''' Main execution path ''' 156 | 157 | # DigitalOceanInventory data 158 | self.data = {} # All DigitalOcean data 159 | self.inventory = {} # Ansible Inventory 160 | self.index = {} # Various indices of Droplet metadata 161 | 162 | # Define defaults 163 | self.cache_path = '.' 164 | self.cache_max_age = 0 165 | 166 | # Read settings, environment variables, and CLI arguments 167 | self.read_settings() 168 | self.read_environment() 169 | self.read_cli_args() 170 | 171 | # Verify credentials were set 172 | if not hasattr(self, 'client_id') or not hasattr(self, 'api_key'): 173 | print '''Could not find values for DigitalOcean client_id and api_key. 
174 | They must be specified via either ini file, command line argument (--client-id and --api-key), 175 | or environment variables (DO_CLIENT_ID and DO_API_KEY)''' 176 | sys.exit(-1) 177 | 178 | # env command, show DigitalOcean credentials 179 | if self.args.env: 180 | print "DO_CLIENT_ID=%s DO_API_KEY=%s" % (self.client_id, self.api_key) 181 | sys.exit(0) 182 | 183 | # Manage cache 184 | self.cache_filename = self.cache_path + "/ansible-digital_ocean.cache" 185 | self.cache_refreshed = False 186 | 187 | if not self.args.force_cache and self.args.refresh_cache or not self.is_cache_valid(): 188 | self.load_all_data_from_digital_ocean() 189 | else: 190 | self.load_from_cache() 191 | if len(self.data) == 0: 192 | if self.args.force_cache: 193 | print '''Cache is empty and --force-cache was specified''' 194 | sys.exit(-1) 195 | self.load_all_data_from_digital_ocean() 196 | else: 197 | # We always get fresh droplets for --list, --host, --all, and --droplets 198 | # unless --force-cache is specified 199 | if not self.args.force_cache and ( 200 | self.args.list or self.args.host or self.args.all or self.args.droplets): 201 | self.load_droplets_from_digital_ocean() 202 | 203 | # Pick the json_data to print based on the CLI command 204 | if self.args.droplets: json_data = { 'droplets': self.data['droplets'] } 205 | elif self.args.regions: json_data = { 'regions': self.data['regions'] } 206 | elif self.args.images: json_data = { 'images': self.data['images'] } 207 | elif self.args.sizes: json_data = { 'sizes': self.data['sizes'] } 208 | elif self.args.ssh_keys: json_data = { 'ssh_keys': self.data['ssh_keys'] } 209 | elif self.args.domains: json_data = { 'domains': self.data['domains'] } 210 | elif self.args.all: json_data = self.data 211 | 212 | elif self.args.host: json_data = self.load_droplet_variables_for_host() 213 | else: # '--list' this is last to make it default 214 | json_data = self.inventory 215 | 216 | if self.args.pretty: 217 | print json.dumps(json_data, 
sort_keys=True, indent=2) 218 | else: 219 | print json.dumps(json_data) 220 | # That's all she wrote... 221 | 222 | 223 | ########################################################################### 224 | # Script configuration 225 | ########################################################################### 226 | 227 | def read_settings(self): 228 | ''' Reads the settings from the digital_ocean.ini file ''' 229 | config = ConfigParser.SafeConfigParser() 230 | config.read(os.path.dirname(os.path.realpath(__file__)) + '/digital_ocean.ini') 231 | 232 | # Credentials 233 | if config.has_option('digital_ocean', 'client_id'): 234 | self.client_id = config.get('digital_ocean', 'client_id') 235 | if config.has_option('digital_ocean', 'api_key'): 236 | self.api_key = config.get('digital_ocean', 'api_key') 237 | 238 | # Cache related 239 | if config.has_option('digital_ocean', 'cache_path'): 240 | self.cache_path = config.get('digital_ocean', 'cache_path') 241 | if config.has_option('digital_ocean', 'cache_max_age'): 242 | self.cache_max_age = config.getint('digital_ocean', 'cache_max_age') 243 | 244 | 245 | def read_environment(self): 246 | ''' Reads the settings from environment variables ''' 247 | # Setup credentials 248 | if os.getenv("DO_CLIENT_ID"): self.client_id = os.getenv("DO_CLIENT_ID") 249 | if os.getenv("DO_API_KEY"): self.api_key = os.getenv("DO_API_KEY") 250 | 251 | 252 | def read_cli_args(self): 253 | ''' Command line argument processing ''' 254 | parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on DigitalOcean credentials') 255 | 256 | parser.add_argument('--list', action='store_true', help='List all active Droplets as Ansible inventory (default: True)') 257 | parser.add_argument('--host', action='store', help='Get all Ansible inventory variables about a specific Droplet') 258 | 259 | parser.add_argument('--all', action='store_true', help='List all DigitalOcean information as JSON') 260 | 
parser.add_argument('--droplets','-d', action='store_true', help='List Droplets as JSON') 261 | parser.add_argument('--regions', action='store_true', help='List Regions as JSON') 262 | parser.add_argument('--images', action='store_true', help='List Images as JSON') 263 | parser.add_argument('--sizes', action='store_true', help='List Sizes as JSON') 264 | parser.add_argument('--ssh-keys', action='store_true', help='List SSH keys as JSON') 265 | parser.add_argument('--domains', action='store_true',help='List Domains as JSON') 266 | 267 | parser.add_argument('--pretty','-p', action='store_true', help='Pretty-print results') 268 | 269 | parser.add_argument('--cache-path', action='store', help='Path to the cache files (default: .)') 270 | parser.add_argument('--cache-max_age', action='store', help='Maximum age of the cached items (default: 0)') 271 | parser.add_argument('--force-cache', action='store_true', default=False, help='Only use data from the cache') 272 | parser.add_argument('--refresh-cache','-r', action='store_true', default=False, help='Force refresh of cache by making API requests to DigitalOcean (default: False - use cache files)') 273 | 274 | parser.add_argument('--env','-e', action='store_true', help='Display DO_CLIENT_ID and DO_API_KEY') 275 | parser.add_argument('--client-id','-c', action='store', help='DigitalOcean Client ID') 276 | parser.add_argument('--api-key','-a', action='store', help='DigitalOcean API Key') 277 | 278 | self.args = parser.parse_args() 279 | 280 | if self.args.client_id: self.client_id = self.args.client_id 281 | if self.args.api_key: self.api_key = self.args.api_key 282 | if self.args.cache_path: self.cache_path = self.args.cache_path 283 | if self.args.cache_max_age: self.cache_max_age = self.args.cache_max_age 284 | 285 | # Make --list default if none of the other commands are specified 286 | if (not self.args.droplets and not self.args.regions and not self.args.images and 287 | not self.args.sizes and not self.args.ssh_keys 
and not self.args.domains and 288 | not self.args.all and not self.args.host): 289 | self.args.list = True 290 | 291 | 292 | ########################################################################### 293 | # Data Management 294 | ########################################################################### 295 | 296 | def load_all_data_from_digital_ocean(self): 297 | ''' Use dopy to get all the information from DigitalOcean and save data in cache files ''' 298 | manager = DoManager(self.client_id, self.api_key) 299 | 300 | self.data = {} 301 | self.data['droplets'] = self.sanitize_list(manager.all_active_droplets()) 302 | self.data['regions'] = self.sanitize_list(manager.all_regions()) 303 | self.data['images'] = self.sanitize_list(manager.all_images(filter=None)) 304 | self.data['sizes'] = self.sanitize_list(manager.sizes()) 305 | self.data['ssh_keys'] = self.sanitize_list(manager.all_ssh_keys()) 306 | self.data['domains'] = self.sanitize_list(manager.all_domains()) 307 | 308 | self.index = {} 309 | self.index['region_to_name'] = self.build_index(self.data['regions'], 'id', 'name') 310 | self.index['size_to_name'] = self.build_index(self.data['sizes'], 'id', 'name') 311 | self.index['image_to_name'] = self.build_index(self.data['images'], 'id', 'name') 312 | self.index['image_to_distro'] = self.build_index(self.data['images'], 'id', 'distribution') 313 | self.index['host_to_droplet'] = self.build_index(self.data['droplets'], 'ip_address', 'id', False) 314 | 315 | self.build_inventory() 316 | 317 | self.write_to_cache() 318 | 319 | 320 | def load_droplets_from_digital_ocean(self): 321 | ''' Use dopy to get droplet information from DigitalOcean and save data in cache files ''' 322 | manager = DoManager(self.client_id, self.api_key) 323 | self.data['droplets'] = self.sanitize_list(manager.all_active_droplets()) 324 | self.index['host_to_droplet'] = self.build_index(self.data['droplets'], 'ip_address', 'id', False) 325 | self.build_inventory() 326 | 
self.write_to_cache() 327 | 328 | 329 | def build_index(self, source_seq, key_from, key_to, use_slug=True): 330 | dest_dict = {} 331 | for item in source_seq: 332 | name = (use_slug and item.has_key('slug')) and item['slug'] or item[key_to] 333 | key = item[key_from] 334 | dest_dict[key] = name 335 | return dest_dict 336 | 337 | 338 | def build_inventory(self): 339 | '''Build Ansible inventory of droplets''' 340 | self.inventory = {} 341 | 342 | # add all droplets by id and name 343 | for droplet in self.data['droplets']: 344 | dest = droplet['ip_address'] 345 | 346 | self.inventory[droplet['id']] = [dest] 347 | self.push(self.inventory, droplet['name'], dest) 348 | self.push(self.inventory, 'region_'+droplet['region_id'], dest) 349 | self.push(self.inventory, 'image_' +droplet['image_id'], dest) 350 | self.push(self.inventory, 'size_' +droplet['size_id'], dest) 351 | self.push(self.inventory, 'status_'+droplet['status'], dest) 352 | 353 | region_name = self.index['region_to_name'].get(droplet['region_id']) 354 | if region_name: 355 | self.push(self.inventory, 'region_'+region_name, dest) 356 | 357 | size_name = self.index['size_to_name'].get(droplet['size_id']) 358 | if size_name: 359 | self.push(self.inventory, 'size_'+size_name, dest) 360 | 361 | image_name = self.index['image_to_name'].get(droplet['image_id']) 362 | if image_name: 363 | self.push(self.inventory, 'image_'+image_name, dest) 364 | 365 | distro_name = self.index['image_to_distro'].get(droplet['image_id']) 366 | if distro_name: 367 | self.push(self.inventory, 'distro_'+distro_name, dest) 368 | 369 | 370 | def load_droplet_variables_for_host(self): 371 | '''Generate a JSON response to a --host call''' 372 | host = self.to_safe(str(self.args.host)) 373 | 374 | if not host in self.index['host_to_droplet']: 375 | # try updating cache 376 | if not self.args.force_cache: 377 | self.load_all_data_from_digital_ocean() 378 | if not host in self.index['host_to_droplet']: 379 | # host might not exist anymore 
380 | return {} 381 | 382 | droplet = None 383 | if self.cache_refreshed: 384 | for drop in self.data['droplets']: 385 | if drop['ip_address'] == host: 386 | droplet = self.sanitize_dict(drop) 387 | break 388 | else: 389 | # Cache wasn't refreshed this run, so hit DigitalOcean API 390 | manager = DoManager(self.client_id, self.api_key) 391 | droplet_id = self.index['host_to_droplet'][host] 392 | droplet = self.sanitize_dict(manager.show_droplet(droplet_id)) 393 | 394 | if not droplet: 395 | return {} 396 | 397 | # Put all the information in a 'do_' namespace 398 | info = {} 399 | for k, v in droplet.items(): 400 | info['do_'+k] = v 401 | 402 | # Generate user-friendly variables (i.e. not the ID's) 403 | if droplet.has_key('region_id'): 404 | info['do_region'] = self.index['region_to_name'].get(droplet['region_id']) 405 | if droplet.has_key('size_id'): 406 | info['do_size'] = self.index['size_to_name'].get(droplet['size_id']) 407 | if droplet.has_key('image_id'): 408 | info['do_image'] = self.index['image_to_name'].get(droplet['image_id']) 409 | info['do_distro'] = self.index['image_to_distro'].get(droplet['image_id']) 410 | 411 | return info 412 | 413 | 414 | 415 | ########################################################################### 416 | # Cache Management 417 | ########################################################################### 418 | 419 | def is_cache_valid(self): 420 | ''' Determines if the cache files have expired, or if it is still valid ''' 421 | if os.path.isfile(self.cache_filename): 422 | mod_time = os.path.getmtime(self.cache_filename) 423 | current_time = time() 424 | if (mod_time + self.cache_max_age) > current_time: 425 | return True 426 | return False 427 | 428 | 429 | def load_from_cache(self): 430 | ''' Reads the data from the cache file and assigns it to member variables as Python Objects''' 431 | cache = open(self.cache_filename, 'r') 432 | json_data = cache.read() 433 | cache.close() 434 | data = json.loads(json_data) 435 | 436 | 
self.data = data['data'] 437 | self.inventory = data['inventory'] 438 | self.index = data['index'] 439 | 440 | 441 | def write_to_cache(self): 442 | ''' Writes data in JSON format to a file ''' 443 | data = { 'data': self.data, 'index': self.index, 'inventory': self.inventory } 444 | json_data = json.dumps(data, sort_keys=True, indent=2) 445 | 446 | cache = open(self.cache_filename, 'w') 447 | cache.write(json_data) 448 | cache.close() 449 | 450 | 451 | 452 | ########################################################################### 453 | # Utilities 454 | ########################################################################### 455 | 456 | def push(self, my_dict, key, element): 457 | ''' Pushed an element onto an array that may not have been defined in the dict ''' 458 | if key in my_dict: 459 | my_dict[key].append(element); 460 | else: 461 | my_dict[key] = [element] 462 | 463 | 464 | def to_safe(self, word): 465 | ''' Converts 'bad' characters in a string to underscores so they can be used as Ansible groups ''' 466 | return re.sub("[^A-Za-z0-9\-\.]", "_", word) 467 | 468 | 469 | def sanitize_dict(self, d): 470 | new_dict = {} 471 | for k, v in d.items(): 472 | if v != None: 473 | new_dict[self.to_safe(str(k))] = self.to_safe(str(v)) 474 | return new_dict 475 | 476 | 477 | def sanitize_list(self, seq): 478 | new_seq = [] 479 | for d in seq: 480 | new_seq.append(self.sanitize_dict(d)) 481 | return new_seq 482 | 483 | 484 | 485 | ########################################################################### 486 | # Run the script 487 | DigitalOceanInventory() 488 | -------------------------------------------------------------------------------- /ansible/docker.yml: -------------------------------------------------------------------------------- 1 | - hosts: all 2 | remote_user: jjohanan 3 | become: yes 4 | vars_files: 5 | - vars.yml 6 | roles: 7 | - swap 8 | - docker 9 | #- reboot 10 | - docker_firewall 11 | - docker_swarm 12 | - blog_site 13 | 
-------------------------------------------------------------------------------- /ansible/roles/basic_server_setup/files/sshd_config: -------------------------------------------------------------------------------- 1 | # Package generated configuration file 2 | # See the sshd_config(5) manpage for details 3 | 4 | # What ports, IPs and protocols we listen for 5 | Port 22 6 | # Use these options to restrict which interfaces/protocols sshd will bind to 7 | #ListenAddress :: 8 | #ListenAddress 0.0.0.0 9 | Protocol 2 10 | # HostKeys for protocol version 2 11 | HostKey /etc/ssh/ssh_host_rsa_key 12 | HostKey /etc/ssh/ssh_host_dsa_key 13 | HostKey /etc/ssh/ssh_host_ecdsa_key 14 | #Privilege Separation is turned on for security 15 | UsePrivilegeSeparation yes 16 | 17 | # Lifetime and size of ephemeral version 1 server key 18 | KeyRegenerationInterval 3600 19 | ServerKeyBits 768 20 | 21 | # Logging 22 | SyslogFacility AUTH 23 | LogLevel INFO 24 | 25 | # Authentication: 26 | LoginGraceTime 120 27 | PermitRootLogin no 28 | StrictModes yes 29 | 30 | RSAAuthentication yes 31 | PubkeyAuthentication yes 32 | #AuthorizedKeysFile %h/.ssh/authorized_keys 33 | 34 | # Don't read the user's ~/.rhosts and ~/.shosts files 35 | IgnoreRhosts yes 36 | # For this to work you will also need host keys in /etc/ssh_known_hosts 37 | RhostsRSAAuthentication no 38 | # similar for protocol version 2 39 | HostbasedAuthentication no 40 | # Uncomment if you don't trust ~/.ssh/known_hosts for RhostsRSAAuthentication 41 | #IgnoreUserKnownHosts yes 42 | 43 | # To enable empty passwords, change to yes (NOT RECOMMENDED) 44 | PermitEmptyPasswords no 45 | 46 | # Change to yes to enable challenge-response passwords (beware issues with 47 | # some PAM modules and threads) 48 | ChallengeResponseAuthentication no 49 | 50 | # Change to no to disable tunnelled clear text passwords 51 | PasswordAuthentication no 52 | 53 | # Kerberos options 54 | #KerberosAuthentication no 55 | #KerberosGetAFSToken no 56 | 
#KerberosOrLocalPasswd yes 57 | #KerberosTicketCleanup yes 58 | 59 | # GSSAPI options 60 | #GSSAPIAuthentication no 61 | #GSSAPICleanupCredentials yes 62 | 63 | X11Forwarding yes 64 | X11DisplayOffset 10 65 | PrintMotd no 66 | PrintLastLog yes 67 | TCPKeepAlive yes 68 | #UseLogin no 69 | 70 | #MaxStartups 10:30:60 71 | #Banner /etc/issue.net 72 | 73 | # Allow client to pass locale environment variables 74 | AcceptEnv LANG LC_* 75 | 76 | Subsystem sftp /usr/lib/openssh/sftp-server 77 | 78 | # Set this to 'yes' to enable PAM authentication, account processing, 79 | # and session processing. If this is enabled, PAM authentication will 80 | # be allowed through the ChallengeResponseAuthentication and 81 | # PasswordAuthentication. Depending on your PAM configuration, 82 | # PAM authentication via ChallengeResponseAuthentication may bypass 83 | # the setting of "PermitRootLogin without-password". 84 | # If you just want the PAM account and session checks to run without 85 | # PAM authentication, then enable this but set PasswordAuthentication 86 | # and ChallengeResponseAuthentication to 'no'. 
87 | UsePAM yes 88 | -------------------------------------------------------------------------------- /ansible/roles/basic_server_setup/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Make sure apt-get is up to date 3 | raw: apt-get update 4 | 5 | - name: 'install python2' 6 | raw: sudo apt-get -y install python-simplejson 7 | 8 | - name: Install aptitude 9 | apt: 10 | name: aptitude 11 | state: present 12 | 13 | - name: Update and upgrade apt packages 14 | apt: 15 | upgrade: yes 16 | update_cache: yes 17 | 18 | - name: Gathering facts 19 | setup: 20 | 21 | - name: Set hostname 22 | lineinfile: dest=/etc/hosts line="127.0.0.1 {{ansible_hostname}}" 23 | 24 | - name: Install ufw 25 | apt: name=ufw state=present 26 | 27 | - name: create a new user 28 | user: name={{ newuser }} 29 | state=present 30 | shell=/bin/bash 31 | 32 | - authorized_key: user="{{ newuser }}" 33 | key="{{ lookup('file', ssh_key ) }}" 34 | 35 | - name: Make sure we can sudo 36 | template: src=newuser_sudoer dest=/etc/sudoers.d/{{ newuser }}_sudoer mode=0440 37 | 38 | - name: Configure ufw 39 | ufw: rule=allow port=22 proto=tcp 40 | 41 | - name: Default deny 42 | ufw: state=enabled direction=incoming policy=deny 43 | 44 | - name: Disable root SSH 45 | copy: src=sshd_config dest=/etc/ssh/sshd_config 46 | 47 | - name: Restart SSH 48 | service: name=ssh state=restarted enabled=yes -------------------------------------------------------------------------------- /ansible/roles/basic_server_setup/templates/newuser_sudoer: -------------------------------------------------------------------------------- 1 | {{ newuser }} ALL=(ALL:ALL) NOPASSWD:ALL 2 | -------------------------------------------------------------------------------- /ansible/roles/blog_site/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Create Docker base directory 3 | file: path={{ work_dir }} 
state=directory 4 | 5 | - name: Untar site backup 6 | unarchive: src=../../site_data/ejosh_site.bak.tar.gz dest=/ 7 | tags: 8 | - initial 9 | 10 | - name: Move MySQL backup over 11 | unarchive: src=../../site_data/wp_backup.sql.tar.gz dest={{ work_dir }} 12 | tags: 13 | - initial 14 | 15 | - name: Copy needed files over 16 | copy: src=../../site_data/{{ item }} dest={{ work_dir }} 17 | with_items: 18 | - mysql_Dockerfile 19 | - php_Dockerfile 20 | - app_default.conf 21 | - app_Dockerfile 22 | - varnish_Dockerfile 23 | - varnish_default.vcl 24 | - frontend_default.conf 25 | - frontend_Dockerfile 26 | - frontend_nginx.conf 27 | - ejosh.co.crt 28 | - ejosh.co.key 29 | - site-backup 30 | - elasticsearch_Dockerfile 31 | 32 | - name: Check secrets 33 | shell: docker secret ls 34 | changed_when: False 35 | register: docker_secret 36 | when: "(inventory_hostname == swarm_manager or ansible_default_ipv4.address == swarm_manager)" 37 | 38 | - name: Create mysql root password 39 | shell: openssl rand -base64 20 | docker secret create mysql_root_password - 40 | when: "(inventory_hostname == swarm_manager or ansible_default_ipv4.address == swarm_manager) and docker_secret.stdout.find('mysql_root_password') == -1" 41 | 42 | - name: Create mysql user password 43 | shell: openssl rand -base64 20 | docker secret create mysql_password - 44 | when: "(inventory_hostname == swarm_manager or ansible_default_ipv4.address == swarm_manager) and docker_secret.stdout.find('mysql_password') == -1" 45 | 46 | - name: Create ejosh cert secret 47 | shell: docker secret create ejosh.co.crt {{ work_dir }}/ejosh.co.crt 48 | when: "(inventory_hostname == swarm_manager or ansible_default_ipv4.address == swarm_manager) and docker_secret.stdout.find('ejosh.co.crt') == -1" 49 | 50 | - name: Create ejosh cert key 51 | shell: docker secret create ejosh.co.key {{ work_dir }}/ejosh.co.key 52 | when: "(inventory_hostname == swarm_manager or ansible_default_ipv4.address == swarm_manager) and 
docker_secret.stdout.find('ejosh.co.key') == -1" 53 | 54 | - name: Delete the key 55 | file: 56 | path: "{{ work_dir }}/ejosh.co.key" 57 | state: absent 58 | 59 | - name: Build images 60 | docker_image: path={{ work_dir }} name=ejosh.co/{{ item }} dockerfile={{ item }}_Dockerfile state=present force=True 61 | with_items: 62 | - mysql 63 | - php 64 | - app 65 | - varnish 66 | - frontend 67 | - elasticsearch 68 | 69 | - name: Register volume 70 | shell: docker volume ls 71 | changed_when: False 72 | register: docker_volume 73 | 74 | - name: make sure esdata exists 75 | file: path=/esdata state=directory mode='g+rwx' group=1000 76 | 77 | - name: Create volume 78 | shell: docker volume create --opt type=none --opt device=/var/www/html --opt o=bind ejosh.co_data 79 | when: "docker_volume.stdout.find('ejosh.co_data') == -1" 80 | 81 | - name: Create esdata volume 82 | shell: docker volume create --opt type=none --opt device=/esdata --opt o=bind ejosh.co_esdata 83 | when: "docker_volume.stdout.find('ejosh.co_esdata') == -1" 84 | 85 | - name: Register services 86 | shell: docker service ls 87 | changed_when: False 88 | register: docker_service 89 | when: "(inventory_hostname == swarm_manager or ansible_default_ipv4.address == swarm_manager)" 90 | 91 | - name: Start ElasticSearch 92 | shell: docker service create --name elasticsearch --replicas 1 --network ejosh_network -e "discovery.type=single-node" -e "cluster.name=docker-cluster" -e "bootstrap.memory_lock=true" -e "ES_JAVA_OPTS=-Xms256m -Xmx256m" --mount type=volume,source=ejosh.co_esdata,destination=/usr/share/elasticsearch/data ejosh.co/elasticsearch 93 | when: "(inventory_hostname == swarm_manager or ansible_default_ipv4.address == swarm_manager) and docker_service.stdout.find('elasticsearch') == -1" 94 | 95 | - name: Start MySQL 96 | shell: docker service create --name mysql --replicas 1 --network ejosh_network --secret source=mysql_root_password,target=mysql_root_password --secret 
source=mysql_password,target=mysql_password -e MYSQL_ROOT_PASSWORD_FILE="/run/secrets/mysql_root_password" -e MYSQL_PASSWORD_FILE="/run/secrets/mysql_password" -e MYSQL_USER="wordpress" -e MYSQL_DATABASE="wordpress" ejosh.co/mysql 97 | when: "(inventory_hostname == swarm_manager or ansible_default_ipv4.address == swarm_manager) and docker_service.stdout.find('mysql') == -1" 98 | 99 | - name: Start php 100 | shell: docker service create --name php --network ejosh_network --secret source=mysql_password,target=mysql_password --mount type=volume,source=ejosh.co_data,destination=/var/www/html ejosh.co/php 101 | when: "(inventory_hostname == swarm_manager or ansible_default_ipv4.address == swarm_manager) and docker_service.stdout.find('php') == -1" 102 | 103 | - name: Start app 104 | shell: docker service create --name app --network ejosh_network --mount type=volume,source=ejosh.co_data,destination=/var/www/html --publish 8000:80 ejosh.co/app 105 | when: "(inventory_hostname == swarm_manager or ansible_default_ipv4.address == swarm_manager) and docker_service.stdout.find('app') == -1" 106 | 107 | - name: Start varnish 108 | shell: docker service create --name varnish --network ejosh_network --publish 8001:80 ejosh.co/varnish 109 | when: "(inventory_hostname == swarm_manager or ansible_default_ipv4.address == swarm_manager) and docker_service.stdout.find('varnish') == -1" 110 | 111 | - name: Start frontend 112 | shell: docker service create --name frontend --network ejosh_network --secret source=ejosh.co.crt,target=ejosh.co.crt --secret source=ejosh.co.key,target=ejosh.co.key --publish 80:80 --publish 443:443 ejosh.co/frontend 113 | when: "(inventory_hostname == swarm_manager or ansible_default_ipv4.address == swarm_manager) and docker_service.stdout.find('frontend') == -1" 114 | 115 | - name: Setup ufw 116 | ufw: rule=allow port=80 proto=tcp 117 | 118 | - name: Open up SSL ufw 119 | ufw: rule=allow port=443 proto=tcp 120 | 121 | - name: create the logrotate conf for 
docker 122 | copy: src=../../site_data/logrotate_docker dest=/etc/logrotate.d/docker 123 | 124 | - name: copy the backup script 125 | copy: src=../../site_data/site-backup dest={{ work_dir }}/site-backup mode=755 126 | 127 | - name: install s3cmd 128 | apt: name=s3cmd state=present update_cache=yes 129 | 130 | - name: install s3cfg 131 | template: src=s3cfg dest=/root/.s3cfg 132 | 133 | - name: schedule backup to run weekly 134 | cron: name="site backup" minute="0" hour="2" weekday="1" job="{{ work_dir }}/site-backup" user="root" 135 | tags: 136 | - prod 137 | -------------------------------------------------------------------------------- /ansible/roles/blog_site/templates/s3cfg: -------------------------------------------------------------------------------- 1 | [default] 2 | access_key = {{ lookup('env', 'AWS_S3_ACCESS') }} 3 | bucket_location = US 4 | cloudfront_host = cloudfront.amazonaws.com 5 | cloudfront_resource = /2010-07-15/distribution 6 | default_mime_type = binary/octet-stream 7 | delete_removed = False 8 | dry_run = False 9 | encoding = UTF-8 10 | encrypt = False 11 | follow_symlinks = False 12 | force = False 13 | get_continue = False 14 | gpg_command = /usr/bin/gpg 15 | gpg_decrypt = %(gpg_command)s -d --verbose --no-use-agent --batch --yes --passphrase-fd %(passphrase_fd)s -o %(output_file)s %(input_file)s 16 | gpg_encrypt = %(gpg_command)s -c --verbose --no-use-agent --batch --yes --passphrase-fd %(passphrase_fd)s -o %(output_file)s %(input_file)s 17 | gpg_passphrase = 18 | guess_mime_type = True 19 | host_base = s3.amazonaws.com 20 | host_bucket = %(bucket)s.s3.amazonaws.com 21 | human_readable_sizes = False 22 | list_md5 = False 23 | log_target_prefix = 24 | preserve_attrs = True 25 | progress_meter = True 26 | proxy_host = 27 | proxy_port = 0 28 | recursive = False 29 | recv_chunk = 4096 30 | reduced_redundancy = False 31 | secret_key = {{ lookup('env', 'AWS_S3_SECRET') }} 32 | send_chunk = 4096 33 | simpledb_host = sdb.amazonaws.com 34 | 
skip_existing = False 35 | socket_timeout = 10 36 | urlencoding_mode = normal 37 | use_https = False 38 | verbosity = WARNING 39 | -------------------------------------------------------------------------------- /ansible/roles/docker/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Get Docker GPG key 3 | apt_key: 4 | url: "https://download.docker.com/linux/ubuntu/gpg" 5 | state: present 6 | 7 | - name: Get more packages 8 | apt: 9 | name: ['linux-image-extra-virtual', 'apt-transport-https', 'ca-certificates', 'curl', 'software-properties-common', 'python-setuptools'] 10 | state: present 11 | 12 | - name: Add Docker repo 13 | apt_repository: 14 | repo: deb [arch=amd64] https://download.docker.com/linux/ubuntu {{ hostvars[inventory_hostname].ansible_distribution_release}} stable 15 | state: present 16 | update_cache: yes 17 | 18 | - name: Get Docker 19 | apt: 20 | name: ['docker-ce', 'docker-compose', 'python-docker'] 21 | state: present 22 | 23 | - name: Get Pip 24 | apt: name=python-pip state=present 25 | 26 | - name: Get docker for pip 27 | pip: 28 | name: docker 29 | state: latest 30 | -------------------------------------------------------------------------------- /ansible/roles/docker_firewall/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: create allow rules for all hosts 3 | ufw: 4 | rule: allow 5 | proto: any 6 | src: "{{ hostvars[item]['ansible_default_ipv4']['address'] }}" 7 | with_inventory_hostnames: 8 | - all 9 | -------------------------------------------------------------------------------- /ansible/roles/docker_swarm/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Check if "Swarm Mode" is enabled. 
3 | shell: docker info 4 | changed_when: False 5 | register: docker_info 6 | 7 | - name: get the main swarm manager 8 | set_fact: swarm_manager={{ groups['manager'][0] }} 9 | when: "'manager' in groups" 10 | 11 | - name: get the main swarm manager 12 | set_fact: swarm_manager={{ ansible_default_ipv4.address|default(inventory_hostname) }} 13 | when: "'manager' not in groups" 14 | 15 | - name: Start a Swarm 16 | shell: docker swarm init --advertise-addr {{ swarm_manager }} 17 | when: "docker_info.stdout.find('Swarm: active') == -1 and (inventory_hostname == swarm_manager or ansible_default_ipv4.address == swarm_manager)" 18 | 19 | - name: Get the worker join-token. 20 | shell: docker swarm join-token -q worker 21 | changed_when: False 22 | register: docker_worker_token 23 | when: "inventory_hostname == swarm_manager or ansible_default_ipv4.address == swarm_manager" 24 | 25 | - name: Create overlay network 26 | docker_network: 27 | name: ejosh_network 28 | driver: overlay 29 | #ignore_errors: yes 30 | #this errors but creates the network 31 | when: "inventory_hostname == swarm_manager or ansible_default_ipv4.address == swarm_manager" 32 | 33 | - name: Join the pending Swarm worker nodes. 
34 | shell: docker swarm join --token "{{ docker_worker_token.stdout }}" 35 | {{ swarm_manager }}:2377 36 | when: "docker_info.stdout.find('Swarm: active') == -1 37 | and 'worker' in group_names" 38 | -------------------------------------------------------------------------------- /ansible/roles/reboot/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Update and upgrade apt packages 3 | apt: 4 | upgrade: yes 5 | update_cache: yes 6 | 7 | - name: Restart server 8 | command: /sbin/shutdown -r 9 | async: 0 10 | poll: 0 11 | ignore_errors: true 12 | 13 | - name: Wait 14 | wait_for: 15 | port: 22 16 | host: '{{ (ansible_ssh_host|default(ansible_host))|default(inventory_hostname) }}' 17 | delay: 150 18 | delegate_to: localhost 19 | connection: local 20 | become: false -------------------------------------------------------------------------------- /ansible/roles/swap/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: register swap var 3 | stat: path=/mnt/swap 4 | register: swap_file 5 | 6 | - name: create the file to be used for swap 7 | command: fallocate -l 512M /mnt/swap 8 | when: swap_file.stat.exists == False 9 | 10 | - name: format the file for swap 11 | command: mkswap /mnt/swap 12 | when: swap_file.stat.exists == False 13 | 14 | - name: change swap file permissions 15 | file: path=/mnt/swap owner=root group=root mode=0600 16 | when: swap_file.stat.exists == False 17 | 18 | - name: add the file to the system as a swap file 19 | command: swapon /mnt/swap 20 | when: swap_file.stat.exists == False 21 | -------------------------------------------------------------------------------- /ansible/server.yml: -------------------------------------------------------------------------------- 1 | - hosts: all 2 | remote_user: root 3 | gather_facts: no 4 | become: true 5 | vars_files: 6 | - vars.yml 7 | roles: 8 | - basic_server_setup 9 | 
-------------------------------------------------------------------------------- /ansible/site_data/app_Dockerfile: -------------------------------------------------------------------------------- 1 | FROM nginx:latest 2 | 3 | COPY app_default.conf /etc/nginx/conf.d/default.conf 4 | -------------------------------------------------------------------------------- /ansible/site_data/app_default.conf: -------------------------------------------------------------------------------- 1 | server { 2 | listen 80 default_server; 3 | listen [::]:80 default_server ipv6only=on; 4 | 5 | root /var/www/html/ejosh; 6 | index index.php index.html index.htm; 7 | 8 | #server_name localhost; 9 | 10 | location @de { 11 | rewrite ^/de(.*) /de/index.php?q=$1; 12 | } 13 | 14 | location /de/ { 15 | try_files $uri $uri/ @de; 16 | } 17 | 18 | error_page 404 /404.html; 19 | 20 | error_page 500 502 503 504 /50x.html; 21 | location = /50x.html { 22 | root /usr/share/nginx/html; 23 | } 24 | 25 | location ~ \.php$ { 26 | try_files $uri =404; 27 | include fastcgi_params; 28 | fastcgi_split_path_info ^(.+\.php)(/.+)$; 29 | fastcgi_pass php:9000; 30 | fastcgi_index index.php; 31 | fastcgi_param SCRIPT_FILENAME $document_root/$fastcgi_script_name; 32 | } 33 | } 34 | -------------------------------------------------------------------------------- /ansible/site_data/elasticsearch_Dockerfile: -------------------------------------------------------------------------------- 1 | FROM docker.elastic.co/elasticsearch/elasticsearch:7.6.2 2 | 3 | RUN bin/elasticsearch-plugin install ingest-attachment -b -------------------------------------------------------------------------------- /ansible/site_data/frontend_Dockerfile: -------------------------------------------------------------------------------- 1 | FROM nginx:latest 2 | 3 | COPY frontend_default.conf /etc/nginx/conf.d/default.conf 4 | COPY frontend_nginx.conf /etc/nginx/nginx.conf 5 | 
-------------------------------------------------------------------------------- /ansible/site_data/frontend_default.conf: -------------------------------------------------------------------------------- 1 | server { 2 | listen 443 ssl http2 default_server; 3 | 4 | ssl_certificate /run/secrets/ejosh.co.crt; 5 | ssl_certificate_key /run/secrets/ejosh.co.key; 6 | 7 | location / { 8 | proxy_pass http://varnish:80; 9 | proxy_set_header X-Real-IP $remote_addr; 10 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 11 | proxy_set_header X-Forwarded-Proto https; 12 | proxy_set_header X-Forwarded-Port 443; 13 | proxy_set_header Host $host; 14 | proxy_http_version 1.1; 15 | } 16 | } 17 | 18 | server { 19 | listen 80 default_server; 20 | 21 | location / { 22 | proxy_pass http://varnish:80; 23 | proxy_set_header X-Real-IP $remote_addr; 24 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 25 | proxy_set_header X-Forwarded-Proto http; 26 | proxy_set_header X-Forwarded-Port 80; 27 | proxy_set_header Host $host; 28 | proxy_http_version 1.1; 29 | } 30 | } 31 | 32 | server { 33 | listen 443 ssl http2; 34 | server_name cadvisor.ejosh.co; 35 | 36 | ssl_certificate /run/secrets/ejosh.co.crt; 37 | ssl_certificate_key /run/secrets/ejosh.co.key; 38 | 39 | location / { 40 | proxy_pass http://varnish:80; 41 | proxy_set_header X-Real-IP $remote_addr; 42 | proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; 43 | proxy_set_header X-Forwarded-Proto https; 44 | proxy_set_header X-Forwarded-Port 443; 45 | proxy_set_header Host $host; 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /ansible/site_data/frontend_nginx.conf: -------------------------------------------------------------------------------- 1 | user www-data; 2 | worker_processes 4; 3 | 4 | error_log /var/log/nginx/error.log warn; 5 | pid /var/run/nginx.pid; 6 | 7 | 8 | events { 9 | worker_connections 1024; 10 | } 11 | 12 | 13 | http { 14 | include 
/etc/nginx/mime.types; 15 | default_type application/octet-stream; 16 | 17 | log_format main '$remote_addr - $remote_user [$time_local] "$request" ' 18 | '$status $body_bytes_sent "$http_referer" ' 19 | '"$http_user_agent" "$http_x_forwarded_for"'; 20 | 21 | access_log /dev/stdout main; 22 | error_log /dev/stderr; 23 | 24 | sendfile on; 25 | tcp_nopush on; 26 | tcp_nodelay on; 27 | keepalive_timeout 65; 28 | types_hash_max_size 2048; 29 | # server_tokens off; 30 | gzip on; 31 | gzip_disable "msie6"; 32 | gzip_http_version 1.1; 33 | gzip_vary on; 34 | gzip_comp_level 6; 35 | gzip_proxied any; 36 | gzip_types text/plain text/html text/css application/json application/javascript application/x-javascript text/javascript text/xml application/xml application/rss+xml application/atom+xml application/rdf+xml; 37 | 38 | 39 | include /etc/nginx/conf.d/*.conf; 40 | include /etc/nginx/sites-enabled/*; 41 | 42 | } 43 | -------------------------------------------------------------------------------- /ansible/site_data/logrotate_docker: -------------------------------------------------------------------------------- 1 | /var/lib/docker/containers/*/*-json.log { 2 | size 5120k 3 | rotate 5 4 | copytruncate 5 | } 6 | -------------------------------------------------------------------------------- /ansible/site_data/mysql_Dockerfile: -------------------------------------------------------------------------------- 1 | FROM mysql:5.7 2 | 3 | COPY tmp/wp_backup.sql /docker-entrypoint-initdb.d/ 4 | -------------------------------------------------------------------------------- /ansible/site_data/php_Dockerfile: -------------------------------------------------------------------------------- 1 | FROM php:fpm 2 | 3 | RUN docker-php-ext-install mysqli bcmath exif imagick\ 4 | && docker-php-ext-enable mysqli bcmath exif imagick -------------------------------------------------------------------------------- /ansible/site_data/site-backup: 
-------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | WEEK=$((($(date +%e)-1)/7+1)) 3 | docker exec -t $(docker container ls -f "name=mysql.*." -l -q) /bin/bash -c '(export MYSQL_PWD="$(cat /run/secrets/mysql_root_password)"; mysqldump -hlocalhost -uroot $MYSQL_DATABASE)' > /tmp/wp_backup.sql && tar -zcvf /tmp/wp_backup$WEEK.sql.tar.gz /tmp/wp_backup.sql 4 | docker run -it --volumes-from $(docker container ls -f "name=php.*." -l -q) -v /tmp:/backup debian:stretch-slim tar -zcvf /backup/ejosh_site.bak.tar.gz /var/www/html/ejosh/ 5 | 6 | s3cmd put /tmp/wp_backup$WEEK.sql.tar.gz s3://ejoshblog-backup/weekly/wp_backup$WEEK.sql.tar.gz && rm /tmp/wp_backup$WEEK.sql.tar.gz 7 | s3cmd put /tmp/ejosh_site.bak.tar.gz s3://ejoshblog-backup/weekly/ejosh_site$WEEK.bak.tar.gz && rm /tmp/ejosh_site.bak.tar.gz 8 | -------------------------------------------------------------------------------- /ansible/site_data/varnish_Dockerfile: -------------------------------------------------------------------------------- 1 | FROM varnish:6 2 | 3 | COPY varnish_default.vcl /etc/varnish/default.vcl 4 | -------------------------------------------------------------------------------- /ansible/site_data/varnish_default.vcl: -------------------------------------------------------------------------------- 1 | # 2 | # This is an example VCL file for Varnish. 3 | # 4 | # It does not do anything by default, delegating control to the 5 | # builtin VCL. The builtin VCL is called when there is no explicit 6 | # return statement. 7 | # 8 | # See the VCL chapters in the Users Guide at https://www.varnish-cache.org/docs/ 9 | # and http://varnish-cache.org/trac/wiki/VCLExamples for more examples. 10 | 11 | # Update of varnish 4 to work with wordpress 12 | # Marker to tell the VCL compiler that this VCL has been adapted to the 13 | # new 4.0 format. 14 | vcl 4.0; 15 | 16 | # Default backend definition. Set this to point to your content server. 
17 | backend default { 18 | .host = "app"; 19 | .port = "80"; 20 | .connect_timeout = 600s; 21 | .first_byte_timeout = 600s; 22 | .between_bytes_timeout = 600s; 23 | .max_connections = 800; 24 | } 25 | 26 | # Only allow purging from specific IPs 27 | acl purge { 28 | "localhost"; 29 | "app"; 30 | } 31 | 32 | # This function is used when a request is send by a HTTP client (Browser) 33 | sub vcl_recv { 34 | # Normalize the header, remove the port (in case you're testing this on various TCP ports) 35 | set req.http.Host = regsub(req.http.Host, ":[0-9]+", ""); 36 | 37 | # Remove has_js and CloudFlare/Google Analytics __* cookies. 38 | set req.http.Cookie = regsuball(req.http.Cookie, "(^|;\s*)(_[_a-z]+|has_js)=[^;]*", ""); 39 | # Remove a ";" prefix, if present. 40 | set req.http.Cookie = regsub(req.http.Cookie, "^;\s*", ""); 41 | 42 | # Allow purging from ACL 43 | if (req.method == "PURGE") { 44 | # If not allowed then a error 405 is returned 45 | if (!client.ip ~ purge) { 46 | return(synth(405, "This IP is not allowed to send PURGE requests.")); 47 | } 48 | # If allowed, do a cache_lookup -> vlc_hit() or vlc_miss() 49 | return (purge); 50 | } 51 | 52 | # Post requests will not be cached 53 | if (req.http.Authorization || req.method == "POST") { 54 | return (pass); 55 | } 56 | 57 | # --- WordPress specific configuration 58 | 59 | # Did not cache the admin and login pages 60 | if (req.url ~ "wp-(login|admin)" || req.url ~ "preview=true") { 61 | return (pass); 62 | } 63 | 64 | # Remove the "has_js" cookie 65 | set req.http.Cookie = regsuball(req.http.Cookie, "has_js=[^;]+(; )?", ""); 66 | 67 | # Remove any Google Analytics based cookies 68 | set req.http.Cookie = regsuball(req.http.Cookie, "__utm.=[^;]+(; )?", ""); 69 | 70 | # Remove the Quant Capital cookies (added by some plugin, all __qca) 71 | set req.http.Cookie = regsuball(req.http.Cookie, "__qc.=[^;]+(; )?", ""); 72 | 73 | # Remove the wp-settings-1 cookie 74 | set req.http.Cookie = regsuball(req.http.Cookie, 
"wp-settings-1=[^;]+(; )?", ""); 75 | 76 | # Remove the wp-settings-time-1 cookie 77 | set req.http.Cookie = regsuball(req.http.Cookie, "wp-settings-time-1=[^;]+(; )?", ""); 78 | 79 | # Remove the wp test cookie 80 | set req.http.Cookie = regsuball(req.http.Cookie, "wordpress_test_cookie=[^;]+(; )?", ""); 81 | 82 | # Are there cookies left with only spaces or that are empty? 83 | if (req.http.cookie ~ "^ *$") { 84 | unset req.http.cookie; 85 | } 86 | 87 | # Cache the following files extensions 88 | if (req.url ~ "\.(css|js|png|gif|jp(e)?g|swf|ico)") { 89 | unset req.http.cookie; 90 | } 91 | 92 | # Normalize Accept-Encoding header and compression 93 | # https://www.varnish-cache.org/docs/3.0/tutorial/vary.html 94 | if (req.http.Accept-Encoding) { 95 | # Do no compress compressed files... 96 | if (req.url ~ "\.(jpg|png|gif|gz|tgz|bz2|tbz|mp3|ogg)$") { 97 | unset req.http.Accept-Encoding; 98 | } elsif (req.http.Accept-Encoding ~ "gzip") { 99 | set req.http.Accept-Encoding = "gzip"; 100 | } elsif (req.http.Accept-Encoding ~ "deflate") { 101 | set req.http.Accept-Encoding = "deflate"; 102 | } else { 103 | unset req.http.Accept-Encoding; 104 | } 105 | } 106 | 107 | # Check the cookies for wordpress-specific items 108 | if (req.http.Cookie ~ "wordpress_" || req.http.Cookie ~ "comment_") { 109 | return (pass); 110 | } 111 | if (!req.http.cookie) { 112 | unset req.http.cookie; 113 | } 114 | 115 | # --- End of WordPress specific configuration 116 | 117 | # Did not cache HTTP authentication and HTTP Cookie 118 | if (req.http.Authorization || req.http.Cookie) { 119 | # Not cacheable by default 120 | return (pass); 121 | } 122 | 123 | # Cache all others requests 124 | return (hash); 125 | } 126 | 127 | sub vcl_pipe { 128 | return (pipe); 129 | } 130 | 131 | sub vcl_pass { 132 | return (fetch); 133 | } 134 | 135 | # The data on which the hashing will take place 136 | sub vcl_hash { 137 | hash_data(req.url); 138 | if (req.http.host) { 139 | hash_data(req.http.host); 140 | } else 
{ 141 | hash_data(server.ip); 142 | } 143 | 144 | # If the client supports compression, keep that in a different cache 145 | if (req.http.Accept-Encoding) { 146 | hash_data(req.http.Accept-Encoding); 147 | } 148 | 149 | if (req.http.X-Forwarded-Proto ~ "https") { 150 | hash_data(req.http.X-Forwarded-Proto); 151 | } 152 | 153 | return (lookup); 154 | } 155 | 156 | # This function is used when a request is sent by our backend (Nginx server) 157 | sub vcl_backend_response { 158 | # Remove some headers we never want to see 159 | unset beresp.http.Server; 160 | unset beresp.http.X-Powered-By; 161 | 162 | # For static content strip all backend cookies 163 | if (bereq.url ~ "\.(css|js|png|gif|jp(e?)g)|swf|ico") { 164 | unset beresp.http.cookie; 165 | set beresp.http.cache-control = "max-age=31536000"; 166 | } 167 | # Don't store backend 168 | if (bereq.url ~ "wp-(login|admin)" || bereq.url ~ "preview=true") { 169 | set beresp.uncacheable = true; 170 | set beresp.ttl = 30s; 171 | return (deliver); 172 | } 173 | 174 | # Only allow cookies to be set if we're in admin area 175 | if (!(bereq.url ~ "(wp-login|wp-admin|preview=true)")) { 176 | unset beresp.http.set-cookie; 177 | } 178 | 179 | # don't cache response to posted requests or those with basic auth 180 | if ( bereq.method == "POST" || bereq.http.Authorization ) { 181 | set beresp.uncacheable = true; 182 | set beresp.ttl = 120s; 183 | return (deliver); 184 | } 185 | 186 | # don't cache search results 187 | if ( bereq.url ~ "\?s=" ){ 188 | set beresp.uncacheable = true; 189 | set beresp.ttl = 120s; 190 | return (deliver); 191 | } 192 | 193 | # only cache status ok 194 | if ( beresp.status != 200 ) { 195 | set beresp.uncacheable = true; 196 | set beresp.ttl = 120s; 197 | return (deliver); 198 | } 199 | 200 | # A TTL of 2h 201 | set beresp.ttl = 2h; 202 | # Define the default grace period to serve cached content 203 | set beresp.grace = 2h; 204 | 205 | return (deliver); 206 | } 207 | 208 | # The routine when we deliver the 
HTTP request to the user 209 | # Last chance to modify headers that are sent to the client 210 | sub vcl_deliver { 211 | if (obj.hits > 0) { 212 | set resp.http.X-Cache = "cached"; 213 | } else { 214 | set resp.http.x-Cache = "uncached"; 215 | } 216 | 217 | # Remove some headers: PHP version 218 | #unset resp.http.X-Powered-By; 219 | 220 | # Remove some headers: Apache version & OS 221 | #unset resp.http.Server; 222 | 223 | # Remove some heanders: Varnish 224 | #unset resp.http.Via; 225 | #unset resp.http.X-Varnish; 226 | 227 | return (deliver); 228 | } 229 | 230 | sub vcl_init { 231 | return (ok); 232 | } 233 | 234 | sub vcl_fini { 235 | return (ok); 236 | } 237 | -------------------------------------------------------------------------------- /ansible/update.yml: -------------------------------------------------------------------------------- 1 | - hosts: all 2 | remote_user: "{{ newuser }}" 3 | become: yes 4 | vars_files: 5 | - vars.yml 6 | roles: 7 | - reboot 8 | - docker_firewall 9 | - docker_swarm 10 | - blog_site 11 | -------------------------------------------------------------------------------- /ansible/vars.yml: -------------------------------------------------------------------------------- 1 | newuser: jjohanan 2 | work_dir: /var/lib/blog 3 | site_name: ejosh.co 4 | ssh_key: /Users/jjohanan/.ssh/id_ed25519.pub 5 | ansible_device: ansible_eth1 6 | -------------------------------------------------------------------------------- /example.env: -------------------------------------------------------------------------------- 1 | export DO_CLIENT_ID=INSERT_YOUR_OWN_VALUES 2 | export DO_API_KEY=INSERT_YOUR_OWN_VALUES 3 | export AWS_S3_SECRET=INSERT_YOUR_OWN_VALUES 4 | export AWS_S3_ACCESS=INSERT_YOUR_OWN_VALUES 5 | -------------------------------------------------------------------------------- /prod_build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | source ./.env 3 | source ./venv/bin/activate 4 | 5 
| #ansible localhost --inventory-file=inventory -m command -a " ssh-keygen -lf <(ssh-keyscan {{inventory_hostname}} 2>/dev/null)" 6 | ansible-playbook --inventory-file=inventory ./ansible/server.yml 7 | ansible-playbook --inventory-file=inventory ./ansible/docker.yml 8 | -------------------------------------------------------------------------------- /prod_update.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | source ./.env 3 | source ./venv/bin/activate 4 | 5 | ansible-playbook --inventory-file=inventory ./ansible/update.yml --skip-tags "initial" 6 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | ansible==2.10.0 2 | -------------------------------------------------------------------------------- /vagrant/Vagrantfile: -------------------------------------------------------------------------------- 1 | # -*- mode: ruby -*- 2 | # vi: set ft=ruby : 3 | 4 | # All Vagrant configuration is done below. The "2" in Vagrant.configure 5 | # configures the configuration version (we support older styles for 6 | # backwards compatibility). Please don't change it unless you know what 7 | # you're doing. 8 | Vagrant.configure(2) do |config| 9 | # The most common configuration options are documented and commented below. 10 | # For a complete reference, please see the online documentation at 11 | # https://docs.vagrantup.com. 12 | 13 | # Every Vagrant development environment requires a box. You can search for 14 | # boxes at https://atlas.hashicorp.com/search. 15 | config.vm.box = "ubuntu/bionic64" 16 | #config.vm.box = "debian/jessie64" 17 | 18 | # Disable automatic box update checking. If you disable this, then 19 | # boxes will only be checked for updates when the user runs 20 | # `vagrant box outdated`. This is not recommended. 
21 | # config.vm.box_check_update = false 22 | 23 | # Create a forwarded port mapping which allows access to a specific port 24 | # within the machine from a port on the host machine. In the example below, 25 | # accessing "localhost:8080" will access port 80 on the guest machine. 26 | config.vm.network "forwarded_port", guest: 80, host: 8080 27 | config.vm.network "forwarded_port", guest: 443, host: 8443 28 | #config.vm.network "forwarded_port", guest: 8000, host: 8000 29 | config.vm.network "forwarded_port", guest: 8001, host: 8001 30 | config.vm.network "forwarded_port", guest: 8080, host: 8081 31 | config.vm.network "forwarded_port", guest: 9200, host: 9200 32 | 33 | # Create a private network, which allows host-only access to the machine 34 | # using a specific IP. 35 | # config.vm.network "private_network", ip: "192.168.33.10" 36 | 37 | # Create a public network, which generally matched to bridged network. 38 | # Bridged networks make the machine appear as another physical device on 39 | # your network. 40 | # config.vm.network "public_network" 41 | 42 | # Share an additional folder to the guest VM. The first argument is 43 | # the path on the host to the actual folder. The second argument is 44 | # the path on the guest to mount the folder. And the optional third 45 | # argument is a set of non-required options. 46 | # config.vm.synced_folder "../data", "/vagrant_data" 47 | 48 | # Provider-specific configuration so you can fine-tune various 49 | # backing providers for Vagrant. These expose provider-specific options. 50 | # Example for VirtualBox: 51 | # 52 | config.vm.provider "virtualbox" do |vb| 53 | # # Display the VirtualBox GUI when booting the machine 54 | # vb.gui = true 55 | # 56 | # # Customize the amount of memory on the VM: 57 | vb.memory = "2048" 58 | end 59 | # 60 | # View the documentation for the provider you are using for more 61 | # information on available options. 62 | 63 | # Define a Vagrant Push strategy for pushing to Atlas. 
Other push strategies 64 | # such as FTP and Heroku are also available. See the documentation at 65 | # https://docs.vagrantup.com/v2/push/atlas.html for more information. 66 | # config.push.define "atlas" do |push| 67 | # push.app = "YOUR_ATLAS_USERNAME/YOUR_APPLICATION_NAME" 68 | # end 69 | 70 | # Enable provisioning with a shell script. Additional provisioners such as 71 | # Puppet, Chef, Ansible, Salt, and Docker are also available. Please see the 72 | # documentation for more information about their specific syntax and use. 73 | # config.vm.provision "shell", inline: <<-SHELL 74 | # sudo apt-get update 75 | # sudo apt-get install -y apache2 76 | # SHELL 77 | 78 | config.vm.provision "ansible" do |ansible| 79 | ansible.playbook = "../ansible/server.yml" 80 | #ansible.sudo = true 81 | ansible.extra_vars = { ansible_ssh_user: 'ubuntu' } 82 | ansible.host_key_checking = false 83 | end 84 | 85 | end 86 | -------------------------------------------------------------------------------- /vagrant/dev_up.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | source ../.env 3 | source ../venv/bin/activate 4 | 5 | vagrant up 6 | 7 | export ANSIBLE_HOST_KEY_CHECKING=False 8 | ansible-playbook --user=ubuntu --inventory-file=./.vagrant/provisioners/ansible/inventory/vagrant_ansible_inventory ../ansible/server.yml --skip-tags "prod" 9 | ansible-playbook --user=ubuntu --inventory-file=./.vagrant/provisioners/ansible/inventory/vagrant_ansible_inventory ../ansible/docker.yml --skip-tags "prod" 10 | 11 | #sudo ssh -p 2222 -gNfL 80:localhost:80 ubuntu@localhost -i .vagrant/machines/default/virtualbox/private_key -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no 12 | echo " 13 | rdr pass inet proto tcp from any to any port 80 -> 127.0.0.1 port 8080 14 | rdr pass inet proto tcp from any to any port 443 -> 127.0.0.1 port 8443 15 | " | sudo pfctl -ef - 16 | 
--------------------------------------------------------------------------------