├── deploy ├── builds │ └── .gitkeep └── logs │ └── .gitkeep ├── src ├── hadjango │ ├── __init__.py │ ├── uwsgi │ │ ├── __init__.py │ │ ├── bootstrap.py │ │ └── wsgi.py │ └── management │ │ ├── __init__.py │ │ └── commands │ │ ├── __init__.py │ │ └── warmup.py ├── requirements │ ├── hadjango.txt │ └── prod_without_hash.txt └── local_settings.py ├── .gitconfig ├── docker ├── README.md ├── mysql.repo ├── hadjango.repo ├── epel.repo ├── epel-testing.repo ├── start-docker.sh ├── supervisor.conf ├── RPM-GPG-KEY-EPEL-7 ├── RPM-GPG-KEY-hadjango ├── start-nginx.sh └── RPM-GPG-KEY-mysql ├── .dockerignore ├── .gitignore ├── fabfile ├── README.md ├── test.py ├── build_port_convert.py ├── utils.py └── __init__.py ├── wsgi ├── stub.ini ├── uwsgi_status.ini ├── README.md ├── stub.py └── uwsgi_status.py ├── .gitmodules ├── conf └── uwsgi │ ├── vassal.skel │ └── zerg.skel ├── LICENSE.md ├── docker-compose.yml ├── Dockerfile └── README.md /deploy/builds/.gitkeep: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /deploy/logs/.gitkeep: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /src/hadjango/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /src/hadjango/uwsgi/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /src/hadjango/management/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /src/hadjango/management/commands/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /.gitconfig: -------------------------------------------------------------------------------- 1 | [user] 2 | email = nobody@nowhere.com 3 | name = Nobody 4 | -------------------------------------------------------------------------------- /src/requirements/hadjango.txt: -------------------------------------------------------------------------------- 1 | uWSGI==2.0.12 2 | futures==3.0.3 3 | requests-futures==0.9.5 4 | -------------------------------------------------------------------------------- /docker/README.md: -------------------------------------------------------------------------------- 1 | These are files that are either copied into docker containers, or are 2 | executed as the entry points in `docker-compose.yml`. 3 | -------------------------------------------------------------------------------- /.dockerignore: -------------------------------------------------------------------------------- 1 | .git 2 | .npm 3 | deploy 4 | node_modules 5 | .bash_history 6 | .lesshst 7 | .npmrc 8 | .cache 9 | .ipython 10 | docker/artifacts 11 | *.pyc 12 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | logs/* 2 | .bash_history 3 | .lesshst 4 | .npmrc 5 | .cache 6 | deps/ 7 | *.pid 8 | .ipython 9 | assets/ 10 | deploy/ 11 | docker/artifacts 12 | *.pyc 13 | .npm 14 | -------------------------------------------------------------------------------- /fabfile/README.md: -------------------------------------------------------------------------------- 1 | Defines all of the commands available to fabric (run `fab --list` from the root 2 | of the repository to see a full list). 3 | 4 | All tasks are defined in `fabfile/__init__.py`. 5 | -------------------------------------------------------------------------------- /wsgi/stub.ini: -------------------------------------------------------------------------------- 1 | [uwsgi] 2 | plugins = logfile,http,python 3 | master = true 4 | uid = olympia 5 | gid = olympia 6 | http = :1999 7 | processes = 1 8 | wsgi-file = /code/wsgi/stub.py 9 | procname = uwsgi stub 10 | logto = /code/deploy/logs/uwsgi-stub.log 11 | -------------------------------------------------------------------------------- /docker/mysql.repo: -------------------------------------------------------------------------------- 1 | [mysql56-community] 2 | name=MySQL 5.6 Community Server 3 | baseurl=https://s3-us-west-2.amazonaws.com/net-mozaws-prod-us-west-2-ops-rpmrepo-mirror/mysql/5.6/7/$basearch/ 4 | enabled=1 5 | gpgcheck=1 6 | gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-mysql 7 | -------------------------------------------------------------------------------- /src/requirements/prod_without_hash.txt: -------------------------------------------------------------------------------- 1 | # Pinned versions, equivalent to their editable git installs in 2 | # addons-server/requirements/prod_without_hash.txt, to allow wheel install with 3 | # --no-index 4 | jingo-minify==0.6.0 5 | django-cache-machine==0.9.1 6 | django-mobility==0.1 7 | -------------------------------------------------------------------------------- /wsgi/uwsgi_status.ini: -------------------------------------------------------------------------------- 1 | [uwsgi] 2 | plugins = logfile,http,python 3 | master = true 4 | uid = olympia 5 | gid = olympia 6 | http = :9999 7 | processes = 1 8 | wsgi-file = /code/wsgi/uwsgi_status.py 9 | procname = uwsgi status 10 | logto = /code/deploy/logs/uwsgi-status.log 11 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "addons-server"] 2 | path = addons-server 3 | url = https://github.com/hadjango/addons-server.git 4 | [submodule "src/uwsgi-dashboard"] 5 | path = uwsgi-dashboard 6 | url = https://github.com/hadjango/uwsgi-dashboard.git 7 | [submodule "nginx"] 8 | path = nginx 9 | url = https://github.com/hadjango/addons-nginx.git 10 | -------------------------------------------------------------------------------- /conf/uwsgi/vassal.skel: -------------------------------------------------------------------------------- 1 | [uwsgi] 2 | plugins = logfile,http,python 3 | build = %c 4 | port = @(exec:///code/fabfile/build_port_convert.py %c) 5 | uid = olympia 6 | gid = olympia 7 | chmod-socket = 666 8 | chown-socket = olympia:olympia 9 | 10 | thunder-lock = true 11 | master = true 12 | stats = /var/run/uwsgi/%(build)-vassal.stats 13 | processes = 0 14 | http = :%(port) 15 | zerg-server = /var/run/uwsgi/%(build).sock 16 | logto = /code/deploy/logs/vassal.%(build).log 17 | 18 | ; Kill request after 15 seconds 19 | http-timeout = 15 20 | -------------------------------------------------------------------------------- /docker/hadjango.repo: -------------------------------------------------------------------------------- 1 | [hadjango] 2 | name=hadjango 3 | baseurl=https://hadjango.github.io/rpms/el/7/$basearch 4 | repo_gpgcheck=0 5 | gpgcheck=1 6 | enabled=1 7 | gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-hadjango 8 | sslverify=1 9 | sslcacert=/etc/pki/tls/certs/ca-bundle.crt 10 | metadata_expire=300 11 | 12 | [hadjango-source] 13 | name=hadjango-source 14 | baseurl=https://hadjango.github.io/rpms/el/7/SRPMS 15 | repo_gpgcheck=0 16 | gpgcheck=1 17 | enabled=1 18 | gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-hadjango 19 | sslverify=1 20 | sslcacert=/etc/pki/tls/certs/ca-bundle.crt 21 | metadata_expire=300 22 | -------------------------------------------------------------------------------- /wsgi/README.md: -------------------------------------------------------------------------------- 1 | Contains the uwsgi configuration files (`*.ini`) and wsgi files `*.py` for 2 | simple wsgi applications that start on load. 3 | 4 |
5 |
stub.ini / stub.py
6 |
7 | A “hello world” wsgi application that is the initial upstream for 8 | nginx before `fab init` has been run. 9 |
10 |
uwsgi_status.ini / uwsgi_status.py
11 |
12 | A very simple app, bound on port `:9999`, that merges the output of 13 | the various uwsgi stat sockets at `/var/run/uwsgi/*.stats` and combines 14 | that with information pulled from the process table (see 15 | `src/hadjango/uwsgi/wsgi.py`), outputing the result as json. 16 | This script returns the data that powers the uwsgi dashboard 17 | (at http://live.addons/uwsgi/). 18 |
19 |
20 | -------------------------------------------------------------------------------- /docker/epel.repo: -------------------------------------------------------------------------------- 1 | [epel] 2 | name=Extra Packages for Enterprise Linux 7 - $basearch 3 | #baseurl=http://download.fedoraproject.org/pub/epel/7/$basearch 4 | mirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-7&arch=$basearch 5 | failovermethod=priority 6 | enabled=1 7 | gpgcheck=1 8 | gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-7 9 | 10 | [epel-debuginfo] 11 | name=Extra Packages for Enterprise Linux 7 - $basearch - Debug 12 | #baseurl=http://download.fedoraproject.org/pub/epel/7/$basearch/debug 13 | mirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-debug-7&arch=$basearch 14 | failovermethod=priority 15 | enabled=0 16 | gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-7 17 | gpgcheck=1 18 | 19 | [epel-source] 20 | name=Extra Packages for Enterprise Linux 7 - $basearch - Source 21 | #baseurl=http://download.fedoraproject.org/pub/epel/7/SRPMS 22 | mirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-source-7&arch=$basearch 23 | failovermethod=priority 24 | enabled=0 25 | gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-7 26 | gpgcheck=1 -------------------------------------------------------------------------------- /LICENSE.md: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2016 Laurențiu Păncescu 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a 6 | copy of this software and associated documentation files (the 7 | "Software"), to deal in the Software without restriction, including 8 | without limitation the rights to use, copy, modify, merge, publish, 9 | distribute, sublicense, and/or sell copies of the Software, and to 10 | permit persons to whom the Software is furnished to do so, subject to 11 | the following conditions: 12 | 13 | The above copyright notice and this permission notice shall be included 14 | in all copies or substantial portions of the Software. 15 | 16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS 17 | OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 18 | MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 19 | IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY 20 | CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, 21 | TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE 22 | SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 23 | -------------------------------------------------------------------------------- /docker/epel-testing.repo: -------------------------------------------------------------------------------- 1 | [epel-testing] 2 | name=Extra Packages for Enterprise Linux 7 - Testing - $basearch 3 | #baseurl=http://download.fedoraproject.org/pub/epel/testing/7/$basearch 4 | mirrorlist=https://mirrors.fedoraproject.org/metalink?repo=testing-epel7&arch=$basearch 5 | failovermethod=priority 6 | enabled=0 7 | gpgcheck=1 8 | gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-7 9 | 10 | [epel-testing-debuginfo] 11 | name=Extra Packages for Enterprise Linux 7 - Testing - $basearch - Debug 12 | #baseurl=http://download.fedoraproject.org/pub/epel/testing/7/$basearch/debug 13 | mirrorlist=https://mirrors.fedoraproject.org/metalink?repo=testing-debug-epel7&arch=$basearch 14 | failovermethod=priority 15 | enabled=0 16 | gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-7 17 | gpgcheck=1 18 | 19 | [epel-testing-source] 20 | name=Extra Packages for Enterprise Linux 7 - Testing - $basearch - Source 21 | #baseurl=http://download.fedoraproject.org/pub/epel/testing/7/SRPMS 22 | mirrorlist=https://mirrors.fedoraproject.org/metalink?repo=testing-source-epel7&arch=$basearch 23 | failovermethod=priority 24 | enabled=0 25 | gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-7 26 | gpgcheck=1 -------------------------------------------------------------------------------- /wsgi/stub.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """ 3 | A very simple wsgi application, used for the initial upstream before the 4 | environments have been initialized. 5 | """ 6 | 7 | html = """ 8 | 9 | 10 | High Availability Django Demo 11 | 34 | 35 | 36 |

Welcome to the High Availability Django Demo!

37 |

To continue, run:

38 |
fab init
39 |

from the root of the checked out repository. 40 | 41 | """ 42 | 43 | 44 | def application(environ, start_response): 45 | status = '200 OK' 46 | headers = [ 47 | ('Content-Type', 'text/html'), 48 | ('Content-Length', str(len(html))), 49 | ] 50 | start_response(status, headers) 51 | return [html] 52 | -------------------------------------------------------------------------------- /fabfile/test.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | from __future__ import unicode_literals 3 | 4 | from fabric.api import task 5 | 6 | from .utils import docker_exec, build_venv 7 | 8 | 9 | __all__ = ('run_all', 'es', 'failed', 'force_db', 'no_es', 'tdd') 10 | 11 | 12 | def run_tests(name="live", flags='', args=''): 13 | with build_venv(name): 14 | docker_exec("py.test %s src/olympia %s" % (flags, args)) 15 | 16 | 17 | @task 18 | def tdd(name="live", args=''): 19 | """to run the entire test suite, but stop on the first error""" 20 | run_tests(name, "-x --pdb", args) 21 | 22 | 23 | @task 24 | def run_all(name="live", args=''): 25 | """to run the entire test suite""" 26 | run_tests(name, args=args) 27 | 28 | 29 | @task 30 | def es(name="live", args=''): 31 | """to run the ES tests""" 32 | run_tests(name, "-m es_tests", args) 33 | 34 | 35 | @task 36 | def failed(name="live", args=''): 37 | """to rerun the failed tests from the previous run""" 38 | run_tests(name, "--lf", args) 39 | 40 | 41 | @task 42 | def force_db(name="live", args=''): 43 | """to run the entire test suite with a new database""" 44 | run_tests(name, "--create-db", args) 45 | 46 | 47 | @task 48 | def no_es(name="live", args=''): 49 | """to run all but the ES tests""" 50 | run_tests(name, "-m 'no es_tests'", args) 51 | -------------------------------------------------------------------------------- /src/hadjango/uwsgi/bootstrap.py: -------------------------------------------------------------------------------- 1 | """ 2 | A script which preloads most modules in Django. 3 | 4 | This file is executed via execfile() from other files in this directory. 5 | 6 | It is assumed that the original file which is executing this file has set 7 | os.environ['DJANGO_SETTINGS_MODULE'] prior to calling execfile(). 8 | """ 9 | import os 10 | from importlib import import_module 11 | 12 | 13 | os.environ["CELERY_LOADER"] = "django" 14 | 15 | 16 | def run_mgmt_validate(): 17 | import django.core.management 18 | utility = django.core.management.ManagementUtility() 19 | command = utility.fetch_command('runserver') 20 | command.validate(display_num_errors=True) 21 | 22 | 23 | def load_templatetags(): 24 | import_module('django.template.base').get_templatetags_modules() 25 | 26 | 27 | def load_admin(): 28 | import_module('django.contrib.admin').autodiscover() 29 | 30 | 31 | def load_i18n(lang_code): 32 | import_module('django.utils.translation').activate(lang_code) 33 | 34 | 35 | def load_urls(): 36 | import_module('django.core.urlresolvers').resolve('/') 37 | 38 | 39 | def setup(): 40 | import django 41 | 42 | django.setup() 43 | 44 | from django.conf import settings 45 | 46 | load_templatetags() 47 | if 'django.contrib.admin' in settings.INSTALLED_APPS: 48 | load_admin() 49 | load_i18n(settings.LANGUAGE_CODE) 50 | 51 | load_urls() 52 | 53 | run_mgmt_validate() 54 | 55 | 56 | setup() 57 | -------------------------------------------------------------------------------- /docker/start-docker.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | # If running within docker-machine, the current directory will be a mounted 3 | # volume, owned by the user running Docker on the host machine. 4 | # 5 | # In that case, we don't want to trample all over the contents of this 6 | # directory with files owned by root. So we create a new user with the same 7 | # UID, and drop privileges before running any commands. 8 | # 9 | # If we are not running in docker-machine, we create a user with uid 1000 10 | # and execute commands as that user. 11 | 12 | # Get the numeric user ID of the current directory. 13 | uid=$(ls -nd . | awk '{ print $3 }') 14 | gid=$(ls -nd . | awk '{ print $4 }') 15 | 16 | # If the current working directory is owned by root, that means we're running 17 | # with plain-old docker, not docker-machine. 18 | if [ "$uid" == "0" ]; then 19 | uid=1000 20 | gid=1000 21 | fi 22 | 23 | group_name=$(getent group $gid) 24 | 25 | groupadd -g 1000 olympia 26 | 27 | # Create an `olympia` user with that ID, and the current directory 28 | # as its home directory. 29 | if [ -z "$group_name" ]; then 30 | useradd -Md $(pwd) -u $uid -g $gid -G olympia olympia 31 | else 32 | useradd -Md $(pwd) -u $uid -g $gid olympia 33 | fi 34 | 35 | if [ ! -d /deps ]; then 36 | mkdir /deps 37 | chown $uid:$gid /deps 38 | fi 39 | 40 | mkdir -p /var/run/supervisor 41 | chown $uid:$gid /var/run/supervisor 42 | chown $uid:$gid /var/run/uwsgi 43 | 44 | # Switch to that user and execute our actual command. 45 | exec su root -c 'exec "$@"' sh -- "$@" 46 | -------------------------------------------------------------------------------- /docker/supervisor.conf: -------------------------------------------------------------------------------- 1 | [supervisord] 2 | logfile = /code/deploy/logs/supervisord.log 3 | pidfile = /var/run/supervisor/supervisord.pid 4 | 5 | [program:uwsgi_status] 6 | command = /usr/sbin/uwsgi /code/wsgi/uwsgi_status.ini 7 | autorestart = true 8 | redirect_stderr = true 9 | stdout_logfile = /code/deploy/logs/supervisord.uwsgi_status.log 10 | stopsignal = INT 11 | 12 | [program:stub] 13 | command = /usr/sbin/uwsgi /code/wsgi/stub.ini 14 | autorestart = true 15 | redirect_stderr = true 16 | stdout_logfile = /code/deploy/logs/supervisord.stub.log 17 | stopsignal = INT 18 | 19 | [program:olympia_vassals] 20 | command = /usr/sbin/uwsgi --master --thunder-lock --emperor "/code/deploy/builds/[a-z]*/vassal.ini" 21 | autorestart = true 22 | redirect_stderr = true 23 | stdout_logfile = /code/deploy/logs/supervisord.vassals.log 24 | stopsignal = INT 25 | priority = 996 26 | 27 | [program:olympia_zergs] 28 | command = /usr/sbin/uwsgi --master --thunder-lock --emperor "/code/deploy/builds/[a-z]*/zerg.ini" 29 | redirect_stderr = true 30 | stdout_logfile = /code/deploy/logs/supervisord.zergs.log 31 | stopsignal = INT 32 | startsecs = 5 33 | priority = 997 34 | 35 | [group:olympia] 36 | programs = olympia_zergs,olympia_vassals 37 | priority = 998 38 | 39 | # The following sections enable supervisorctl. 40 | 41 | [unix_http_server] 42 | file = /var/run/supervisor/supervisor.sock 43 | 44 | [rpcinterface:supervisor] 45 | supervisor.rpcinterface_factory = supervisor.rpcinterface:make_main_rpcinterface 46 | 47 | [supervisorctl] 48 | serverurl = unix:///var/run/supervisor/supervisor.sock 49 | -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: '2' 2 | 3 | services: 4 | web: 5 | # build: . 6 | image: hadjango/djangocon-2016-demo 7 | entrypoint: ./docker/start-docker.sh 8 | command: supervisord -n -c /code/docker/supervisor.conf 9 | privileged: true 10 | expose: 11 | - "1999" 12 | - "2000" 13 | - "2010" 14 | - "2020" 15 | - "2030" 16 | - "2040" 17 | - "2050" 18 | - "2060" 19 | - "2070" 20 | - "2080" 21 | - "2090" 22 | - "2100" 23 | - "2110" 24 | - "2120" 25 | - "2130" 26 | - "2140" 27 | - "2150" 28 | - "2160" 29 | - "2170" 30 | - "2180" 31 | - "2190" 32 | - "2200" 33 | - "2210" 34 | - "2220" 35 | - "2230" 36 | - "2240" 37 | - "2250" 38 | - "9999" 39 | volumes: 40 | - .:/code 41 | links: 42 | - memcached 43 | - mysqld 44 | - elasticsearch 45 | - redis 46 | environment: 47 | - PYTHONUNBUFFERED=1 48 | - RECURSION_LIMIT=10000 49 | - TERM=xterm-256color 50 | 51 | nginx: 52 | # build: ./nginx 53 | image: hadjango/addons-nginx 54 | entrypoint: /code/docker/start-nginx.sh 55 | command: nginx -g 'daemon off;' 56 | ports: 57 | - "80" 58 | ports: 59 | - "80:80" 60 | volumes: 61 | - .:/code 62 | links: 63 | - web:web 64 | 65 | memcached: 66 | image: memcached:1.4 67 | 68 | mysqld: 69 | image: mysql:5.6 70 | environment: 71 | - MYSQL_ALLOW_EMPTY_PASSWORD=yes 72 | - MYSQL_DATABASE=olympia 73 | 74 | elasticsearch: 75 | image: elasticsearch:1.6 76 | 77 | redis: 78 | image: redis:2.8 79 | -------------------------------------------------------------------------------- /docker/RPM-GPG-KEY-EPEL-7: -------------------------------------------------------------------------------- 1 | -----BEGIN PGP PUBLIC KEY BLOCK----- 2 | Version: GnuPG v1.4.11 (GNU/Linux) 3 | 4 | mQINBFKuaIQBEAC1UphXwMqCAarPUH/ZsOFslabeTVO2pDk5YnO96f+rgZB7xArB 5 | OSeQk7B90iqSJ85/c72OAn4OXYvT63gfCeXpJs5M7emXkPsNQWWSju99lW+AqSNm 6 | jYWhmRlLRGl0OO7gIwj776dIXvcMNFlzSPj00N2xAqjMbjlnV2n2abAE5gq6VpqP 7 | vFXVyfrVa/ualogDVmf6h2t4Rdpifq8qTHsHFU3xpCz+T6/dGWKGQ42ZQfTaLnDM 8 | jToAsmY0AyevkIbX6iZVtzGvanYpPcWW4X0RDPcpqfFNZk643xI4lsZ+Y2Er9Yu5 9 | S/8x0ly+tmmIokaE0wwbdUu740YTZjCesroYWiRg5zuQ2xfKxJoV5E+Eh+tYwGDJ 10 | n6HfWhRgnudRRwvuJ45ztYVtKulKw8QQpd2STWrcQQDJaRWmnMooX/PATTjCBExB 11 | 9dkz38Druvk7IkHMtsIqlkAOQMdsX1d3Tov6BE2XDjIG0zFxLduJGbVwc/6rIc95 12 | T055j36Ez0HrjxdpTGOOHxRqMK5m9flFbaxxtDnS7w77WqzW7HjFrD0VeTx2vnjj 13 | GqchHEQpfDpFOzb8LTFhgYidyRNUflQY35WLOzLNV+pV3eQ3Jg11UFwelSNLqfQf 14 | uFRGc+zcwkNjHh5yPvm9odR1BIfqJ6sKGPGbtPNXo7ERMRypWyRz0zi0twARAQAB 15 | tChGZWRvcmEgRVBFTCAoNykgPGVwZWxAZmVkb3JhcHJvamVjdC5vcmc+iQI4BBMB 16 | AgAiBQJSrmiEAhsPBgsJCAcDAgYVCAIJCgsEFgIDAQIeAQIXgAAKCRBqL66iNSxk 17 | 5cfGD/4spqpsTjtDM7qpytKLHKruZtvuWiqt5RfvT9ww9GUUFMZ4ZZGX4nUXg49q 18 | ixDLayWR8ddG/s5kyOi3C0uX/6inzaYyRg+Bh70brqKUK14F1BrrPi29eaKfG+Gu 19 | MFtXdBG2a7OtPmw3yuKmq9Epv6B0mP6E5KSdvSRSqJWtGcA6wRS/wDzXJENHp5re 20 | 9Ism3CYydpy0GLRA5wo4fPB5uLdUhLEUDvh2KK//fMjja3o0L+SNz8N0aDZyn5Ax 21 | CU9RB3EHcTecFgoy5umRj99BZrebR1NO+4gBrivIfdvD4fJNfNBHXwhSH9ACGCNv 22 | HnXVjHQF9iHWApKkRIeh8Fr2n5dtfJEF7SEX8GbX7FbsWo29kXMrVgNqHNyDnfAB 23 | VoPubgQdtJZJkVZAkaHrMu8AytwT62Q4eNqmJI1aWbZQNI5jWYqc6RKuCK6/F99q 24 | thFT9gJO17+yRuL6Uv2/vgzVR1RGdwVLKwlUjGPAjYflpCQwWMAASxiv9uPyYPHc 25 | ErSrbRG0wjIfAR3vus1OSOx3xZHZpXFfmQTsDP7zVROLzV98R3JwFAxJ4/xqeON4 26 | vCPFU6OsT3lWQ8w7il5ohY95wmujfr6lk89kEzJdOTzcn7DBbUru33CQMGKZ3Evt 27 | RjsC7FDbL017qxS+ZVA/HGkyfiu4cpgV8VUnbql5eAZ+1Ll6Dw== 28 | =hdPa 29 | -----END PGP PUBLIC KEY BLOCK----- 30 | -------------------------------------------------------------------------------- /conf/uwsgi/zerg.skel: -------------------------------------------------------------------------------- 1 | [uwsgi] 2 | plugins = cheaper_busyness,logfile,python 3 | uid = olympia 4 | gid = olympia 5 | chmod-socket = 666 6 | chown-socket = olympia:olympia 7 | 8 | ; a, b, etc... 9 | ; (this is based on the name of the current symlink's directory) 10 | build = %c 11 | 12 | log-x-forwarded-for = true 13 | log-format = [%(worker_id)] %(addr) [%(ltime)] "%(method) %(uri) %(proto)" %(status) %(size) "%(referer)" "%(uagent)" %(micros) 14 | 15 | master = true 16 | memory-report = true 17 | thunder-lock = true 18 | 19 | ; Initialize adaptive process spawning (aka the cheaper subsystem) 20 | ; maximum number of workers that can be spawned 21 | workers = 14 22 | ; Use the 'busyness' algorithm 23 | cheaper-algo = busyness 24 | ; tries to keep 4 idle workers 25 | cheaper = 4 26 | ; starts with minimal workers 27 | cheaper-initial = 4 28 | ; spawn at most 1 worker at a time 29 | cheaper-step = 1 30 | ; how many seconds between busyness checks 31 | cheaper-overload = 20 32 | cheaper-busyness-multiplier = 5 33 | ; how many requests are in backlog before quick response triggered 34 | cheaper-busyness-backlog-alert = 33 35 | cheaper-busyness-backlog-step = 1 36 | 37 | auto-procname = true 38 | procname-master = uwsgi master <%(build)> 39 | wsgi-file = /code/deploy/builds/%(build)/hadjango/uwsgi/wsgi.py 40 | python-path = /code/deploy/builds/%(build) 41 | home = /code/deploy/builds/%(build) 42 | buffer-size = 32768 43 | env = PYTHON_EGG_CACHE=/tmp/.%(build)-python-eggs 44 | env = DJANGO_SETTINGS_MODULE=settings 45 | req-logger = file:/code/deploy/logs/access.%(build).log 46 | logger = file:/code/deploy/logs/error.%(build).log 47 | stats = /var/run/uwsgi/%(build).stats 48 | 49 | lazy-apps = true 50 | touch-workers-reload = /code/deploy/builds/%(build)/hadjango/uwsgi/wsgi.py 51 | 52 | ; attach to zerg pool 53 | zerg = /var/run/uwsgi/%(build).sock 54 | -------------------------------------------------------------------------------- /docker/RPM-GPG-KEY-hadjango: -------------------------------------------------------------------------------- 1 | -----BEGIN PGP PUBLIC KEY BLOCK----- 2 | Version: GnuPG v2.0.22 (GNU/Linux) 3 | 4 | mQENBFeVjuwBCADYR++dX1+50rszQV5OkYB0MTW2bAWypR384yFS4Mt44xtQOEdg 5 | vDZUzs0ky5ABjFcAgal+ET15sNs/g6l++Qqh1/vJ6BzySxNQoQXx+lUqmrOMxKlJ 6 | 3JWdsJ7Xj2L+YCHsNygbRBfFyFJpGy5ZYkMjxAQOojJy1rVYjTbwP1uzMmK1nRAg 7 | uecxPhkiHIE2z70+XNVd2S7pEppH1lhKx1tJMOr0tSP4ByPjw/49RUU19QCVTxOE 8 | CxioODX07O8EM+X+Z5pPHuxHUJv4KaWRmU+ey7XfJBvx7Bqhnx9cY34z+OubeDTJ 9 | hphUHidSxFtGXjvmuPFoftlFSFV7WEsTmLmRABEBAAG0JEZyYW5raWUgRGludGlu 10 | byA8ZmRpbnRpbm9AZ21haWwuY29tPokBOQQTAQIAIwUCV5WO7AIbAwcLCQgHAwIB 11 | BhUIAgkKCwQWAgMBAh4BAheAAAoJECT19zSLizZo6fMH/08laHE6YnijwGYScPmE 12 | HxTMNBN9XT46lvTKrqxzEM94OEGQe8RCa5eDg+JHzqPFdTZG8irUH8mZrfeRqyqj 13 | e/1JnoJkDaJqrKPXjcHYFq7388wzCkfzGIPlV40AcrqmGhsw0+cuXhLCLXsH6Zlp 14 | dOPfWoUQusME6wBwEIbABndeb47pRJV26F4U8cTUXYKlqV1dtjDcJWAkzpwJhVGa 15 | /nquN3OaoXDFt4b0IuBjQUxdw/uANVlmcJdqUxdpvq/n0Eka3+kDk9JF8ZKDZUCh 16 | J/LXJiNU4NsPp5xcqx5ySDMzvQFcpKhUbl2V+saTKdRCojaO2WrwIN0DfLEkI/lc 17 | BUe5AQ0EV5WO7AEIAOz58fp7w7436i9emah0ALr+FmpxlTgwEQg3VeOcUbT+hjvK 18 | yYom+5oPiPzK9R/tS+XUPXu3glYM6el+owR6rcCRQr/RPs+GeFmHxuikKPQqSNhX 19 | Owqw1PZKgaa0bb5DYW8EretmFOzr0tA2W3O6KCf2v9U/tZoruyE+eqPOWFya2bdl 20 | hQuu8ZuuivSSh6kbmWnpn9yv6JWYeDWU28SiOErcmrZdT16cQSXqHALqV38T7oRy 21 | jSjoVs2R+4CfgAE67WlUWDhgEscjjtq1exdXnw4pvNRBiESb6pCzDFk4Dp7yPdrL 22 | AUpZk4xKTrojTKmWSoddzTTzFVC9M++vRnbi6RsAEQEAAYkBHwQYAQIACQUCV5WO 23 | 7AIbDAAKCRAk9fc0i4s2aMG5CACFUgyqIe/PbmMNrtxesSpKf/j6D7G26hmCYPs9 24 | PEfF02/FOBzN4vdO8uXlEuhKwRQKS1qjDukmw9YBveu5+gKcZsKF1vPRfgtCymW+ 25 | o4c8XYBBh7ePVeh1Jc3uIBvtn65MFMnx7ORN0o4UjFDlknvkw8GCebldGe5pcP5C 26 | OAxXnb+glleuYd1dc1d5RZidhIyLxGu1Zecpr4xfu9cBTpEMdNWjJ60uZywhoQmk 27 | TC7omOQ281wdHTsEHj8RRNBgmMbTepvg3NCc01qovQqu1qjE1qgfF/pEwJs6UphH 28 | s0u0cwa71u2c2wTe+weiCBacEZEhfLYDNoqHizfwuJ08fi9v 29 | =Oug9 30 | -----END PGP PUBLIC KEY BLOCK----- 31 | -------------------------------------------------------------------------------- /docker/start-nginx.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # If running within docker-machine, the current directory will be a mounted 3 | # volume, owned by the user running Docker on the host machine. 4 | # 5 | # In that case, we don't want to trample all over the contents of this 6 | # directory with files owned by root. So we create a new user with the same 7 | # UID, and drop privileges before running any commands. 8 | # 9 | # If we are not running in docker-machine, we create a user with uid 1000 10 | # and execute commands as that user. 11 | 12 | # Get the numeric user ID of the current directory. 13 | uid=$(ls -nd /code | awk '{ print $3 }') 14 | gid=$(ls -nd /code | awk '{ print $4 }') 15 | 16 | # If the current working directory is owned by root, that means we're running 17 | # with plain-old docker, not docker-machine. 18 | if [ "$uid" == "0" ]; then 19 | uid=1000 20 | gid=1000 21 | fi 22 | 23 | group_name=$(getent group $gid) 24 | 25 | groupadd -g 1000 olympia 2>&1 2>/dev/null 26 | 27 | # Create an `olympia` user with that ID, and the current directory 28 | # as its home directory. 29 | if [ -z "$group_name" ]; then 30 | useradd -Md $(pwd) -u $uid -g $gid -G olympia olympia 2>&1 2>/dev/null 31 | else 32 | useradd -Md $(pwd) -u $uid -g $gid olympia 2>&1 2>/dev/null 33 | fi 34 | 35 | if [ ! -L /code/deploy/builds/_live ]; then 36 | mkdir -p /code/deploy/tmp/live 37 | mkdir -p /code/deploy/tmp/stage 38 | echo "upstream web { server web:1999; }" > /code/deploy/tmp/live/live.conf; 39 | echo "upstream webstage { server web:1999; }" > /code/deploy/tmp/stage/stage.conf; 40 | 41 | ln -sfvn /code/deploy/tmp/live /code/deploy/builds/_live 42 | ln -sfvn /code/deploy/tmp/stage /code/deploy/builds/_stage 43 | fi 44 | 45 | chown -R $uid:$gid /code/deploy/tmp 46 | chown $uid:$gid /code/deploy/builds/* 47 | 48 | exec su root -c 'exec "$@"' sh -- "$@" 49 | -------------------------------------------------------------------------------- /src/local_settings.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from olympia.lib.settings_base import INSTALLED_APPS 4 | 5 | 6 | INSTALLED_APPS = ('hadjango', ) + INSTALLED_APPS + ('olympia.landfill', ) 7 | 8 | BROKER_URL = 'amqp://olympia:olympia@rabbitmq/olympia' 9 | CELERY_RESULT_BACKEND = 'redis://redis:6379/1' 10 | REDIS_LOCATION = 'redis://redis:6379/0?socket_timeout=0.5' 11 | ES_HOSTS = ['elasticsearch:9200'] 12 | ES_URLS = ['http://%s' % h for h in ES_HOSTS] 13 | SITE_DIR = 'http://olympia.dev' 14 | 15 | CACHES = { 16 | 'default': { 17 | 'BACKEND': 'caching.backends.memcached.MemcachedCache', 18 | 'LOCATION': 'memcached:11211', 19 | } 20 | } 21 | 22 | 23 | ROOT = os.path.realpath(os.path.dirname(__file__)) 24 | 25 | BUILD_NAME = os.path.basename(ROOT) 26 | STATIC_ROOT = os.path.join(ROOT, "assets", "static") 27 | MEDIA_ROOT = os.path.join(ROOT, "assets", "media") 28 | STATIC_URL = "/static/%s/" % BUILD_NAME 29 | 30 | CELERY_ALWAYS_EAGER = True 31 | 32 | DATABASES = { 33 | 'default': { 34 | 'ENGINE': 'django.db.backends.mysql', 35 | 'NAME': 'olympia', 36 | 'USER': 'root', 37 | 'PASSWORD': '', 38 | 'HOST': 'mysqld', 39 | 'PORT': '', 40 | 'OPTIONS': {'sql_mode': 'STRICT_ALL_TABLES'}, 41 | 'TEST_CHARSET': 'utf8', 42 | 'TEST_COLLATION': 'utf8_general_ci', 43 | # Run all views in a transaction unless they are decorated not to. 44 | 'ATOMIC_REQUESTS': True, 45 | # Pool our database connections up for 300 seconds 46 | 'CONN_MAX_AGE': 300, 47 | }, 48 | } 49 | 50 | # A database to be used by the services scripts, which does not use Django. 51 | # The settings can be copied from DATABASES, but since its not a full Django 52 | # database connection, only some values are supported. 53 | SERVICES_DATABASE = { 54 | 'NAME': DATABASES['default']['NAME'], 55 | 'USER': DATABASES['default']['USER'], 56 | 'PASSWORD': DATABASES['default']['PASSWORD'], 57 | 'HOST': DATABASES['default']['HOST'], 58 | 'PORT': DATABASES['default']['PORT'], 59 | } 60 | -------------------------------------------------------------------------------- /src/hadjango/uwsgi/wsgi.py: -------------------------------------------------------------------------------- 1 | import re 2 | import os 3 | import sys 4 | import site 5 | import uwsgi 6 | 7 | 8 | project_root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))) 9 | 10 | sys.path.insert(0, project_root) 11 | site.addsitedir(os.path.join( 12 | project_root, 13 | "lib", 14 | "python" + sys.version[0:3], 15 | "site-packages")) 16 | 17 | 18 | def get_site_name(): 19 | settings_module = os.environ.get('DJANGO_SETTINGS_MODULE') or '' 20 | matches = re.match(r'settings\.(\w+)\.live', settings_module) 21 | if matches: 22 | site_name = matches.group(1) 23 | else: 24 | site_name = 'unknown' 25 | return site_name 26 | 27 | 28 | site_name = get_site_name() 29 | 30 | 31 | def set_uwsgi_proc_name(): 32 | build_dir = os.path.basename(project_root) 33 | try: 34 | deploy_tag = open(os.path.join(project_root, '.DEPLOY_TAG')).read().strip() 35 | except IOError: 36 | deploy_tag = '?' 37 | 38 | os.environ['DEPLOY_TAG'] = deploy_tag 39 | 40 | uwsgi.setprocname("uwsgi worker %(worker_id)d <%(build)s> [%(tag)s]" % { 41 | 'worker_id': uwsgi.worker_id(), 42 | 'build': build_dir, 43 | 'tag': deploy_tag, 44 | }) 45 | 46 | 47 | set_uwsgi_proc_name() 48 | 49 | execfile(os.path.join(os.path.dirname(__file__), 'bootstrap.py')) 50 | 51 | _application = None 52 | 53 | 54 | def application(environ, start_response): 55 | global _application 56 | 57 | uwsgi.set_logvar('worker_id', str(uwsgi.worker_id())) 58 | 59 | if not os.environ.get('DJANGO_SETTINGS_MODULE'): 60 | os.environ['DJANGO_SETTINGS_MODULE'] = environ.get('DJANGO_SETTINGS_MODULE', 'settings') 61 | 62 | if _application is None: 63 | try: 64 | from django.core.wsgi import get_wsgi_application 65 | except ImportError: 66 | import django.core.handlers.wsgi 67 | _application = django.core.handlers.wsgi.WSGIHandler() 68 | else: 69 | _application = get_wsgi_application() 70 | 71 | return _application(environ, start_response) 72 | -------------------------------------------------------------------------------- /src/hadjango/management/commands/warmup.py: -------------------------------------------------------------------------------- 1 | import random 2 | 3 | from django.conf import settings 4 | from django.core.management.base import BaseCommand 5 | 6 | import requests 7 | from requests_futures.sessions import FuturesSession 8 | 9 | from hadjango.build_port_convert import build_to_port 10 | 11 | 12 | class Command(BaseCommand): 13 | """ 14 | Command that primes processes/cache for a given build 15 | """ 16 | 17 | help = 'Warms up processes/cache for a given host/port' 18 | 19 | def add_arguments(self, parser): 20 | parser.add_argument('--build', '-b', dest='build', 21 | default=settings.BUILD_NAME, help='Build to warm up: (a|b|c|...)') 22 | parser.add_argument('--concurrents', '-c', dest='concurrents', 23 | default='6', help='Number of concurrent requests to make') 24 | 25 | def handle(self, **options): 26 | server = "web" 27 | build = options['build'] 28 | port = build_to_port(build) 29 | max_workers = int(options['concurrents']) 30 | 31 | session = FuturesSession(max_workers=max_workers) 32 | 33 | futures = [] 34 | random_version = random.randint(0, 65535) 35 | for i in range(0, max_workers): 36 | version = '' if i == 0 else '?v=%d%d' % (i, random_version) 37 | futures.append(session.get('http://%(server)s:%(port)d/en-US/firefox/%(version)s' % { 38 | 'server': server, 39 | 'port': port, 40 | 'version': version, 41 | })) 42 | 43 | for i, future in enumerate(futures): 44 | # wait for the response to complete 45 | try: 46 | response = future.result() 47 | except requests.ConnectionError: 48 | status = 'XXX' 49 | time = 0 50 | else: 51 | status = response.status_code 52 | time = int(round(response.elapsed.total_seconds() * 1000.0)) 53 | self.stdout.write("%(server)s: (%(status)s) %(time)6d ms: worker %(i)s\n" % { 54 | 'server': server, 55 | 'status': status, 56 | 'time': time, 57 | 'i': i + 1, 58 | }) 59 | 60 | self.stdout.write("Successfully warmed up.") 61 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM centos:centos7 2 | 3 | # Allow scripts to detect we're running in our own container 4 | RUN touch /addons-server-centos7-container 5 | 6 | # Set the locale. This is mainly so that tests can write non-ascii files to 7 | # disk. 8 | ENV LANG en_US.UTF-8 9 | ENV LC_ALL en_US.UTF-8 10 | 11 | ADD docker/RPM-GPG-KEY-mysql /etc/pki/rpm-gpg/RPM-GPG-KEY-mysql 12 | ADD docker/RPM-GPG-KEY-hadjango /etc/pki/rpm-gpg/RPM-GPG-KEY-hadjango 13 | ADD docker/RPM-GPG-KEY-EPEL-7 /etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-7 14 | 15 | ADD docker/epel.repo /etc/yum.repos.d/epel.repo 16 | ADD docker/epel-testing.repo /etc/yum.repos.d/epel-testing.repo 17 | ADD docker/hadjango.repo /etc/yum.repos.d/hadjango.repo 18 | 19 | # For mysql-python dependencies 20 | ADD docker/mysql.repo /etc/yum.repos.d/mysql.repo 21 | 22 | RUN rpm --import /etc/pki/rpm-gpg/RPM-GPG-KEY-mysql \ 23 | && rpm --import /etc/pki/rpm-gpg/RPM-GPG-KEY-hadjango \ 24 | && rpm --import /etc/pki/rpm-gpg/RPM-GPG-KEY-EPEL-7 \ 25 | && yum update -y \ 26 | && yum install -y \ 27 | # Supervisor is being used to start and keep our services running 28 | supervisor \ 29 | # General (dev-) dependencies 30 | bash-completion \ 31 | gcc-c++ \ 32 | curl \ 33 | make \ 34 | libjpeg-devel \ 35 | cyrus-sasl-devel \ 36 | libxml2-devel \ 37 | libxslt-devel \ 38 | zlib-devel \ 39 | libffi-devel \ 40 | openssl-devel \ 41 | python-devel \ 42 | # Git, because we're using git-checkout dependencies 43 | git \ 44 | # Nodejs for less, stylus, uglifyjs and others 45 | nodejs \ 46 | # Dependencies for mysql-python 47 | mysql-community-devel \ 48 | mysql-community-client \ 49 | mysql-community-libs \ 50 | epel-release \ 51 | swig \ 52 | python-uwsgidecorators \ 53 | uwsgi-devel \ 54 | uwsgi-logger-file \ 55 | uwsgi-plugin-python \ 56 | uwsgi-plugin-zergpool \ 57 | uwsgi-router-http \ 58 | uwsgi-router-raw \ 59 | uwsgi-router-uwsgi \ 60 | uwsgi-stats-pusher-file \ 61 | uwsgi-stats-pusher-socket \ 62 | uwsgi-plugin-cheaper-busyness \ 63 | python-pip \ 64 | python-setuptools \ 65 | python-virtualenv \ 66 | && yum clean all 67 | 68 | RUN pip install wheel pyOpenSSL ndg-httpsclient pyasn1 certifi urllib3 psutil supervisor fabric \ 69 | && rm -rf /root/.cache 70 | 71 | COPY . /code 72 | WORKDIR /code 73 | 74 | ENV SWIG_FEATURES="-D__x86_64__" 75 | 76 | # Preserve bash history across image updates. 77 | # This works best when you link your local source code 78 | # as a volume. 79 | ENV HISTFILE /code/docker/artifacts/bash_history 80 | 81 | # Configure bash history. 82 | ENV HISTSIZE 50000 83 | ENV HISTIGNORE ls:exit:"cd .." 84 | 85 | # This prevents dupes but only in memory for the current session. 86 | ENV HISTCONTROL erasedups 87 | -------------------------------------------------------------------------------- /fabfile/build_port_convert.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """Convert a build name (e.g. 'a', 'b', etc.) to a port number, or vice-versa""" 3 | from __future__ import print_function 4 | 5 | import os 6 | import sys 7 | 8 | 9 | START_PORT = 2000 10 | PORT_INCREMENT = 10 11 | MAX_PORT = 65535 12 | MAX_PORT_NUMS = (MAX_PORT - START_PORT) // PORT_INCREMENT 13 | 14 | NUM_TO_CHAR = dict([((i), chr(i + ord('a'))) for i in range(0, 26)]) 15 | CHAR_TO_NUM = dict(map(reversed, NUM_TO_CHAR.items())) 16 | 17 | 18 | def base26_decode(string): 19 | num = 0 20 | i = 0 21 | while i < len(string): 22 | char = string[i] 23 | offset = 0 24 | if i == 0: 25 | offset = 1 26 | power = len(string) - i - 1 27 | num += (26 ** power) * (CHAR_TO_NUM[char] + offset) 28 | if offset: 29 | while power > 1: 30 | power -= 1 31 | num += 26 ** power 32 | i += 1 33 | return num - 1 34 | 35 | 36 | def base26_encode(num): 37 | string = '' 38 | num += 1 39 | while num: 40 | num, val = divmod((num - 1), 26) 41 | string = "%s%s" % (NUM_TO_CHAR[val], string) 42 | return string 43 | 44 | 45 | def port_to_build(port): 46 | if port < START_PORT: 47 | usage("Port number must be greater than or equal to %d" % START_PORT) 48 | if port > MAX_PORT: 49 | usage("Port number must be less than %d" % MAX_PORT) 50 | num, index = divmod((port - START_PORT), PORT_INCREMENT) 51 | return (base26_encode(num), index) 52 | 53 | 54 | def build_to_port(name): 55 | if name[-1].isdigit(): 56 | index = int(name[-1]) 57 | name = name[:-1] 58 | else: 59 | index = 0 60 | 61 | num = base26_decode(name) 62 | port = START_PORT + (num * PORT_INCREMENT) + index 63 | return port 64 | 65 | 66 | USAGE = """%(error)s 67 | Usage: %(script_name)s [port|build_name [index]] 68 | 69 | build_name A string composed of lowercase letters, as part of the sequence 70 | a, b, c, ..., z, aa, ab, ac, ..., zz, aaa, ..., %(max_build_name)s 71 | index Optional (defaults to 0), an integer between 0 and %(max_index)s 72 | which is added to the port number for the build_name 73 | port An integer, greater than %(start)s, which is converted into a 74 | build_name and index. 75 | """.strip() 76 | 77 | 78 | def usage(error=""): 79 | if error: 80 | error = "ERROR: %s\n" % error 81 | script_name = os.path.basename(sys.argv[0]) 82 | print(USAGE % { 83 | "script_name": script_name, 84 | "max_build_name": base26_encode(MAX_PORT_NUMS), 85 | "max_index": PORT_INCREMENT, 86 | "start": START_PORT, 87 | "error": error, 88 | }) 89 | sys.exit(1) 90 | 91 | 92 | if __name__ == '__main__': 93 | if len(sys.argv) != 2: 94 | usage() 95 | arg = sys.argv[-1] 96 | if arg.isdigit(): 97 | print("%s %d" % port_to_build(int(arg))) 98 | elif len(arg) > 3: 99 | usage() 100 | else: 101 | print(build_to_port(arg)) 102 | -------------------------------------------------------------------------------- /wsgi/uwsgi_status.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import sys 4 | import re 5 | import errno 6 | import glob 7 | import socket 8 | import json 9 | import logging 10 | 11 | import psutil 12 | 13 | 14 | logging.basicConfig() 15 | logger = logging.getLogger() 16 | 17 | re_procname = re.compile(r"(?i)" 18 | r"uwsgi (?P(?:worker|master))" 19 | r"(?: (?P\d+))? " 20 | r"<(?P[^\.]+)>" 21 | r"(?: \[(?P[^\]]+)\])?") 22 | 23 | 24 | class UwsgiProcess(object): 25 | 26 | def __init__(self, pinfo): 27 | self.pid = pinfo['pid'] 28 | self.status = pinfo['status'] 29 | self.name = pinfo['cmdline'][0] 30 | self.create_time = pinfo['create_time'] 31 | m = re_procname.match(self.name) 32 | if not m: 33 | raise Exception("Bad uwsgi process name") 34 | matches = m.groupdict() 35 | self.is_master = matches['type'] == 'master' 36 | if self.is_master: 37 | self.workers = [] 38 | else: 39 | self.worker_num = int(matches['worker_num']) 40 | self.ppid = pinfo['ppid'] 41 | self.deploy_tag = matches['deploy_tag'] 42 | self.build = matches['build'] 43 | 44 | 45 | def get_uwsgi_procs(): 46 | procs = {} 47 | for proc in psutil.process_iter(): 48 | try: 49 | pinfo = proc.as_dict( 50 | attrs=['pid', 'ppid', 'name', 'cmdline', 'create_time', 'status']) 51 | except psutil.NoSuchProcess: 52 | pass 53 | else: 54 | if not len(pinfo['cmdline']): 55 | continue 56 | if pinfo['cmdline'][0].startswith('u') and pinfo['ppid'] != 1: 57 | if re_procname.match(pinfo['cmdline'][0]): 58 | procs[pinfo['pid']] = UwsgiProcess(pinfo) 59 | 60 | stat_sock_files = glob.glob('/var/run/uwsgi/*.stats') 61 | for stat_sock_file in stat_sock_files: 62 | js = '' 63 | try: 64 | s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) 65 | s.connect(stat_sock_file) 66 | 67 | while True: 68 | d = s.recv(4096) 69 | if len(d) < 1: 70 | break 71 | js += d.decode('utf-8') 72 | except IOError as e: 73 | if e.errno == errno.ECONNREFUSED: 74 | logger.error("Connection refused to socket %s" % stat_sock_file) 75 | elif e.errno != errno.EINTR: 76 | raise 77 | continue 78 | except: 79 | raise Exception("Unable to get uwsgi statistics for socket %s" 80 | % stat_sock_file) 81 | 82 | raw_stats = json.loads(js) 83 | if raw_stats['pid'] not in procs: 84 | continue 85 | 86 | workers = raw_stats.pop('workers') 87 | 88 | master_proc = procs[raw_stats['pid']] 89 | master_proc.status = 'active' 90 | master_proc.__dict__.update(raw_stats) 91 | 92 | for worker in workers: 93 | pid = worker['pid'] 94 | if pid in procs: 95 | uwsgi_worker = procs.pop(pid) 96 | uwsgi_worker.__dict__.update(worker) 97 | master_proc.workers.append(uwsgi_worker.__dict__) 98 | master_proc.deploy_tag = uwsgi_worker.deploy_tag 99 | 100 | for pid in reversed(sorted(procs.keys())): 101 | proc = procs[pid] 102 | if not proc.is_master and proc.ppid in procs: 103 | procs[proc.ppid].workers.append(procs.pop(pid).__dict__) 104 | 105 | build_data = {} 106 | for pid in reversed(sorted(procs.keys())): 107 | proc = procs[pid] 108 | build = proc.build 109 | if build not in build_data: 110 | build_data[build] = [] 111 | 112 | build_data[build].append(proc.__dict__) 113 | 114 | data = [] 115 | for build in sorted(build_data.keys()): 116 | data.append({ 117 | "name": build, 118 | "processes": build_data[build], 119 | }) 120 | 121 | return { 122 | "configs": data 123 | } 124 | 125 | 126 | def application(environ, start_response): 127 | status = '200 OK' 128 | output = json.dumps(get_uwsgi_procs()) 129 | headers = [ 130 | ('Content-Type', 'application/json'), 131 | ('Content-Length', str(len(output))) 132 | ] 133 | start_response(status, headers) 134 | return [output] 135 | 136 | 137 | if __name__ == '__main__': 138 | sys.stdout.write(json.dumps(get_uwsgi_procs())) 139 | -------------------------------------------------------------------------------- /fabfile/utils.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | from __future__ import unicode_literals 3 | 4 | from contextlib import contextmanager 5 | import os 6 | import re 7 | 8 | import six 9 | 10 | from fabric.api import env, local 11 | from fabric.context_managers import cd, path, quiet 12 | from fabric.state import win32 13 | 14 | 15 | ROOT_DIR = os.path.dirname(os.path.dirname(__file__)) 16 | 17 | IN_DOCKER = os.path.exists('/addons-server-centos7-container') 18 | 19 | if IN_DOCKER: 20 | DOCKER_NAME = "" 21 | else: 22 | COMPOSE_PROJECT_NAME = os.path.basename(ROOT_DIR.replace('-', '').replace('_', '')) 23 | DOCKER_NAME = "%s_%%(server)s_1" % COMPOSE_PROJECT_NAME 24 | 25 | 26 | def docker_exec(*cmd, **kwargs): 27 | server = kwargs.pop('server', 'web') 28 | root = kwargs.pop('root', False if server == 'web' else True) 29 | default_cwd = "addons-server" if server == "web" else "/" 30 | cwd = env['cwd'] if env['cwd'] else default_cwd 31 | if not cmd[0].startswith('cd '): 32 | cmd = ("cd %s" % cwd, ) + cmd 33 | cmd = " && ".join(cmd) 34 | if server == "web": 35 | cmd = _prefix_env_vars(cmd) 36 | cmd = cmd.replace("'", "'\"'\"'") 37 | if root: 38 | full_cmd = "bash -c '%s'" % cmd 39 | else: 40 | full_cmd = """su olympia -c 'bash -c '"'"'%s'"'"''""" % cmd.replace("'", "'\"'\"'") 41 | if not IN_DOCKER: 42 | container_name = DOCKER_NAME % {'server': server} 43 | full_cmd = "docker exec -t -i %s %s" % (container_name, full_cmd) 44 | if (six.PY2 and isinstance(full_cmd, unicode)): 45 | full_cmd = full_cmd.encode('utf-8') 46 | return local(full_cmd, **kwargs) 47 | 48 | 49 | @contextmanager 50 | def build_venv(name): 51 | old_build_name = env.get('build_name') 52 | env['build_name'] = name 53 | build_dir = "/code/deploy/builds/%s" % name 54 | with cd("%s" % build_dir): 55 | docker_exec("[ -f bin/pip ] || virtualenv . --never-download") 56 | with path(os.path.join(build_dir, 'bin'), behavior='prepend'): 57 | yield 58 | env['build_name'] = old_build_name 59 | 60 | 61 | _find_unsafe = re.compile(r'[a-zA-Z0-9_^@%+=:,./-]').search 62 | 63 | 64 | def quote(s): 65 | """Return a shell-escaped version of the string *s*.""" 66 | if not s: 67 | return "''" 68 | 69 | if _find_unsafe(s) is None: 70 | return s 71 | 72 | # use single quotes, and put single quotes into double quotes 73 | # the string $'b is then quoted as '$'"'"'b' 74 | 75 | return "'" + s.replace("'", "'\"'\"'") + "'" 76 | 77 | 78 | def _shell_escape(string): 79 | """ 80 | Escape double quotes, backticks and dollar signs in given ``string``. 81 | 82 | For example:: 83 | 84 | >>> _shell_escape('abc$') 85 | 'abc\\\\$' 86 | >>> _shell_escape('"') 87 | '\\\\"' 88 | """ 89 | for char in ('"', '$', '`'): 90 | string = string.replace(char, '\%s' % char) 91 | return string 92 | 93 | 94 | def _prefix_env_vars(command, local=False): 95 | """ 96 | Prefixes ``command`` with any shell environment vars, e.g. ``PATH=foo ``. 97 | 98 | Currently, this only applies the PATH updating implemented in 99 | `~fabric.context_managers.path` and environment variables from 100 | `~fabric.context_managers.shell_env`. 101 | 102 | Will switch to using Windows style 'SET' commands when invoked by 103 | ``local()`` and on a Windows localhost. 104 | """ 105 | env_vars = {} 106 | 107 | # path(): local shell env var update, appending/prepending/replacing $PATH 108 | path = env.path 109 | if path: 110 | if env.path_behavior == 'append': 111 | path = '$PATH:\"%s\"' % path 112 | elif env.path_behavior == 'prepend': 113 | path = '\"%s\":$PATH' % path 114 | elif env.path_behavior == 'replace': 115 | path = '\"%s\"' % path 116 | 117 | env_vars['PATH'] = path 118 | 119 | # shell_env() 120 | env_vars.update(env.get('docker_shell_env')) 121 | 122 | if env_vars: 123 | set_cmd, exp_cmd = '', '' 124 | if win32 and local: 125 | set_cmd = 'SET ' 126 | else: 127 | exp_cmd = 'export ' 128 | 129 | exports = ' '.join( 130 | '%s%s="%s"' % (set_cmd, k, v if k == 'PATH' else _shell_escape(v)) 131 | for k, v in env_vars.iteritems() 132 | ) 133 | shell_env_str = '%s%s && ' % (exp_cmd, exports) 134 | else: 135 | shell_env_str = '' 136 | 137 | return "%s%s" % (shell_env_str, command) 138 | 139 | 140 | def dealias_build(name): 141 | if name in ("live", "stage"): 142 | name = "_%s" % name 143 | path = "%s/deploy/builds/%s" % (ROOT_DIR, name) 144 | with quiet(): 145 | return os.path.basename( 146 | docker_exec("cd %s && pwd -P" % path, capture=True)) 147 | -------------------------------------------------------------------------------- /docker/RPM-GPG-KEY-mysql: -------------------------------------------------------------------------------- 1 | -----BEGIN PGP PUBLIC KEY BLOCK----- 2 | Version: GnuPG v1.4.9 (SunOS) 3 | 4 | mQGiBD4+owwRBAC14GIfUfCyEDSIePvEW3SAFUdJBtoQHH/nJKZyQT7h9bPlUWC3 5 | RODjQReyCITRrdwyrKUGku2FmeVGwn2u2WmDMNABLnpprWPkBdCk96+OmSLN9brZ 6 | fw2vOUgCmYv2hW0hyDHuvYlQA/BThQoADgj8AW6/0Lo7V1W9/8VuHP0gQwCgvzV3 7 | BqOxRznNCRCRxAuAuVztHRcEAJooQK1+iSiunZMYD1WufeXfshc57S/+yeJkegNW 8 | hxwR9pRWVArNYJdDRT+rf2RUe3vpquKNQU/hnEIUHJRQqYHo8gTxvxXNQc7fJYLV 9 | K2HtkrPbP72vwsEKMYhhr0eKCbtLGfls9krjJ6sBgACyP/Vb7hiPwxh6rDZ7ITnE 10 | kYpXBACmWpP8NJTkamEnPCia2ZoOHODANwpUkP43I7jsDmgtobZX9qnrAXw+uNDI 11 | QJEXM6FSbi0LLtZciNlYsafwAPEOMDKpMqAK6IyisNtPvaLd8lH0bPAnWqcyefep 12 | rv0sxxqUEMcM3o7wwgfN83POkDasDbs3pjwPhxvhz6//62zQJ7Q2TXlTUUwgUmVs 13 | ZWFzZSBFbmdpbmVlcmluZyA8bXlzcWwtYnVpbGRAb3NzLm9yYWNsZS5jb20+iGkE 14 | ExECACkCGyMGCwkIBwMCBBUCCAMEFgIDAQIeAQIXgAIZAQUCUwHUZgUJGmbLywAK 15 | CRCMcY07UHLh9V+DAKCjS1gGwgVI/eut+5L+l2v3ybl+ZgCcD7ZoA341HtoroV3U 16 | 6xRD09fUgeq0O015U1FMIFBhY2thZ2Ugc2lnbmluZyBrZXkgKHd3dy5teXNxbC5j 17 | b20pIDxidWlsZEBteXNxbC5jb20+iG8EMBECAC8FAk53Pa0oHSBidWlsZEBteXNx 18 | bC5jb20gd2lsbCBzdG9wIHdvcmtpbmcgc29vbgAKCRCMcY07UHLh9bU9AJ9xDK0o 19 | xJFL9vTl9OSZC4lX0K9AzwCcCrS9cnJyz79eaRjL0s2r/CcljdyIZQQTEQIAHQUC 20 | R6yUtAUJDTBYqAULBwoDBAMVAwIDFgIBAheAABIJEIxxjTtQcuH1B2VHUEcAAQGu 21 | kgCffz4GUEjzXkOi71VcwgCxASTgbe0An34LPr1j9fCbrXWXO14msIADfb5piEwE 22 | ExECAAwFAj4+o9EFgwlmALsACgkQSVDhKrJykfIk4QCfWbEeKN+3TRspe+5xKj+k 23 | QJSammIAnjUz0xFWPlVx0f8o38qNG1bq0cU9iEwEExECAAwFAj5CggMFgwliIokA 24 | CgkQtvXNTca6JD+WkQCgiGmnoGjMojynp5ppvMXkyUkfnykAoK79E6h8rwkSDZou 25 | iz7nMRisH8uyiEYEEBECAAYFAj+s468ACgkQr8UjSHiDdA/2lgCg21IhIMMABTYd 26 | p/IBiUsP/JQLiEoAnRzMywEtujQz/E9ono7H1DkebDa4iEYEEBECAAYFAj+0Q3cA 27 | CgkQhZavqzBzTmbGwwCdFqD1frViC7WRt8GKoOS7hzNN32kAnirlbwpnT7a6NOsQ 28 | 83nk11a2dePhiEYEEBECAAYFAkNbs+oACgkQi9gubzC5S1x/dACdELKoXQKkwJN0 29 | gZztsM7kjsIgyFMAnRRMbHQ7V39XC90OIpaPjk3a01tgiEYEExECAAYFAkTxMyYA 30 | CgkQ9knE9GCTUwwKcQCgibak/SwhxWH1ijRhgYCo5GtM4vcAnAhtzL57wcw1Kg1X 31 | m7nVGetUqJ7fiEwEEBECAAwFAkGBywEFgwYi2YsACgkQGFnQH2d7oexCjQCcD8sJ 32 | NDc/mS8m8OGDUOx9VMWcnGkAnj1YWOD+Qhxo3mI/Ul9oEAhNkjcfiEwEEBECAAwF 33 | AkGByzQFgwYi2VgACgkQgcL36+ITtpIiIwCdFVNVUB8xe8mFXoPm4d9Z54PTjpMA 34 | niSPA/ZsfJ3oOMLKar4F0QPPrdrGiEwEEBECAAwFAkGBy2IFgwYi2SoACgkQa3Ds 35 | 2V3D9HMJqgCbBYzr5GPXOXgP88jKzmdbjweqXeEAnRss4G2G/3qD7uhTL1SPT1SH 36 | jWUXiEwEEBECAAwFAkHQkyQFgwXUEWgACgkQfSXKCsEpp8JiVQCghvWvkPqowsw8 37 | w7WSseTcw1tflvkAni+vLHl/DqIly0LkZYn5jzK1dpvfiEwEEBECAAwFAkIrW7oF 38 | gwV5SNIACgkQ5hukiRXruavzEwCgkzL5QkLSypcw9LGHcFSx1ya0VL4An35nXkum 39 | g6cCJ1NP8r2I4NcZWIrqiEwEEhECAAwFAkAqWToFgwd6S1IACgkQPKEfNJT6+GEm 40 | XACcD+A53A5OGM7w750W11ukq4iZ9ckAnRMvndAqn3YTOxxlLPj2UPZiSgSqiEwE 41 | EhECAAwFAkA9+roFgwdmqdIACgkQ8tdcY+OcZZyy3wCgtDcwlaq20w0cNuXFLLNe 42 | EUaFFTwAni6RHN80moSVAdDTRkzZacJU3M5QiEwEEhECAAwFAkEOCoQFgwaWmggA 43 | CgkQOcor9D1qil/83QCeITZ9wIo7XAMjC6y4ZWUL4m+edZsAoMOhRIRi42fmrNFu 44 | vNZbnMGej81viEwEEhECAAwFAkKApTQFgwUj/1gACgkQBA3AhXyDn6jjJACcD1A4 45 | UtXk84J13JQyoH9+dy24714Aniwlsso/9ndICJOkqs2j5dlHFq6oiEwEExECAAwF 46 | Aj5NTYQFgwlXVwgACgkQLbt2v63UyTMFDACglT5G5NVKf5Mj65bFSlPzb92zk2QA 47 | n1uc2h19/IwwrsbIyK/9POJ+JMP7iEwEExECAAwFAkHXgHYFgwXNJBYACgkQZu/b 48 | yM2C/T4/vACfXe67xiSHB80wkmFZ2krb+oz/gBAAnjR2ucpbaonkQQgnC3GnBqmC 49 | vNaJiEwEExECAAwFAkIYgQ4FgwWMI34ACgkQdsEDHKIxbqGg7gCfQi2HcrHn+yLF 50 | uNlH1oSOh48ZM0oAn3hKV0uIRJphonHaUYiUP1ttWgdBiGUEExECAB0FCwcKAwQD 51 | FQMCAxYCAQIXgAUCS3AvygUJEPPzpwASB2VHUEcAAQEJEIxxjTtQcuH1sNsAniYp 52 | YBGqy/HhMnw3WE8kXahOOR5KAJ4xUmWPGYP4l3hKxyNK9OAUbpDVYIh7BDARAgA7 53 | BQJCdzX1NB0AT29wcy4uLiBzaG91bGQgaGF2ZSBiZWVuIGxvY2FsISBJJ20gKnNv 54 | KiBzdHVwaWQuLi4ACgkQOcor9D1qil/vRwCdFo08f66oKLiuEAqzlf9iDlPozEEA 55 | n2EgvCYLCCHjfGosrkrU3WK5NFVgiI8EMBECAE8FAkVvAL9IHQBTaG91bGQgaGF2 56 | ZSBiZWVuIGEgbG9jYWwgc2lnbmF0dXJlLCBvciBzb21ldGhpbmcgLSBXVEYgd2Fz 57 | IEkgdGhpbmtpbmc/AAoJEDnKK/Q9aopfoPsAn3BVqKOalJeF0xPSvLR90PsRlnmG 58 | AJ44oisY7Tl3NJbPgZal8W32fbqgbIkCIgQQAQIADAUCQYHLhQWDBiLZBwAKCRCq 59 | 4+bOZqFEaKgvEACCErnaHGyUYa0wETjj6DLEXsqeOiXad4i9aBQxnD35GUgcFofC 60 | /nCY4XcnCMMEnmdQ9ofUuU3OBJ6BNJIbEusAabgLooebP/3KEaiCIiyhHYU5jarp 61 | ZAh+Zopgs3Oc11mQ1tIaS69iJxrGTLodkAsAJAeEUwTPq9fHFFzC1eGBysoyFWg4 62 | bIjz/zClI+qyTbFA5g6tRoiXTo8ko7QhY2AA5UGEg+83Hdb6akC04Z2QRErxKAqr 63 | phHzj8XpjVOsQAdAi/qVKQeNKROlJ+iq6+YesmcWGfzeb87dGNweVFDJIGA0qY27 64 | pTb2lExYjsRFN4Cb13NfodAbMTOxcAWZ7jAPCxAPlHUG++mHMrhQXEToZnBFE4nb 65 | nC7vOBNgWdjUgXcpkUCkop4b17BFpR+k8ZtYLSS8p2LLz4uAeCcSm2/msJxT7rC/ 66 | FvoH8428oHincqs2ICo9zO/Ud4HmmO0O+SsZdVKIIjinGyOVWb4OOzkAlnnhEZ3o 67 | 6hAHcREIsBgPwEYVTj/9ZdC0AO44Nj9cU7awaqgtrnwwfr/o4V2gl8bLSkltZU27 68 | /29HeuOeFGjlFe0YrDd/aRNsxbyb2O28H4sG1CVZmC5uK1iQBDiSyA7Q0bbdofCW 69 | oQzm5twlpKWnY8Oe0ub9XP5p/sVfck4FceWFHwv+/PC9RzSl33lQ6vM2wIkCIgQT 70 | AQIADAUCQp8KHAWDBQWacAAKCRDYwgoJWiRXzyE+D/9uc7z6fIsalfOYoLN60ajA 71 | bQbI/uRKBFugyZ5RoaItusn9Z2rAtn61WrFhu4uCSJtFN1ny2RERg40f56pTghKr 72 | D+YEt+Nze6+FKQ5AbGIdFsR/2bUk+ZZRSt83e14Lcb6ii/fJfzkoIox9ltkifQxq 73 | Y7Tvk4noKu4oLSc8O1Wsfc/y0B9sYUUCmUfcnq58DEmGie9ovUslmyt5NPnveXxp 74 | 5UeaRc5Rqt9tK2B4A+7/cqENrdZJbAMSunt2+2fkYiRunAFPKPBdJBsY1sxeL/A9 75 | aKe0viKEXQdAWqdNZKNCi8rd/oOP99/9lMbFudAbX6nL2DSb1OG2Z7NWEqgIAzjm 76 | pwYYPCKeVz5Q8R+if9/fe5+STY/55OaI33fJ2H3v+U435VjYqbrerWe36xJItcJe 77 | qUzW71fQtXi1CTEl3w2ch7VF5oj/QyjabLnAlHgSlkSi6p7By5C2MnbCHlCfPnIi 78 | nPhFoRcRGPjJe9nFwGs+QblvS/Chzc2WX3s/2SWm4gEUKRX4zsAJ5ocyfa/vkxCk 79 | SxK/erWlCPf/J1T70+i5waXDN/E3enSet/WL7h94pQKpjz8OdGL4JSBHuAVGA+a+ 80 | dknqnPF0KMKLhjrgV+L7O84FhbmAP7PXm3xmiMPriXf+el5fZZequQoIagf8rdRH 81 | HhRJxQgI0HNknkaOqs8dtrkCDQQ+PqMdEAgA7+GJfxbMdY4wslPnjH9rF4N2qfWs 82 | EN/lxaZoJYc3a6M02WCnHl6ahT2/tBK2w1QI4YFteR47gCvtgb6O1JHffOo2HfLm 83 | RDRiRjd1DTCHqeyX7CHhcghj/dNRlW2Z0l5QFEcmV9U0Vhp3aFfWC4Ujfs3LU+hk 84 | AWzE7zaD5cH9J7yv/6xuZVw411x0h4UqsTcWMu0iM1BzELqX1DY7LwoPEb/O9Rkb 85 | f4fmLe11EzIaCa4PqARXQZc4dhSinMt6K3X4BrRsKTfozBu74F47D8Ilbf5vSYHb 86 | uE5p/1oIDznkg/p8kW+3FxuWrycciqFTcNz215yyX39LXFnlLzKUb/F5GwADBQf+ 87 | Lwqqa8CGrRfsOAJxim63CHfty5mUc5rUSnTslGYEIOCR1BeQauyPZbPDsDD9MZ1Z 88 | aSafanFvwFG6Llx9xkU7tzq+vKLoWkm4u5xf3vn55VjnSd1aQ9eQnUcXiL4cnBGo 89 | TbOWI39EcyzgslzBdC++MPjcQTcA7p6JUVsP6oAB3FQWg54tuUo0Ec8bsM8b3Ev4 90 | 2LmuQT5NdKHGwHsXTPtl0klk4bQk4OajHsiy1BMahpT27jWjJlMiJc+IWJ0mghkK 91 | Ht926s/ymfdf5HkdQ1cyvsz5tryVI3Fx78XeSYfQvuuwqp2H139pXGEkg0n6KdUO 92 | etdZWhe70YGNPw1yjWJT1IhUBBgRAgAMBQJOdz3tBQkT+wG4ABIHZUdQRwABAQkQ 93 | jHGNO1By4fUUmwCbBYr2+bBEn/L2BOcnw9Z/QFWuhRMAoKVgCFm5fadQ3Afi+UQl 94 | AcOphrnJ 95 | =443I 96 | -----END PGP PUBLIC KEY BLOCK----- 97 | 98 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Django UWSGI deploy demo 2 | 3 | This repository demonstrates the principles behind the talk “High-Availability 4 | Django” given by Frankie Dintino of _The Atlantic_ at the 2016 Djangocon. 5 | 6 | In order to simulate the deployment of a large-ish Django project, it uses the 7 | source for [addons.mozilla.org](https://addons.mozilla.org/), [found 8 | here](https://github.com/mozilla/addons-server). 9 | 10 | ## Running 11 | 12 | In order to run this demo VM, Docker, Fabric, and VirtualBox must be 13 | installed. Once installed, the VM can be provisioned by running (from the 14 | root of the checked out repository): 15 | 16 | ```shell 17 | git submodule init 18 | git submodule update 19 | # Skip the next two steps if you don't want docker-machine 20 | docker-machine create --driver virtualbox addons 21 | eval $(docker-machine env addons) 22 | docker-compose up -d 23 | fab init 24 | ``` 25 | 26 | During the `fab init` step you will be prompted once when the database is initialized, 27 | and then again to create the superuser. 28 | 29 | Using `docker-machine` is not required unless you are using the older Docker Toolbox 30 | on Mac (or you have installed it with `brew install docker`). Keep in mind however 31 | that by default, running `docker-compose up` without using a docker-machine VM will 32 | bind to port 80 on your localhost, which means it will need to be unused, so 33 | you may wish to use `docker-machine` even if you don't need to. 34 | Alternatively, if you are using Docker for Mac, you can use 35 | [https://github.com/nlf/dlite](https://github.com/nlf/dlite) to have docker use a 36 | separate bridged network (and thus separate ips for its containers). 37 | 38 | Grab the ip address using the command `fab ip` (unless you are not using `docker-machine` 39 | or `dlite`, in which case it will be `127.0.0.1`), and then add to your `/etc/hosts` files: 40 | 41 | ``` 42 | 192.168.64.7 live.addons stage.addons 43 | ``` 44 | 45 | Replacing `192.168.64.7` with the ip address for docker. 46 | 47 | After running `fab init`, you should be able to visit http://live.addons/ and http://stage.addons/ 48 | to see the two initial builds (note that there may be a delay while the code initializes). 49 | You can view a live-updating uwsgi status dashboard at http://live.addons/uwsgi/ 50 | 51 | ## Creating new builds 52 | 53 | Builds are stored in folders, as part of the sequence {a, b, c, ..., z, aa, ab, ...} 54 | 55 | In many scenarios, builds will cycle between a, b, and c, and the unused build 56 | (the one linked neither to live or stage) can be removed or archived after each build. 57 | 58 | To find the next unused build folder, run `fab find_free_build`. 59 | 60 | Then, to create the build, run, e.g. `fab create_build:z` where “z” should be 61 | whatever build name was returned from `find_free_build`. 62 | 63 | ## Staging and deploying builds 64 | 65 | To set a build as active (which will create a wsgi configuration that 66 | spawns vassal in uWSGI emperor mode), run `fab activate:z`. 67 | 68 | To then stage this build, run `fab stage:z` (again, the string after the “:” 69 | being the build name), which will unlink the current stage build and link the 70 | specified build to stage. 71 | 72 | To “warm up” a build by hitting it with concurrent requests, use the command 73 | `fab warmup:z` (the build name is optional and defaults to the stage build). 74 | 75 | To swap the stage and live builds, run `fab swap_live`. By default, this will 76 | swap live with the stage build, but it is possible to specify a different 77 | build by, e.g. `fab swap_live:z`. 78 | 79 | To spin down an old build (it cannot be the current live or stage builds), run 80 | `fab deactivate:z`. 81 | 82 | ## `fab init` and `fab create_build` 83 | 84 | (Note: This repository's directory will be mounted as `/code` within the docker container, 85 | so when you see paths like `/code/deploy/...` below, keep in mind that this refers also 86 | to `./deploy/...` within this repository.) 87 | 88 | Running `fab init` executes a number of steps to get a stage and live build up and running: 89 | 90 | - Builds all of the requirements into wheels, output to /code/deploy/deps/wheelhouse, which 91 | then allows for faster `pip install` when creating new addons-server builds 92 | - Creates two builds, “a” and “b”, in directories `/code/deploy/builds/{a,b}` 93 | (`fab create_build:a` and `fab create_build:b`). The `create_build` command 94 | does the following: 95 | 96 | - Creates symlinks for the static and media directories of the build so 97 | that nginx static serving works correctly. 98 | 99 | *Note*: All static assets (static files and user-uploaded media) live 100 | outside of the `builds` directory, in `/code/deploy/assets`. This is 101 | to allow for the common use case where static files and user media use 102 | a distinct mount or partition from the rest of the code. In order to 103 | keep all static file urls distinct (beyond whatever staticfile cache busting 104 | is used), the static files for all builds are kept in separate directories, 105 | e.g. `/code/deploy/assets/static/a`. 106 | 107 | - Uses `rsync` to copy the source in the `addons-server` submodule to the 108 | build directory, and copies the contents of `/code/src` on top of that. 109 | - From the build directory, runs `git init` and commits a randomly generated 110 | file. Mozilla’s addons-server uses the git commit hash to cache-bust minified 111 | assets, so this ensures that this step won’t fail and that the hash will be 112 | unique for each build. 113 | - Runs `npm install` as well as `pip install` with the requirements files, using 114 | the `--no-index` flag and our wheeldir of `code/deploy/deps/wheelhouse` to 115 | keep this speedy. 116 | - Runs `manage.py collectstatic` and `manage.py compress_assets` (the latter being a 117 | jingo-minify management command specific to the mozilla addons-server project) 118 | from the build’s virtualenv. 119 | - Determines what port number corresponds to the current build name, and creates 120 | `/code/deploy/builds/a/live.conf` and `/code/deploy/builds/a/stage.conf`, which 121 | are nginx configuration files that specify the upstream. This will only come 122 | into play if the build gets designated as the stage or live build (since nginx 123 | includes `/code/deploy/builds/_live/live.conf` and `/code/deploy/builds/_stage/stage.conf`). 124 | - Creates symlinks to the uwsgi skeleton file `/code/conf/uwsgi/vassal.skel` at 125 | `/code/deploy/builds/{a,b}/vassal.ini`. This step causes the uWSGI emperor to spawn 126 | a vassal that will manage the zerg instances. At this point the application on the 127 | ``web`` container will be listening on ports `2000` and `2010` (for the a and b builds, 128 | respectively), but because there are no zerg instances attached to the vassal 129 | http requests will not yet route. 130 | 131 | - After the builds “a” and “b” are created and linked, respectively, to 132 | `/code/deploy/builds/_live` and `/code/deploy/builds/_stage`, 133 | `fab init` initiates the database and populates it with sample test data, 134 | executed using the manage.py in the `_live` (read “a”) build’s virtualenv. 135 | - It then symlinks to the uwsgi skeleton file `/code/conf/uwsgi/zerg.skel` from 136 | `/code/deploy/builds/{a,b}/zerg.ini`. This “activates” (`fab activate:a`) the 137 | build, spawning zerg instances that attach to the vassal. 138 | - Lastly, `nginx -s reload` is run on the nginx container, updating the upstreams. 139 | 140 | ## Troubleshooting 141 | 142 | * Problem: You encounter the error “Error response from daemon: client is newer than server (client API version: 1.24, server API version: 1.22)” 143 | * Solution: `export DOCKER_API_VERSION=1.22` 144 | -------------------------------------------------------------------------------- /fabfile/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | from __future__ import unicode_literals, print_function 3 | 4 | from distutils.spawn import find_executable 5 | from glob import glob 6 | import socket 7 | import time 8 | import os 9 | import re 10 | 11 | from fabric.api import abort, cd, env, hide, lcd, local, task, settings, quiet 12 | 13 | from .build_port_convert import build_to_port, base26_encode 14 | from .utils import docker_exec, build_venv, dealias_build 15 | # from . import test # noqa 16 | 17 | 18 | ROOT_DIR = os.path.dirname(os.path.dirname(__file__)) 19 | 20 | NUM_ADDONS = NUM_THEMES = 10 21 | 22 | env.docker_shell_env = { 23 | 'NPM_CONFIG_PREFIX': '/code/deploy/deps/', 24 | 'PIP_CACHE_DIR': '/code/deploy/deps/cache/', 25 | 'LANG': 'en_US.UTF-8', 26 | 'LC_ALL': 'en_US.UTF-8', 27 | } 28 | 29 | 30 | @task 31 | def init(): 32 | """to initialize a docker image""" 33 | build_wheels() 34 | docker_exec("mkdir -p /code/deploy/assets/{static,media}") 35 | create_build("a") 36 | create_build("b") 37 | docker_exec( 38 | "ln -sfvn a /code/deploy/builds/_live", 39 | "ln -sfvn b /code/deploy/builds/_stage") 40 | initialize_db() 41 | populate_data() 42 | activate("a") 43 | activate("b") 44 | nginx_reload() 45 | 46 | 47 | @task 48 | def build_wheels(): 49 | """Build the wheels for all dependencies""" 50 | tmp_dir = docker_exec("mktemp -d", capture=True) 51 | with cd(tmp_dir): 52 | docker_exec( 53 | "cp /code/addons-server/requirements/*.txt .", 54 | "cp /code/src/requirements/hadjango.txt .", 55 | "mkdir -p /code/deploy/deps/wheelhouse", 56 | r"perl -pi -e 's/^ .+$//g;s/ \\//g;s/setuptools==23\.0\.0/setuptools==25\.1\.1/g;' *.txt", 57 | "pip wheel -f /code/deploy/deps/wheelhouse --wheel-dir=/code/deploy/deps/wheelhouse --no-deps -r dev.txt", 58 | "pip wheel -f /code/deploy/deps/wheelhouse --wheel-dir=/code/deploy/deps/wheelhouse --no-deps -r docs.txt", 59 | "pip wheel -f /code/deploy/deps/wheelhouse --wheel-dir=/code/deploy/deps/wheelhouse --no-deps -r hadjango.txt", 60 | ("pip wheel -f /code/deploy/deps/wheelhouse --wheel-dir=/code/deploy/deps/wheelhouse --no-deps -r" 61 | " <(perl -pe 's/^\-e //g;' prod_without_hash.txt)")) 62 | 63 | 64 | @task 65 | def create_build(name): 66 | """Creates a new build of the Mozilla addons-server""" 67 | symlink_static_dirs(name) 68 | copy_src(name) 69 | git_init(name) 70 | with build_venv(name): 71 | npm_install(name) 72 | pip_install(name) 73 | build_assets(name) 74 | port = build_to_port(name) 75 | docker_exec( 76 | 'echo "upstream web { server web:%d; }" > live.conf' % port, 77 | 'echo "upstream webstage { server web:%d; }" > stage.conf' % port, 78 | 'date +%FT%T > .DEPLOY_TAG') 79 | docker_exec("ln -svfn /code/conf/uwsgi/vassal.skel vassal.ini") 80 | 81 | 82 | def symlink_static_dirs(name): 83 | """ 84 | Creates symlinks for the static and media directories of build ``name`` so 85 | that nginx static serving works correctly. 86 | 87 | The directory './deploy/assets/static/%(name)s' will be created, along with 88 | the following symlink 89 | 90 | - ./deploy/builds/%(name)s/assets/static -> ./deploy/assets/static/%(name)s 91 | """ 92 | build_dir = "/code/deploy/builds/%s" % name 93 | static_dir = "/code/deploy/assets/static/%s" % name 94 | link_dir = "%s/assets/static" % build_dir 95 | docker_exec( 96 | "mkdir -p %s" % static_dir, 97 | "mkdir -p %s/assets" % build_dir, 98 | "ln -sfvn /code/deploy/assets/media %s/assets/media" % build_dir, 99 | "[ -L %(link_dir)s ] || ln -s %(static_dir)s %(link_dir)s" % { 100 | 'link_dir': link_dir, 101 | 'static_dir': static_dir, 102 | }) 103 | 104 | for d in glob("%s/deploy/assets/static/*" % ROOT_DIR): 105 | link_dir = "/code/deploy/assets/static/%s/%s" % (os.path.basename(d), name) 106 | docker_exec("[ -L %(link_dir)s ] || ln -s %(static_dir)s %(link_dir)s" % { 107 | 'link_dir': link_dir, 108 | 'static_dir': static_dir, 109 | }) 110 | 111 | 112 | def copy_src(name): 113 | build_dir = "/code/deploy/builds/%s" % name 114 | docker_exec( 115 | "git ls-files" 116 | " | perl -pne 's/\"//g; s/e\\\\314\\\\201/é/g;'" # Fix jétpack.xpi path 117 | " | rsync -a --info=progress2 --files-from=- . %s" % build_dir) 118 | docker_exec("cp /code/src/local_settings.py %s" % build_dir) 119 | with build_venv(name): 120 | docker_exec("cp -R /code/src/hadjango .") 121 | docker_exec("cp /code/src/requirements/*.txt requirements") 122 | docker_exec("cp /code/fabfile/build_port_convert.py hadjango") 123 | docker_exec( 124 | r"perl -pi -e " 125 | r"'s/^ .+$//g;s/ \\//g;" 126 | r"s/setuptools==23\.0\.0/setuptools==25\.1\.1/g;' " 127 | "requirements/*.txt") 128 | 129 | 130 | def git_init(name): 131 | """Create fake git repo so that jingo minify can cache bust images""" 132 | if not os.path.exists("%s/deploy/builds/%s/.git" % (ROOT_DIR, name)): 133 | with build_venv(name): 134 | docker_exec( 135 | 'git init', 136 | 'cat /dev/urandom | head -c256 > .random', 137 | 'git add .random', 138 | 'git commit -m "Initial commit"') 139 | 140 | 141 | def pip_install(name): 142 | build_wheels() 143 | args = "-f /code/deploy/deps/wheelhouse --no-index --no-deps" 144 | with build_venv(name): 145 | olympia_egg_link = ( 146 | "/code/deploy/builds/%s/lib/python2.7/site-packages/olympia.egg-link" % name) 147 | docker_exec(( 148 | '[ -f %(egg_link)s ]' 149 | ' || pip install %(args)s -e .') % {'egg_link': olympia_egg_link, 'args': args}) 150 | docker_exec( 151 | "pip install %s -r requirements/dev.txt" % args, 152 | "pip install %s -r requirements/docs.txt" % args, 153 | "pip install %s -r requirements/prod_without_hash.txt" % args, 154 | "pip install %s -r requirements/hadjango.txt" % args) 155 | 156 | 157 | def npm_install(name="_live"): 158 | if os.path.exists(os.path.join(ROOT_DIR, 'deploy', 'builds', name, 'node_modules')): 159 | return 160 | with build_venv(name): 161 | # npm install has a bug when run inside docker on a mounted volume 162 | # (see ). So we run npm 163 | # install in a temp directory, then move it back. 164 | tmp_dir = docker_exec("mktemp -d", capture=True) 165 | docker_exec("cp package.json %s" % tmp_dir) 166 | with cd(tmp_dir): 167 | docker_exec("npm install") 168 | docker_exec("mv %s/node_modules ." % tmp_dir) 169 | 170 | 171 | def initialize_db(): 172 | """to create a new database""" 173 | with build_venv("_live"): 174 | docker_exec( 175 | "python manage.py reset_db", 176 | "python manage.py syncdb --noinput", 177 | "python manage.py loaddata initial.json", 178 | "python manage.py import_prod_versions", 179 | "schematic --fake src/olympia/migrations/", 180 | "python manage.py createsuperuser", 181 | "python manage.py loaddata zadmin/users") 182 | 183 | 184 | def populate_data(): 185 | """to populate a new database""" 186 | with build_venv("_live"): 187 | docker_exec( 188 | # reindex --wipe will force the ES mapping to be re-installed. Useful to 189 | # make sure the mapping is correct before adding a bunch of add-ons. 190 | "python manage.py reindex --wipe --force --noinput", 191 | "python manage.py generate_addons --app firefox %s" % NUM_ADDONS, 192 | "python manage.py generate_addons --app thunderbird %s" % NUM_ADDONS, 193 | "python manage.py generate_addons --app android %s" % NUM_ADDONS, 194 | "python manage.py generate_addons --app seamonkey %s" % NUM_ADDONS, 195 | "python manage.py generate_themes %s" % NUM_THEMES, 196 | # Now that addons have been generated, reindex. 197 | "python manage.py reindex --force --noinput", 198 | # Also update category counts (denormalized field) 199 | "python manage.py cron category_totals") 200 | 201 | 202 | def build_assets(name): 203 | with build_venv(name): 204 | docker_exec( 205 | "python manage.py compress_assets", 206 | "python manage.py collectstatic --noinput") 207 | 208 | 209 | @task 210 | def swap_live(name=None): 211 | """Swap the specified build (default: _stage) with _live""" 212 | if name is None: 213 | name = dealias_build("_stage") 214 | live = dealias_build("_live") 215 | with cd("/code/deploy/builds"): 216 | docker_exec( 217 | "ln -snvf %s _stage" % live, 218 | "ln -snvf %s _live" % name) 219 | nginx_reload() 220 | 221 | 222 | @task 223 | def stage(name): 224 | """Stage the designated build""" 225 | live = dealias_build("_live") 226 | stage = dealias_build("_stage") 227 | if name == live: 228 | abort("Cannot stage the live build; use 'fab swap_live' instead") 229 | if name == stage: 230 | abort("Build %s is already staged" % name) 231 | if not os.path.islink("%s/deploy/builds/%s/zerg.ini" % (ROOT_DIR, name)): 232 | activate(name) 233 | time.sleep(10) 234 | warmup(name) 235 | with cd("/code/deploy/builds"): 236 | docker_exec("ln -snvf %s _stage" % name) 237 | 238 | 239 | @task 240 | def activate(name): 241 | """Start uWSGI zerg instances for a build""" 242 | with build_venv(name): 243 | if not os.path.islink("%s/deploy/builds/%s/vassal.ini" % (ROOT_DIR, name)): 244 | docker_exec("ln -svfn /code/conf/uwsgi/vassal.skel vassal.ini") 245 | time.sleep(3) 246 | docker_exec("ln -svfn /code/conf/uwsgi/zerg.skel zerg.ini") 247 | # Wait for the emperor to poll again 248 | time.sleep(3) 249 | 250 | 251 | @task 252 | def deactivate(name): 253 | """Stop uWSGI zerg instance for a build""" 254 | if not os.path.islink("%s/deploy/builds/%s/zerg.ini" % (ROOT_DIR, name)): 255 | return 256 | with build_venv(name): 257 | docker_exec("rm -f zerg.ini") 258 | time.sleep(3) 259 | 260 | 261 | @task 262 | def stop(name): 263 | """Stop all uwsgi vassals and zergs for a build""" 264 | deactivate(name) 265 | if not os.path.islink("%s/deploy/builds/%s/vassal.ini" % (ROOT_DIR, name)): 266 | return 267 | with build_venv(name): 268 | docker_exec("rm -f vassal.ini") 269 | time.sleep(3) 270 | 271 | 272 | @task 273 | def warmup(name="_stage"): 274 | """Warm up a build, to be used before swapping to live""" 275 | with build_venv(name): 276 | docker_exec("python manage.py warmup") 277 | 278 | 279 | @task 280 | def nginx_reload(): 281 | """Reload nginx config""" 282 | docker_exec("nginx -s reload", server="nginx") 283 | 284 | 285 | @task 286 | def ip(): 287 | """The ip where the addons site can be accessed""" 288 | docker_host = os.environ.get("DOCKER_HOST") or "" 289 | if not docker_host: 290 | with quiet(): 291 | docker_env = local("docker-machine env addons", capture=True) 292 | if docker_env: 293 | match = re.search(r'DOCKER_HOST="(tcp://[^"]+?)"', docker_env) 294 | if match: 295 | docker_host = match.group(1) 296 | 297 | match = re.search(r'tcp://([^:]+):', docker_host) 298 | if match: 299 | print(match.group(1)) 300 | else: 301 | try: 302 | # host used by dlite 303 | _, _, ips = socket.gethostbyname_ex("local.docker") 304 | except: 305 | abort("Could not determine docker-machine host; perhaps localhost?") 306 | else: 307 | print(ips[0]) 308 | 309 | 310 | @task 311 | def find_free_build(): 312 | """Find what the next free build directory name is.""" 313 | current_builds = set([os.path.basename(p) for p in glob("%s/deploy/builds/[a-z]*" % ROOT_DIR)]) 314 | i = 0 315 | while True: 316 | build = base26_encode(i) 317 | if build not in current_builds: 318 | print(build) 319 | return 320 | i += 1 321 | 322 | 323 | @task 324 | def rm(name): 325 | """Delete a build""" 326 | live_name = dealias_build("_live") 327 | stage_name = dealias_build("_stage") 328 | if name in (live_name, stage_name): 329 | abort("Cannot delete the current live or stage build") 330 | stop(name) 331 | docker_exec("rm -rf /code/deploy/builds/%s" % name) 332 | 333 | 334 | @task 335 | def djshell(name="_live"): 336 | """Connect to a running addons-server django shell""" 337 | with build_venv(name): 338 | docker_exec("python manage.py shell") 339 | 340 | 341 | @task 342 | def shell(server='web'): 343 | """Connect to a running addons-server docker shell""" 344 | cwd = '/code' if server == 'web' else '/' 345 | with cd(cwd): 346 | docker_exec("bash", root=True, server=server) 347 | 348 | 349 | @task 350 | def make_incremental_build(name, basis="live"): 351 | """ 352 | Archive a build, hard-linking unchanged files from the "basis" build (default live) 353 | 354 | This can significantly reduce the disk space used by multiple builds. 355 | 356 | On mac, requires ``brew install coreutils`` 357 | """ 358 | cp_bin = find_executable('gcp') 359 | if find_executable('gcp'): 360 | cp_bin = "gcp" 361 | else: 362 | cp_bin = "cp" 363 | 364 | live_name = dealias_build("_live") 365 | stage_name = dealias_build("_stage") 366 | if name in (live_name, stage_name): 367 | abort("Cannot turn the live or stage build into an incremental build") 368 | 369 | basis = dealias_build(basis) 370 | 371 | stop(name) 372 | 373 | with lcd("%s/deploy/builds" % ROOT_DIR): 374 | local("mv %(name)s %(name)s~" % {'name': name}) 375 | with settings(hide("stderr"), warn_only=True): 376 | cp_ret = local("%(cp_bin)s -al %(basis)s %(name)s" % { 377 | 'basis': basis, 378 | 'name': name, 379 | 'cp_bin': cp_bin, 380 | }) 381 | if not cp_ret.succeeded: 382 | local("mv %(name)s~ %(name)s" % {'name': name}) 383 | abort("Local cp bin does not support -l flag (on mac: brew install coreutils)") 384 | local("rsync -acH --delete %(name)s~/ %(name)s" % {'name': name}) 385 | local("rm -rf %(name)s~" % {'name': name}) 386 | --------------------------------------------------------------------------------