├── src ├── myaas │ ├── __init__.py │ ├── utils │ │ ├── __init__.py │ │ ├── filesystem.py │ │ ├── retry.py │ │ ├── socket.py │ │ ├── database.py │ │ ├── container.py │ │ └── btrfs.py │ ├── backends │ │ ├── __init__.py │ │ ├── exceptions.py │ │ ├── postgres.py │ │ ├── mysql.py │ │ └── base.py │ ├── settings.py │ ├── update.py │ ├── reaper.py │ └── server.py ├── .dockerignore ├── nginx │ ├── Dockerfile │ └── nginx.conf ├── requirements-dev.txt ├── gui │ ├── Makefile │ ├── Dockerfile │ ├── docker-compose.yml │ ├── bower.json │ ├── index.html │ └── main.js ├── requirements.txt ├── runserver.py ├── docker-entrypoint.sh ├── docker-compose.yml ├── Dockerfile ├── Makefile ├── gunicorn.conf.py └── README.md ├── .gitignore ├── fabfile ├── requirements.txt ├── README.md ├── __init__.py ├── taskset │ └── __init__.py └── db.py ├── extras ├── mysql-proxy │ ├── Dockerfile │ ├── README.md │ └── entrypoint.sh └── mysql │ ├── custom-entrypoint.sh │ ├── README.md │ ├── mariadb.cnf │ ├── Dockerfile │ ├── configure-memory.sh │ └── myaas.cnf ├── README.md └── LICENSE /src/myaas/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /src/myaas/utils/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /src/myaas/backends/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | __pycache__ 2 | *.pyc 3 | tags 4 | -------------------------------------------------------------------------------- /src/.dockerignore: -------------------------------------------------------------------------------- 1 | **/__pycache__ 2 | *.pyc 3 | Dockerfile 4 | .dockerignore 5 | -------------------------------------------------------------------------------- /src/nginx/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM nginx:1.11.9 2 | 3 | COPY nginx.conf /etc/nginx/nginx.conf -------------------------------------------------------------------------------- /src/requirements-dev.txt: -------------------------------------------------------------------------------- 1 | flake8 2 | nose 3 | rednose 4 | ipython 5 | ipdb 6 | ipdbplugin 7 | unittest2 8 | mock >= 1.0.1 9 | -------------------------------------------------------------------------------- /src/myaas/utils/filesystem.py: -------------------------------------------------------------------------------- 1 | from os.path import getsize 2 | 3 | 4 | def is_empty(path): 5 | return not getsize(path) > 100 6 | -------------------------------------------------------------------------------- /fabfile/requirements.txt: -------------------------------------------------------------------------------- 1 | fabric >= 1.8.3 2 | paramiko >= 1.10.1 3 | requests 4 | python-dotenv>=0.4.0 5 | tabulate==0.7.5 6 | ago==0.0.7 7 | 8 | -------------------------------------------------------------------------------- /src/gui/Makefile: -------------------------------------------------------------------------------- 1 | deps: 2 | docker-compose run --rm npm install && docker-compose run --rm bower install 3 | dev: 4 | python -m SimpleHTTPServer 5 | -------------------------------------------------------------------------------- /extras/mysql-proxy/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM gliderlabs/alpine:3.3 2 | 3 | RUN apk add --no-cache bash socat jq curl netcat-openbsd 4 | 5 | COPY entrypoint.sh / 6 | 7 | ENTRYPOINT ["/entrypoint.sh"] 8 | 9 | EXPOSE 3306 10 | -------------------------------------------------------------------------------- /src/requirements.txt: -------------------------------------------------------------------------------- 1 | Flask>=0.12.3,<1 2 | requests>=2.20.0,<3 3 | docker-py==1.8.1 4 | PyMySQL==0.6.4 5 | psycopg2==2.7.1 6 | gunicorn>=19.5.0,<20 7 | python-decouple==3.0 8 | sh 9 | click 10 | sentry_sdk[flask] 11 | -------------------------------------------------------------------------------- /src/gui/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM node:7 2 | 3 | WORKDIR /code 4 | 5 | COPY bower.json . 6 | 7 | RUN npm install bower && \ 8 | /code/node_modules/bower/bin/bower --allow-root install && \ 9 | rm -rf node_modules 10 | 11 | COPY . /code 12 | 13 | ENTRYPOINT python -m SimpleHTTPServer 80 14 | -------------------------------------------------------------------------------- /extras/mysql/custom-entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # original entrypoint skips running custom scripts if datadir exists 4 | # but we want to execute this script every time the container is started 5 | . /docker-entrypoint-initdb.d/configure-memory.sh 6 | 7 | # now execute original entrypoint as ususal 8 | docker-entrypoint.sh $@ 9 | -------------------------------------------------------------------------------- /src/gui/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "2" 2 | services: 3 | 4 | nodebase: 5 | image: node 6 | working_dir: "/opt" 7 | ports: 8 | - "8080:8080" 9 | volumes: 10 | - ".:/opt" 11 | 12 | npm: 13 | extends: nodebase 14 | entrypoint: npm 15 | 16 | bower: 17 | extends: nodebase 18 | entrypoint: /opt/node_modules/bower/bin/bower --allow-root 19 | -------------------------------------------------------------------------------- /src/runserver.py: -------------------------------------------------------------------------------- 1 | from myaas.server import app 2 | from myaas.settings import DEBUG, SENTRY_DSN 3 | 4 | app.debug = DEBUG 5 | 6 | import sentry_sdk 7 | from sentry_sdk.integrations.flask import FlaskIntegration 8 | 9 | sentry_sdk.init( 10 | dsn=SENTRY_DSN, 11 | integrations=[FlaskIntegration()] 12 | ) 13 | 14 | if __name__ == '__main__': 15 | app.run(host='0.0.0.0', port=5001) 16 | -------------------------------------------------------------------------------- /extras/mysql/README.md: -------------------------------------------------------------------------------- 1 | # Optimized MySQL image for running with myaas 2 | 3 | Available on docker hub as `habitissimo/habitissimo/myaas-mysql`. 4 | 5 | * UTF-8 (unicode ci) by default 6 | * Small query cache 7 | * Small InnoDB pool (but bigger than default) 8 | * InnoDB optimized for SSD 9 | * No double write (no ACID guarantees) 10 | * No native AIO (causes problems on COW filesystems, at least on btrfs) 11 | -------------------------------------------------------------------------------- /src/docker-entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | case $1 in 4 | "update") 5 | shift 6 | python -m myaas.update $@ 7 | exit 0 8 | ;; 9 | "reaper") 10 | shift 11 | python -m myaas.reaper $@ 12 | exit 0 13 | ;; 14 | esac 15 | 16 | if [ -z $MYAAS_HOSTNAME ]; then 17 | >&2 echo "" 18 | >&2 echo "MYAAS_HOSTNAME environment variable is not set" 19 | exit 1 20 | fi 21 | 22 | exec "$@" 23 | -------------------------------------------------------------------------------- /src/myaas/backends/exceptions.py: -------------------------------------------------------------------------------- 1 | class DBTimeoutException(Exception): 2 | pass 3 | 4 | 5 | class NonExistentDatabase(Exception): 6 | pass 7 | 8 | 9 | class NonExistentTemplate(NonExistentDatabase): 10 | pass 11 | 12 | 13 | class ImportInProgress(Exception): 14 | pass 15 | 16 | 17 | class ImportDataError(Exception): 18 | pass 19 | 20 | 21 | class NotReachableException(Exception): 22 | pass 23 | 24 | 25 | class ContainerRunning(Exception): 26 | pass 27 | -------------------------------------------------------------------------------- /extras/mysql/mariadb.cnf: -------------------------------------------------------------------------------- 1 | # MariaDB-specific config file. 2 | # Read by /etc/mysql/my.cnf 3 | 4 | [client] 5 | # Default is Latin1, if you need UTF-8 set this (also in server section) 6 | #default-character-set = utf8 7 | 8 | [mysqld] 9 | # 10 | # * Character sets 11 | # 12 | # Default is Latin1, if you need UTF-8 set all this (also in client section) 13 | # 14 | character-set-server = utf8 15 | collation-server = utf8_unicode_ci 16 | character_set_server = utf8 17 | collation_server = utf8_unicode_ci 18 | -------------------------------------------------------------------------------- /src/docker-compose.yml: -------------------------------------------------------------------------------- 1 | version: "2" 2 | services: 3 | nginx: 4 | build: nginx 5 | image: habitissimo/myaas-nginx 6 | ports: 7 | - 80:80 8 | links: 9 | - myaas 10 | - gui 11 | 12 | myaas: 13 | build: . 14 | image: habitissimo/myaas 15 | environment: 16 | - DEBUG=1 17 | - TZ=Europe/Madrid 18 | - MYAAS_HOSTNAME=127.0.0.1 19 | volumes: 20 | - /var/run/docker.sock:/var/run/docker.sock 21 | - .:/code/ 22 | - /opt/myaas:/myaas 23 | 24 | 25 | gui: 26 | build: gui 27 | image: habitissimo/myaas-gui 28 | -------------------------------------------------------------------------------- /src/gui/bower.json: -------------------------------------------------------------------------------- 1 | { 2 | "name": "myaas-gui", 3 | "description": "Backbone gui for myaas service", 4 | "main": "", 5 | "authors": [ 6 | "Miguel Ángel Durán González " 7 | ], 8 | "license": "MIT", 9 | "homepage": "", 10 | "private": true, 11 | "ignore": [ 12 | "**/.*", 13 | "node_modules", 14 | "bower_components", 15 | "test", 16 | "tests" 17 | ], 18 | "dependencies": { 19 | "backbone": "^1.3.3", 20 | "semantic-ui-calendar": "^0.0.6", 21 | "semantic": "semantic-ui#^2.2.7", 22 | "clipboard": "^1.5.16" 23 | } 24 | } 25 | -------------------------------------------------------------------------------- /fabfile/README.md: -------------------------------------------------------------------------------- 1 | # Client example 2 | 3 | This is a fabric interface for this service, run `fab -l` to get a list of available commands. 4 | 5 | Maybe you will need to install some dependencies first, run `pip install -r requirements.txt`. 6 | 7 | # Configuration 8 | 9 | You will need to export an environment variable with the url to the myaas service. 10 | 11 | ``` 12 | export DB_URL=http://localhost:5001 13 | 14 | # show created databases 15 | fab db.ls 16 | 17 | # show available templates for new databases 18 | fab db.templates 19 | 20 | # create a new database foo from template bar 21 | fab db.new:bar,foo 22 | 23 | # delete the database 24 | fab db.rm:bar,foo 25 | ``` 26 | -------------------------------------------------------------------------------- /fabfile/__init__.py: -------------------------------------------------------------------------------- 1 | import os 2 | import dotenv 3 | 4 | from fabric.api import env 5 | from fabric import state 6 | 7 | from db import DBProvider 8 | 9 | 10 | state.output['running'] = False 11 | state.output['stdout'] = False 12 | 13 | 14 | def load_dotenv(): 15 | current_path = os.path.abspath(os.path.dirname(__file__)) 16 | dotenv_file = os.path.join(current_path, ".env") 17 | if os.path.isfile(dotenv_file): 18 | dotenv.load_dotenv(dotenv_file) 19 | 20 | def check_conf(): 21 | if not env.myaas_url: 22 | print "DB_URL environment variables is not defined." 23 | import sys 24 | sys.exit(1) 25 | 26 | load_dotenv() 27 | env.myaas_url = os.getenv('DB_URL') 28 | check_conf() 29 | db = DBProvider().expose_as_module('db') 30 | -------------------------------------------------------------------------------- /src/myaas/utils/retry.py: -------------------------------------------------------------------------------- 1 | import random 2 | import time 3 | 4 | 5 | class RetryPolicy(object): 6 | def __init__(self, maxtries, delay=None, exceptions=(Exception,)): 7 | if delay is None: 8 | # 100ms +/- 50ms of randomized jitter 9 | self.delay = lambda i: 0.1 + ((random.random() - 0.5) / 10) 10 | else: 11 | self.delay = lambda i: delay 12 | 13 | self.maxtries = maxtries 14 | self.exceptions = exceptions 15 | 16 | def __call__(self, function): 17 | for i in range(0, self.maxtries): 18 | try: 19 | return function() 20 | except self.exceptions as error: 21 | last_exception = error 22 | time.sleep(self.delay(i)) 23 | raise last_exception 24 | -------------------------------------------------------------------------------- /extras/mysql/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM mariadb:10.4.19 2 | 3 | # Install cgget command 4 | RUN apt-get update && apt-get install -y cgroup-tools bc && rm -rf /var/apt/lists/* && apt-get clean 5 | 6 | # customize base config 7 | ADD mariadb.cnf /etc/mysql/conf.d/mariadb.cnf 8 | ADD myaas.cnf /etc/mysql/conf.d/myaas.cnf 9 | 10 | # make myaas.cnf writable 11 | RUN chown -R mysql:mysql /etc/mysql/conf.d 12 | ADD configure-memory.sh /docker-entrypoint-initdb.d/ 13 | 14 | # customize entrypoint 15 | COPY custom-entrypoint.sh /usr/local/bin/ 16 | ENTRYPOINT ["custom-entrypoint.sh"] 17 | 18 | CMD ["mysqld --innodb-doublewrite=0"] 19 | 20 | HEALTHCHECK --start-period=30s --interval=30s --timeout=30s --retries=3 CMD mysql --connect-timeout=10 --user=root --password=$MYSQL_ROOT_PASSWORD -h 127.0.0.1 -e "show databases;" 21 | -------------------------------------------------------------------------------- /src/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM python:3.7 2 | 3 | RUN apt-get update -y && \ 4 | apt-get install -y mariadb-client postgresql-client btrfs-tools && \ 5 | apt-get clean -y && \ 6 | rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* 7 | 8 | WORKDIR /code/ 9 | 10 | # improve cacheability by copying first only the requirements 11 | # files and installing dependencies 12 | ADD requirements.txt /code/ 13 | RUN pip install -r requirements.txt 14 | ADD requirements-dev.txt /code/ 15 | RUN pip install -r requirements-dev.txt 16 | 17 | # copy all the rest 18 | ADD myaas /code/myaas/ 19 | ADD docker-entrypoint.sh /docker-entrypoint.sh 20 | RUN chmod +x /docker-entrypoint.sh 21 | ADD runserver.py /code/ 22 | ADD gunicorn.conf.py /code/ 23 | 24 | 25 | ENV PYTHONUNBUFFERED=1 26 | 27 | ENTRYPOINT ["/docker-entrypoint.sh"] 28 | CMD ["gunicorn", "-c", "gunicorn.conf.py", "runserver:app"] 29 | 30 | HEALTHCHECK --interval=5s --timeout=2s --retries=3 CMD curl --fail http://127.0.0.1/ || exit 1 31 | -------------------------------------------------------------------------------- /src/myaas/utils/socket.py: -------------------------------------------------------------------------------- 1 | import socket 2 | 3 | 4 | def reserve_port(): 5 | """ 6 | This function finds a free port number for new containers to be created, 7 | it releases the port just before returning the port number, so there is 8 | a chance for another process to get it, let's see if it works. 9 | 10 | This requires the myaas container to be running with --net=host otherwise 11 | the port returned by this method will be a free port inside the container, 12 | but may not be free on the host machine. 13 | """ 14 | s = socket.socket() 15 | s.bind(("", 0)) 16 | (ip, port) = s.getsockname() 17 | s.close() 18 | 19 | return port 20 | 21 | 22 | def test_tcp_connection(ip, port): 23 | """ 24 | Tries to establish a TCP connection, returns a boolean indicating if the 25 | connection succeded. 26 | """ 27 | sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) 28 | result = sock.connect_ex((ip, port)) 29 | sock.close() 30 | 31 | return result == 0 32 | -------------------------------------------------------------------------------- /src/nginx/nginx.conf: -------------------------------------------------------------------------------- 1 | user nginx; 2 | worker_processes 1; 3 | 4 | error_log /var/log/nginx/error.log warn; 5 | pid /var/run/nginx.pid; 6 | 7 | 8 | events { 9 | worker_connections 1024; 10 | } 11 | 12 | 13 | http { 14 | include /etc/nginx/mime.types; 15 | default_type application/octet-stream; 16 | 17 | log_format main '$remote_addr - $remote_user [$time_local] "$request" ' 18 | '$status $body_bytes_sent "$http_referer" ' 19 | '"$http_user_agent" "$http_x_forwarded_for"'; 20 | 21 | access_log /var/log/nginx/access.log main; 22 | 23 | sendfile on; 24 | #tcp_nopush on; 25 | 26 | keepalive_timeout 65; 27 | 28 | #gzip on; 29 | 30 | server { 31 | server_name localhost; 32 | listen 80; 33 | 34 | location ~ /v1 { 35 | rewrite ^/v1(.*)$ /$1 break; 36 | proxy_pass http://myaas; 37 | } 38 | 39 | location / { 40 | proxy_pass http://gui; 41 | } 42 | } 43 | } 44 | -------------------------------------------------------------------------------- /src/myaas/utils/database.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import importlib 3 | 4 | from .. import settings 5 | from .container import list_containers 6 | 7 | logger = logging.getLogger(__name__) 8 | 9 | 10 | def get_enabled_backend(): 11 | return importlib.import_module(settings.BACKEND) 12 | 13 | 14 | def get_myaas_containers(): 15 | return filter(_is_database_container, list_containers()) 16 | 17 | 18 | def list_databases(): 19 | containers = filter(_is_database_container, list_containers()) 20 | return [_get_database_name(c) for c in containers] 21 | 22 | 23 | def list_database_templates(): 24 | containers = filter(_is_template_container, list_containers()) 25 | return [_get_database_name(c) for c in containers] 26 | 27 | 28 | def _is_database_container(container): 29 | if not container['Labels']: 30 | return False 31 | 32 | if 'com.myaas.instance' not in container['Labels']: 33 | return False 34 | 35 | return container['Labels'].get('com.myaas.instance') != '' 36 | 37 | 38 | def _is_template_container(container): 39 | if not container['Labels']: 40 | return False 41 | 42 | if 'com.myaas.is_template' not in container['Labels']: 43 | return False 44 | 45 | return container['Labels'].get('com.myaas.is_template') == 'True' 46 | 47 | 48 | def _get_database_name(container): 49 | labels = container['Labels'] 50 | if labels['com.myaas.is_template'] == 'True': 51 | return labels['com.myaas.template'] 52 | else: 53 | return "%s,%s" % (labels['com.myaas.template'], labels['com.myaas.instance']) 54 | -------------------------------------------------------------------------------- /extras/mysql/configure-memory.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # get max available memory from inside container 4 | HARD_LIMIT_IN_BYTES=$(cgget -n --values-only --variable memory.limit_in_bytes /) 5 | HARD_LIMIT_IN_MEGABYTES=$(echo "${HARD_LIMIT_IN_BYTES}/1024/1024" | bc) 6 | SOFT_LIMIT_IN_BYTES=$(cgget -n --values-only --variable memory.soft_limit_in_bytes /) 7 | SOFT_LIMIT_IN_MEGABYTES=$(echo "${SOFT_LIMIT_IN_BYTES}/1024/1024" | bc) 8 | 9 | # get lower limit as effective limit 10 | if [[ $HARD_LIMIT_IN_MEGABYTES -le $SOFT_LIMIT_IN_MEGABYTES ]]; then 11 | LIMIT_IN_MEGABYTES=$HARD_LIMIT_IN_MEGABYTES 12 | else 13 | LIMIT_IN_MEGABYTES=$SOFT_LIMIT_IN_MEGABYTES 14 | fi 15 | 16 | # If reserved memory is more than 2GB 17 | if [[ "$LIMIT_IN_MEGABYTES" -gt "2048" ]]; then 18 | # Use all but 1GB for InnoDB 19 | POOL_SIZE=$(echo "${LIMIT_IN_MEGABYTES} - 1024" | bc) 20 | # Create n pool instance of at least 1Gb each 21 | POOL_INSTANCES=$(echo "$POOL_SIZE/1024" | bc) 22 | else 23 | # use a 50% memory for Innodb 24 | POOL_SIZE=$(echo "${LIMIT_IN_MEGABYTES} / 2" | bc) 25 | POOL_INSTANCES=1 26 | fi 27 | 28 | echo "" 29 | echo "Configuring InnoDB instance pool" 30 | echo "HARD MEMORY LIMIT: ${HARD_LIMIT_IN_MEGABYTES}M" 31 | echo "SOFT MEMORY LIMIT: ${SOFT_LIMIT_IN_MEGABYTES}M" 32 | echo "POOL_SIZE: ${POOL_SIZE}M" 33 | echo "POOL_INSTANCES: ${POOL_INSTANCES}" 34 | echo "" 35 | 36 | sed -i "s/^innodb_buffer_pool_size.*/innodb_buffer_pool_size = ${POOL_SIZE}M/" /etc/mysql/conf.d/myaas.cnf 37 | sed -i "s/^innodb_buffer_pool_instances.*/innodb_buffer_pool_instances = ${POOL_INSTANCES}/" /etc/mysql/conf.d/myaas.cnf 38 | -------------------------------------------------------------------------------- /extras/mysql-proxy/README.md: -------------------------------------------------------------------------------- 1 | # Mysql proxy for MyAAS 2 | 3 | Available on docker hub as `habitissimo/mysql-proxy`. 4 | 5 | You can use this image to integrate myaas in your docker workflow, any container linking to a mysql container can link to this image instead and use the myaas service transparently. 6 | 7 | Despite the name indicates it is a proxy for mysql it should work for MyAAS working with any backend (including Postgres), but the listen port for the proxy is hardcoded to 3306. The proxy is protocol agnostic, it's just a TCP proxy. 8 | 9 | ## How it works 10 | 11 | On start it will request a new database instance to the MyAAS server, once the instance is online it will run a socat proxy to it, your containers linking to this container will be able to connect to that mysql instance on port 3306. 12 | 13 | When the container is stoped it will send a request to the MyAAS server to delete the database (unless MYAAS_NO_REMOVE is set to a non 0 value). 14 | 15 | ## Usage 16 | 17 | Example usage: 18 | 19 | ``` 20 | docker run -it \ 21 | -e MYAAS_URL=http://myaas-server:5001 \ 22 | -e MYAAS_TEMPLATE=template \ 23 | -e MYAAS_NAME=name \ 24 | habitissimo/mysql-proxy 25 | ``` 26 | 27 | ## Environment variables 28 | 29 | Required: 30 | * **MYAAS_URL**: API endpoint of myaas instance 31 | * **MYAAS_TEMPLATE**: template name to use for new database 32 | * **MYAAS_NAME**: a name for the instance to be created 33 | 34 | Optional: 35 | * **MYAAS_NO_REMOVE**: (default is 0) do not remove created database on stop 36 | * **MYAAS_WAIT_TIMEOUT**: (default is 15) max wait time in seconds until server is alive 37 | * **MYAAS_DEBUG**: (default is 0) set to 1 to print debug information about curl requests 38 | * **MYAAS_TTL**: in seconds (default is 0), set to 0 to avoid the deletion of the database 39 | -------------------------------------------------------------------------------- /src/myaas/backends/postgres.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | from .. import settings 4 | from .base import AbstractDatabase, AbstractDatabaseTemplate 5 | from .exceptions import ImportDataError 6 | 7 | 8 | class Database(AbstractDatabase): 9 | @property 10 | def provider_name(self): 11 | return "postgres" 12 | 13 | @property 14 | def datadir(self): 15 | return "/var/lib/postgresql/data" 16 | 17 | @property 18 | def environment(self): 19 | return settings.POSTGRES_ENVIRONMENT 20 | 21 | @property 22 | def image(self): 23 | return settings.POSTGRES_IMAGE 24 | 25 | @property 26 | def service_port(self): 27 | return 5432 28 | 29 | @property 30 | def user(self): 31 | return "postgres" 32 | 33 | @property 34 | def password(self): 35 | return settings.POSTGRES_ENVIRONMENT['POSTGRES_PASSWORD'] 36 | 37 | @property 38 | def database(self): 39 | return settings.POSTGRES_ENVIRONMENT['POSTGRES_DB'] 40 | 41 | 42 | class Template(Database, AbstractDatabaseTemplate): 43 | @property 44 | def database_backend(self): 45 | return Database 46 | 47 | def import_data(self, pg_dump): 48 | command = self._build_pg_command() 49 | env = os.environ.copy() 50 | env['PGPASSWORD'] = self.password 51 | with open(pg_dump, 'r') as f: 52 | out, err = self._run_command(command, stdin=f, env=env) 53 | if err: 54 | raise ImportDataError(err) 55 | 56 | def get_engine_status(self): 57 | pass 58 | 59 | def _build_pg_command(self): 60 | return ["psql", 61 | f"--username={self.user}", 62 | f"--host={self.internal_ip}", 63 | f"--port={self.service_port}", 64 | self.database] 65 | -------------------------------------------------------------------------------- /extras/mysql/myaas.cnf: -------------------------------------------------------------------------------- 1 | # MyASS-specific config file. 2 | # Read by /etc/mysql/my.cnf 3 | 4 | [mysqld] 5 | back_log = 5 6 | max_connections = 50 7 | table_open_cache = 2048 8 | default-storage-engine = INNODB 9 | long_query_time = 10 10 | 11 | ### MyISAM ### 12 | key_buffer_size = 64M 13 | 14 | ### Query cache 15 | thread_cache_size = 8 16 | query_cache_size = 256M 17 | query_cache_limit = 2M 18 | 19 | ### BTRFS ### 20 | innodb_doublewrite = false 21 | innodb_use_native_aio = false 22 | innodb_flush_method = fsync 23 | 24 | ### InnoDB params 25 | innodb_file_per_table = 1 26 | innodb_buffer_pool_size = 2G 27 | innodb_buffer_pool_instances = 2 28 | innodb_log_buffer_size = 256M 29 | innodb_log_file_size = 512M 30 | innodb_open_files = 4000 31 | innodb_data_file_path = ibdata1:10M:autoextend 32 | innodb_flush_log_at_trx_commit = 0 33 | innodb_write_io_threads = 16 34 | innodb_read_io_threads = 8 35 | innodb_thread_concurrency = 16 36 | innodb_fast_shutdown = 0 37 | 38 | ### Emulate 10.1 defaults ### 39 | #innodb_strict_mode = off 40 | sql_mode = NO_AUTO_CREATE_USER,NO_ENGINE_SUBSTITUTION 41 | #innodb_buffer_pool_dump_at_shutdown = OFF 42 | #innodb_buffer_pool_load_at_startup = OFF 43 | #innodb_autoinc_lock_mode = 1 44 | #innodb_buffer_pool_dump_pct = 100 45 | #innodb_max_dirty_pages_pct_lwm = 0.001000 46 | #innodb_max_undo_log_size = 1073741824 47 | #innodb_purge_threads = 1 48 | #innodb_use_atomic_writes = OFF 49 | #innodb_use_trim = OFF 50 | #thread_cache_size = 0 51 | #thread_pool_max_threads = 1000 52 | #thread_stack = 295936 53 | 54 | ### SSD tunning 55 | innodb_flush_neighbors = 0 56 | innodb_io_capacity = 4000 57 | 58 | ### Disable' performance logging ### 59 | performance_schema = off 60 | 61 | ### Prevent connection reset while importing big SQL files ### 62 | max_allowed_packet = 16M 63 | net_read_timeout = 900 64 | net_write_timeout = 900 65 | wait_timeout = 28800 66 | -------------------------------------------------------------------------------- /src/myaas/utils/container.py: -------------------------------------------------------------------------------- 1 | import docker 2 | from os import getenv 3 | from multiprocessing import cpu_count 4 | from random import sample as random_sample 5 | from collections import Counter 6 | 7 | from .. import settings 8 | 9 | client = docker.Client(base_url=settings.DOCKER_HOST) 10 | 11 | 12 | def find_container(name): 13 | # prepend / to name 14 | name = f'/{name}' 15 | containers = client.containers(all=True) 16 | containers = [c for c in containers if name in c['Names']] 17 | if not containers: 18 | return None 19 | return containers[0] 20 | 21 | 22 | def list_containers(all=True): 23 | client = docker.Client() 24 | return client.containers(all=all) 25 | 26 | 27 | def translate_host_basedir(path): 28 | # TODO: if container is created with a custom hostname this will not work 29 | # improve self id detection in the future. 30 | self_id = getenv('HOSTNAME') 31 | self_container = client.containers(filters={'id': self_id})[0] 32 | mount_config = client.inspect_container(self_container)['Mounts'] 33 | for mount in mount_config: 34 | if mount['Destination'] == settings.BASE_DIR: 35 | break 36 | 37 | if mount['Destination'] != settings.BASE_DIR: 38 | raise KeyError("Could not find %s mountpoint" % settings.BASE_DIR) 39 | 40 | return path.replace(mount['Destination'], mount['Source'], 1) 41 | 42 | 43 | def get_random_cpuset(cores_to_assign): 44 | available_cores = cpu_count() 45 | random_cores = random_sample(range(available_cores), cores_to_assign) 46 | return ",".join(map(str, random_cores)) 47 | 48 | def get_mapped_cpuset(): 49 | # fake a static variable 50 | if 'cnt' not in get_mapped_cpuset.__dict__: 51 | cpu_map = settings.CPU_MAP.split(':') 52 | get_mapped_cpuset.cnt = Counter(cpu_map) 53 | 54 | least_used = get_mapped_cpuset.cnt.most_common()[-1][0] 55 | get_mapped_cpuset.cnt[least_used] += 1 56 | 57 | return least_used 58 | -------------------------------------------------------------------------------- /src/Makefile: -------------------------------------------------------------------------------- 1 | test='' 2 | CWD:= $(shell pwd) 3 | HOSTNAME := $(shell hostname -i) 4 | TAG ?= devel 5 | IMAGE=habitissimo/myaas:$(TAG) 6 | DOCKER_RUN_FLAGS = --rm -e "MYAAS_HOSTNAME=$(HOSTNAME)" 7 | run : DOCKER_RUN_FLAGS += -p 5001:80 --name=dbm-server 8 | dev : DOCKER_RUN_FLAGS += -p 5001:80 -v "$(PWD):/code" -e "DEBUG=True" --name=myaas-server-dev 9 | reaper : DOCKER_RUN_FLAGS += -it --name=myaas-daemon 10 | reaper-dev: DOCKER_RUN_FLAGS += -it --name=myaas-daemon -v "$(PWD):/code" -e "DEBUG=True" 11 | shell : DOCKER_RUN_FLAGS += -it -v "$(PWD):/code" --entrypoint=ipython --name=myaas-ipython 12 | bash : DOCKER_RUN_FLAGS += -it -v "$(PWD):/code" --entrypoint=bash --name=myaas-bash 13 | flake : DOCKER_RUN_FLAGS += -it -v "$(PWD):/code" --name=myaas-flake8 14 | tests : DOCKER_RUN_FLAGS += -it -v "$(PWD):/code" --name=myaas-nosetest 15 | update : DOCKER_RUN_FLAGS += -t --name=myaas-update 16 | DOCKER_RUN=docker run $(DOCKER_RUN_FLAGS) \ 17 | -v /var/run/docker.sock:/var/run/docker.sock \ 18 | -v /opt/myaas:/myaas \ 19 | $(IMAGE) 20 | 21 | # build the image 22 | build: 23 | docker build -t $(IMAGE) . 24 | 25 | # push the image to the docker registry 26 | push: build 27 | docker push $(IMAGE) 28 | 29 | # run the ttl reaper 30 | reaper: 31 | $(DOCKER_RUN) daemon 32 | 33 | reaper-dev: 34 | $(DOCKER_RUN) daemon 35 | 36 | # run the image 37 | run: build 38 | $(DOCKER_RUN) 39 | 40 | # run the image in development mode 41 | dev: 42 | $(DOCKER_RUN) python runserver.py 43 | 44 | # run the update task (find templates and import them) 45 | update: build 46 | ${DOCKER_RUN} update 47 | 48 | # check pep8 49 | pep: flake 50 | flake: 51 | -@PAGER=cat git grep -i myass -- './*' ':(exclude)Makefile' && echo "####\nWarning myASS typo detected!\n####" 52 | @${DOCKER_RUN} flake8 --ignore=E501,F841 . 53 | 54 | # run the test suite 55 | test: tests 56 | tests: flake 57 | ${DOCKER_RUN} nosetests -v -x --rednose --pdb $(test) 58 | 59 | # open an ipython shell for testing 60 | shell: 61 | ${DOCKER_RUN} 62 | 63 | # opens a shell inside the container for testing 64 | bash: 65 | ${DOCKER_RUN} 66 | 67 | .PHONY: build push run dev update test tests shell 68 | -------------------------------------------------------------------------------- /src/myaas/backends/mysql.py: -------------------------------------------------------------------------------- 1 | import pymysql 2 | 3 | from .. import settings 4 | 5 | from .base import AbstractDatabase, AbstractDatabaseTemplate 6 | from .exceptions import ImportDataError 7 | 8 | 9 | class Database(AbstractDatabase): 10 | @property 11 | def provider_name(self): 12 | return "mysql" 13 | 14 | @property 15 | def datadir(self): 16 | return "/var/lib/mysql" 17 | 18 | @property 19 | def environment(self): 20 | return settings.MYSQL_ENVIRONMENT 21 | 22 | @property 23 | def image(self): 24 | return settings.MYSQL_IMAGE 25 | 26 | @property 27 | def service_port(self): 28 | return 3306 29 | 30 | @property 31 | def user(self): 32 | return settings.DB_USERNAME 33 | 34 | @property 35 | def password(self): 36 | return settings.DB_PASSWORD 37 | 38 | @property 39 | def database(self): 40 | return settings.DB_DATABASE 41 | 42 | def test_connection(self): 43 | super().test_connection() 44 | try: 45 | conn = pymysql.connect( 46 | host=self.internal_ip, 47 | port=self.service_port, 48 | user=self.user, 49 | passwd=self.password, 50 | db=self.database) 51 | conn.close() 52 | except pymysql.OperationalError: 53 | return False 54 | 55 | return True 56 | 57 | 58 | class Template(Database, AbstractDatabaseTemplate): 59 | @property 60 | def database_backend(self): 61 | return Database 62 | 63 | def import_data(self, sql_file): 64 | mysql_command = self._build_mysql_command() 65 | with open(sql_file, 'r') as f: 66 | out, err = self._run_command(mysql_command, stdin=f) 67 | if err: 68 | raise ImportDataError(err) 69 | 70 | def get_engine_status(self): 71 | mysql_command = self._build_mysql_command() 72 | mysql_command.append("-e") 73 | mysql_command.append("show engine innodb status\G") 74 | out, err = self._run_command(mysql_command) 75 | return out 76 | 77 | def _build_mysql_command(self): 78 | return ["mysql", 79 | "--user=root", 80 | f"--password={self.password}", 81 | f"--host={self.internal_ip}", 82 | f"--port={self.service_port}", 83 | self.database] 84 | -------------------------------------------------------------------------------- /src/myaas/settings.py: -------------------------------------------------------------------------------- 1 | from decouple import config 2 | 3 | # Backend to enable, valid choices are: 4 | # "myaas.backends.mysql" 5 | # "myaas.backends.postgres" 6 | BACKEND = config("MYAAS_BACKEND", default="myaas.backends.mysql") 7 | 8 | 9 | # Controls the debug mode of the application 10 | DEBUG = config('MYAAS_DEBUG', default=False, cast=bool) 11 | 12 | # Docker socket address, can be replaced with a TCP address iy you prefer not 13 | # to bind mount the docker socket 14 | DOCKER_HOST = config('MYAAS_DOCKER_HOST', 15 | default=config('DOCKER_HOST', 16 | default="unix://var/run/docker.sock")) 17 | 18 | # All containers created by this service will have this prefix in their name 19 | CONTAINER_PREFIX = config('MYAAS_PREFIX', default='myaas-') 20 | # Default time that a container stays up, in seconds. 21 | CONTAINER_TTL = config('MYAAS_CONTAINER_TTL', cast=int, default=86400) 22 | 23 | # Default docker imgage for the mysql backend 24 | # Currently tested with: 25 | # - mariadb:10 26 | # - mysql:5 27 | # - habitissimo/myaas-mysql:10.1 (based on mariadb:10) 28 | MYSQL_IMAGE = config("MYAAS_MYSQL_IMAGE", default="habitissimo/myaas-mysql:10.1.23") 29 | POSTGRES_IMAGE = config("MYAAS_POSTGRES_IMAGE", default="postgres:9.4") 30 | 31 | DB_DATABASE = config("MYAAS_DB_DATABASE", default='default') 32 | DB_USERNAME = config("MYAAS_DB_USERNAME", default='myaas') 33 | DB_PASSWORD = config("MYAAS_DB_PASSWORD", default='myaas') 34 | 35 | # Required environment variables for running the mysql image 36 | # All of the listed images above need this environment variables 37 | 38 | MYSQL_ENVIRONMENT = { 39 | "MYSQL_ROOT_PASSWORD": DB_PASSWORD, 40 | "MYSQL_DATABASE": DB_DATABASE, 41 | } 42 | 43 | POSTGRES_ENVIRONMENT = { 44 | "POSTGRES_DB": DB_DATABASE, 45 | "POSTGRES_USER": DB_USERNAME, 46 | "POSTGRES_PASSWORD": DB_PASSWORD, 47 | } 48 | 49 | 50 | # Memory limit to apply to every container (with docker syntax, eg: `2g`) 51 | MEMORY_LIMIT = config('MYAAS_MEMORY_LIMIT', default='2g') 52 | # How many CPUs to assign to every container 53 | CPU_PINNING_INSTANCE_CORES = config("MYAAS_CPU_PINNING_CORES", cast=int, default=2) 54 | 55 | # Internal settings 56 | HOSTNAME = config('MYAAS_HOSTNAME', default='localhost') 57 | BASE_DIR = config('MYAAS_BASE_DIR', default='/myaas') 58 | 59 | DATA_DIR = BASE_DIR + "/data" 60 | DUMP_DIR = BASE_DIR + "/dumps" 61 | 62 | SENTRY_DSN = config('SENTRY_DSN', cast=str, default='') 63 | 64 | CPU_MAP = config('MYAAS_CPU_MAP', default=None) 65 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | [![Docker Build Statu](https://img.shields.io/docker/build/habitissimo/myaas.svg)]() 2 | [![Docker Pulls](https://img.shields.io/docker/pulls/habitissimo/myaas.svg)]() 3 | [![Docker Automated buil](https://img.shields.io/docker/automated/habitissimo/myaas.svg)]() 4 | 5 | # MyAAS (Mysql As A Service) 6 | 7 | This product has been developed internally at habitissimo for allowing developers to get the database instances they need for development as fast as possible. 8 | 9 | ## What this project does 10 | 11 | This project consists on a service which will import a collection of databases periodically. This databases become templates for the final users. 12 | 13 | An user can ask for a database instance from any template available and have it fully functional and loaded with data within seconds, no matter how big is the database, this databases can be destroyed at any moment to request a new instance with fresh data. 14 | 15 | ## Speed 16 | 17 | The main concern we where having in our development process was importing database backups in our development instances, loading this backups by tradicional means (importing a mysqldump file) could take almost an hour, we could use other metohds like innobackupex, but this would mean developers had to download huge files (even with compression) trading speed in import time by slownes in download time. 18 | 19 | This solution is being used to provide a variety of databases ranging from a few megabytes up to several gigabytes, all of them are provisioned within seconds (something between 3 or 5 seconds). 20 | 21 | ## How it works 22 | 23 | You put your sql backups in a folder and run the updater command, this will import the databases and prepare them as templates. This is the slow part, we run it at nights so developers can have acces to yesterday's data in the morning. 24 | 25 | The backups are loaded into a dockerized mysql instance, this docker container binds the datadir to a host volume stored on a filesystem with [Copy On Write](https://es.wikipedia.org/wiki/Copy-on-write) support. 26 | 27 | Once the templates have been loaded the script stops the template database instances. 28 | 29 | Every time a user asks for a new database the service performs a copy on write from the template to a new directory, this directory is mounted as a volume 30 | for a new mysql docker instance launched for this user. As the operation is performed against a [COW](https://es.wikipedia.org/wiki/Copy-on-write) filesystem the operation is both fast and space efficient. 31 | 32 | Finally the service responds with access data required to use the database. 33 | 34 | ## What you will find here: 35 | 36 | - **src**: myaas source [read more](db/README.md) 37 | - **fabfile**: example client to interact with myaas [read more](fabfile/README.md) 38 | 39 | ## TODO 40 | - [ ] Use docker volume API instead of hacking arround with volume bindings 41 | - [ ] Create adapters for postgresql and mongodb 42 | - [ ] Update testsuite, broken after refactoring 43 | 44 | ## Extendibility 45 | 46 | MyAAS has been designed with mysql in mind, but the implementation is database agnostic and can be adapted easily to work with any type of database which stores data in disk. 47 | 48 | Look for [MysqlDatabase adapter](src/myaas/backends/mysql.py) to have an idea of how easy is to support new databases, you just need to extend [AbstractDatabase](src/myaas/backends/base.py) and define a few properties. 49 | 50 | ## Support 51 | 52 | If you have problems using this service [open an issue](../../Habitissimo/myass/issues). 53 | -------------------------------------------------------------------------------- /src/myaas/update.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import traceback 4 | import functools 5 | 6 | from docker.errors import NotFound as ImageNotFound 7 | from sentry_sdk import init, capture_message, configure_scope 8 | 9 | from . import settings 10 | from .utils.container import client 11 | from .utils.database import get_enabled_backend 12 | from .utils.filesystem import is_empty 13 | from .utils.retry import RetryPolicy 14 | from .backends.exceptions import NonExistentTemplate, ImportDataError 15 | 16 | 17 | def list_dump_files(): 18 | files_in_dir = os.listdir(settings.DUMP_DIR) 19 | return filter(lambda x: x.endswith('.sql'), files_in_dir) 20 | 21 | 22 | def indent(string, level=1): 23 | spacing = " " * level 24 | return spacing + string 25 | 26 | 27 | def remove_recreate_database(template): 28 | """ 29 | find existing database, remove it, then recreate 30 | """ 31 | backend = get_enabled_backend().Template 32 | try: 33 | db = backend(client, template, False) 34 | if db.running(): 35 | db.stop() 36 | db.do_backup() 37 | db.remove() 38 | except NonExistentTemplate: 39 | pass # this means this database is being imported for the first time 40 | 41 | try: 42 | database = backend(client, template, True) 43 | except ImageNotFound as e: 44 | import sys 45 | print("\n### ERROR ###", file=sys.stderr) 46 | print(e.explanation.decode(), file=sys.stderr) 47 | print("Pull the image and try again.", file=sys.stderr) 48 | sys.exit(1) 49 | 50 | return database 51 | 52 | 53 | def start_template_database(db_name): 54 | print(f"- Creating database {db_name}") 55 | db = remove_recreate_database(db_name) 56 | 57 | print(indent("* Starting database...")) 58 | db.start() 59 | print(indent("* Started")) 60 | print(indent("* Waiting for database to accept connections")) 61 | try: 62 | db.wait_for_service_listening() 63 | return db 64 | except Exception as e: 65 | print(indent( 66 | f"* Max time waiting for database exceeded" 67 | ", retrying..." 68 | )) 69 | db.stop() 70 | db.restore_backup() 71 | print_exception() 72 | raise e 73 | 74 | 75 | def main(): 76 | dumps = list_dump_files() 77 | for dump in dumps: 78 | db_name,_ = os.path.splitext(dump) 79 | sql_file = os.path.join(settings.DUMP_DIR, dump) 80 | 81 | if is_empty(sql_file): 82 | print(f"- Skipping: {sql_file} is empty") 83 | continue 84 | 85 | start_db_func = functools.partial(start_template_database, db_name) 86 | db = RetryPolicy(5, delay=2)(start_db_func) 87 | if not db: 88 | continue # skip to next database to import 89 | 90 | print(indent("* Importing data...")) 91 | try: 92 | db.import_data(sql_file) 93 | except (ImportDataError, Exception) as e: 94 | with configure_scope() as scope: 95 | scope.set_extra("engine_status", db.get_engine_status()) 96 | scope.set_tag('database', db_name) 97 | capture_message(e) 98 | print(indent("* An error happened, debug information:", level=2)) 99 | print(db.get_engine_status(), file=sys.stderr) 100 | print(indent("* Restoring previous database", level=2)) 101 | db.stop() 102 | db.restore_backup() 103 | finally: 104 | db.remove_backup() 105 | 106 | print(indent("* Stopping database...")) 107 | db.stop() 108 | print(indent("* Stopped")) 109 | 110 | 111 | if __name__ == "__main__": 112 | init(settings.SENTRY_DSN) 113 | try: 114 | main() 115 | except Exception as e: 116 | capture_message(e) 117 | -------------------------------------------------------------------------------- /fabfile/taskset/__init__.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | from __future__ import absolute_import 3 | import inspect 4 | import sys 5 | import types 6 | import warnings 7 | from fabric.tasks import WrappedCallableTask 8 | 9 | def task_method(*args, **kwargs): 10 | """ 11 | Decorator declaring the wrapped method to be task. 12 | 13 | It accepts the same arguments as ``fabric.decorators.task`` so 14 | use it on methods just like fabric's decorator is used on functions. 15 | 16 | The class decorated method belongs to should be a subclass 17 | of :class:`.TaskSet`. 18 | """ 19 | 20 | invoked = bool(not args or kwargs) 21 | if not invoked: 22 | func, args = args[0], () 23 | 24 | def decorator(func): 25 | func._task_info = dict( 26 | args = args, 27 | kwargs = kwargs 28 | ) 29 | return func 30 | 31 | return decorator if invoked else decorator(func) 32 | 33 | def task(*args, **kwargs): 34 | msg = "@taskset.task decorator is deprecated and will be removed soon; please use @taskset.task_method instead." 35 | warnings.warn(msg, DeprecationWarning) 36 | return task_method(*args, **kwargs) 37 | 38 | 39 | class TaskSet(object): 40 | """ 41 | TaskSet is a class that can expose its methods as Fabric tasks. 42 | 43 | Example:: 44 | 45 | # fabfile.py 46 | from fabric.api import local 47 | from taskset import TaskSet, task_method 48 | 49 | class SayBase(TaskSet): 50 | def say(self, what): 51 | raise NotImplemented() 52 | 53 | @task_method(default=True, alias='hi') 54 | def hello(self): 55 | self.say('hello') 56 | 57 | class EchoSay(SayBase): 58 | def say(self, what): 59 | local('echo ' + what) 60 | 61 | say = EchoSay().expose_as_module('say') 62 | 63 | and then:: 64 | 65 | $ fab say.hi 66 | """ 67 | 68 | def expose_to(self, module_name): 69 | """ 70 | Adds tasks to module which name is passed in ``module_name`` argument. 71 | Returns a list of added tasks names. 72 | 73 | Example:: 74 | 75 | instance = MyTaskSet() 76 | __all__ = instance.expose_to(__name__) 77 | """ 78 | module_obj = sys.modules[module_name] 79 | return self._expose_to(module_obj) 80 | 81 | def expose_to_current_module(self): 82 | """ 83 | The same as :meth:`TaskSet.expose_to` but magically 84 | addds tasks to current module. 85 | 86 | Example:: 87 | 88 | instance = MyTaskSet() 89 | __all__ = instance.expose_to_current_module() 90 | """ 91 | frm = inspect.stack()[1] 92 | mod = inspect.getmodule(frm[0]) 93 | return self.expose_to(mod.__name__) 94 | 95 | def expose_as_module(self, module_name, module_type=types.ModuleType): 96 | """ 97 | Creates a new module of type ``module_type`` and named ``module_name``, 98 | populates it with tasks and returns this newly created module. 99 | """ 100 | module = module_type(module_name) 101 | self._expose_to(module) 102 | return module 103 | 104 | def _expose_to(self, module_obj): 105 | task_list = [] 106 | for name, task in self._get_fabric_tasks(): 107 | setattr(module_obj, name, task) 108 | task_list.append(name) 109 | return task_list 110 | 111 | def _is_task(self, func): 112 | return hasattr(func, '_task_info') 113 | 114 | def _task_for_method(self, method): 115 | return WrappedCallableTask(method, *method._task_info['args'], **method._task_info['kwargs']) 116 | 117 | def _get_fabric_tasks(self): 118 | return ( 119 | (name, self._task_for_method(task)) 120 | for name, task in inspect.getmembers(self, self._is_task) 121 | ) 122 | -------------------------------------------------------------------------------- /extras/mysql-proxy/entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | export MYAAS_WAIT_TIMEOUT=${MYAAS_WAIT_TIMEOUT:-15} 5 | export MYAAS_DEBUG=${MYAAS_DEBUG:-0} 6 | export MYAAS_NO_REMOVE=${MYAAS_NO_REMOVE:-0} 7 | export MYAAS_TTL=${MYAAS_TTL:-0} 8 | export CURL="curl --dump-header /tmp/headers.txt --fail --silent" 9 | export PID=0 10 | 11 | print_ts() 12 | { 13 | echo "$(date): $1" 14 | } 15 | 16 | find_or_create_database() 17 | { 18 | local ret=0 19 | print_ts "* Looking for existing database ${MYAAS_TEMPLATE}/${MYAAS_NAME} ..." 20 | JSON=`$CURL ${MYAAS_URL}/db/${MYAAS_TEMPLATE}/${MYAAS_NAME/\//-}` || ret=$? 21 | debug_last_request $ret 22 | if [ $ret -ne 0 ]; then 23 | print_ts " - No existing database found" 24 | create_database 25 | else 26 | print_ts " - Database found" 27 | fi 28 | 29 | HOST=`echo $JSON | jq -M -r '.host'` 30 | PORT=`echo $JSON | jq -M -r '.port'` 31 | 32 | return 0 33 | } 34 | 35 | create_database() 36 | { 37 | local ret=0 38 | print_ts "* Creating database" 39 | JSON=`$CURL --form ttl=${MYAAS_TTL} ${MYAAS_URL}/db/${MYAAS_TEMPLATE}/${MYAAS_NAME/\//-}` || ret=$? 40 | debug_last_request $ret 41 | if [ -z "$JSON" ]; then 42 | print_ts " - [ERROR] Empty server response" 43 | return 1 44 | fi 45 | 46 | return 0 47 | } 48 | 49 | 50 | remove_database() 51 | { 52 | local ret=0 53 | 54 | if [ $MYAAS_NO_REMOVE -ne 0 ]; then 55 | return 56 | fi 57 | 58 | print_ts "* Removing database..." 59 | JSON=`$CURL -X DELETE ${MYAAS_URL}/db/${MYAAS_TEMPLATE}/${MYAAS_NAME/\//-}` || ret=$? 60 | debug_last_request $ret 61 | if [ $ret -eq 0 ]; then 62 | print_ts " - Deleted ${MYAAS_TEMPLATE}-${MYAAS_NAME}" 63 | else 64 | print_ts " - Not found ${MYAAS_TEMPLATE}-${MYAAS_NAME}" 65 | fi 66 | } 67 | 68 | proxy_start() 69 | { 70 | print_ts "* Starting proxy..." 71 | socat TCP-LISTEN:3306,reuseaddr,retry=5,fork TCP:$HOST:$PORT & 72 | PID=$! 73 | print_ts " - Done" 74 | wait $PID 75 | } 76 | 77 | wait_database() 78 | { 79 | local WAITED=0 80 | echo -n "$(date): * waiting for TCP connections to $HOST:$PORT ..." 81 | while ! nc -w 1 -z $HOST $PORT 2>/dev/null 82 | do 83 | if [ $WAITED -ge $MYAAS_WAIT_TIMEOUT ]; then 84 | echo "" 85 | print_ts " - wait timeout reached" 86 | return 1 87 | fi 88 | echo -n . 89 | sleep 1 90 | WAITED=$((WAITED + 1)) 91 | done 92 | echo "OK" 93 | print_ts " - connection stablished" 94 | 95 | return 0 96 | } 97 | 98 | debug_last_request() 99 | { 100 | local exit_status=$1 101 | 102 | if [ $MYAAS_DEBUG -ne 0 ]; then 103 | print_ts "----------- LAST REQUEST: -----------" 104 | echo "Exit status was: $exit_status" 105 | if [ $exit_status -ne 0 ]; then 106 | echo "Refer to https://curl.haxx.se/libcurl/c/libcurl-errors.html to find error code meaning" 107 | fi 108 | echo "" 109 | cat /tmp/headers.txt 110 | echo $JSON | jq 2>/dev/null 111 | # if jq didn0t parse valid JSON print the raw contents 112 | if [ $? -ne 0 ]; then 113 | echo $JSON 114 | fi 115 | print_ts "^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^" 116 | fi 117 | } 118 | 119 | # signal handler to remove database when the container is stopped 120 | signal_handler() 121 | { 122 | echo "" 123 | if [ $PID -ne 0 ]; then 124 | kill $PID || true 125 | fi 126 | remove_database 127 | exit 0 128 | } 129 | 130 | 131 | ## MAIN ## 132 | 133 | # Chek required config 134 | for var in "MYAAS_URL" "MYAAS_TEMPLATE" "MYAAS_NAME"; do 135 | eval "value=\$$var" 136 | if [ -z $value ]; then 137 | print_ts "Missing required variable $var" 138 | exit 2 139 | fi 140 | done 141 | 142 | export CUR_RETRIES=0 143 | export MAX_RETRIES=5 144 | 145 | trap signal_handler SIGTERM SIGINT ERR 146 | 147 | if [ $MYAAS_DEBUG -ne 0 ]; then 148 | print_ts "Debug is enabled" 149 | fi 150 | 151 | print_ts "Using server ${MYAAS_URL}" 152 | 153 | find_or_create_database 154 | until wait_database; do 155 | if [ $CUR_RETRIES -ge $MAX_RETRIES ]; then 156 | echo "" 157 | print_ts "Max retries reached, aborting..." 158 | remove_database 159 | exit 1 160 | fi 161 | find_or_create_database 162 | sleep 2 163 | CUR_RETRIES=$((CUR_RETRIES+1)) 164 | done 165 | proxy_start # waits until proxy is killed 166 | remove_database 167 | -------------------------------------------------------------------------------- /fabfile/db.py: -------------------------------------------------------------------------------- 1 | import requests 2 | import pprint 3 | import sys 4 | from datetime import datetime 5 | try: 6 | from urllib.parse import urljoin 7 | except: 8 | from urlparse import urljoin 9 | 10 | from fabric.api import env 11 | from ago import human 12 | from tabulate import tabulate 13 | from taskset import TaskSet, task_method 14 | 15 | 16 | class DBApiClient(object): 17 | 18 | def __init__(self, base_url): 19 | self.base_url = base_url 20 | self.headers = {'Accept': 'text/plain, application/json'} 21 | 22 | def get(self, url, expects=[200], **kwargs): 23 | return self._command('get', url, expects, **kwargs) 24 | 25 | def post(self, url, expects=[201,304], **kwargs): 26 | return self._command('post', url, expects, **kwargs) 27 | 28 | def delete(self, url, expects=[204,304], **kwargs): 29 | return self._command('delete', url, expects, **kwargs) 30 | 31 | def _command(self, verb, url, expects, **kwargs): 32 | try: 33 | response = requests.request(verb, urljoin(self.base_url, url), 34 | headers=self.headers, **kwargs) 35 | except requests.exceptions.ConnectionError: 36 | print("Could not connect to API endpoint, maybe the service is down") 37 | sys.exit(1) 38 | 39 | if response.status_code == 404: 40 | print("Database not found") 41 | sys.exit(1) 42 | 43 | if response.status_code not in expects: 44 | print("An error happened") 45 | try: 46 | pprint.pprint(response.json()) 47 | except: 48 | pprint.pprint(response.request.headers) 49 | pprint.pprint(response.text) 50 | sys.exit(1) 51 | 52 | return response 53 | 54 | 55 | class DBProvider(TaskSet): 56 | def __init__(self): 57 | self.client = DBApiClient(env.myaas_url) 58 | 59 | @task_method 60 | def new(self, template, name): 61 | response = self.client.post('/db/{template}/{name}'.format(**locals())) 62 | if response.status_code == 304: 63 | print("This database already exists") 64 | return 65 | self._print_single_db(response.json()) 66 | 67 | @task_method(alias="show") 68 | def info(self, template, name): 69 | response = self.client.get('/db/{template}/{name}'.format(**locals())) 70 | self._print_single_db(response.json()) 71 | 72 | @task_method 73 | def shell(self, template, name): 74 | response = self.client.get('/db/{template}/{name}'.format(**locals())) 75 | print("mysql -u{user} -p{password} --host={host} --port={port} {database}".format(**response.json())) 76 | sys.exit(1) # prevent fabric to add 'Done' at the end 77 | 78 | @task_method 79 | def rm(self, template, name): 80 | self.client.delete('/db/{template}/{name}'.format(**locals())) 81 | print("Database deleted") 82 | 83 | @task_method 84 | def ls(self): 85 | response = self.client.get('/db') 86 | headers = ["Template", "Name", "Status", "Created"] 87 | table = [] 88 | for db in response.json()[u'databases']: 89 | db['created'] = datetime.fromtimestamp(db['created']) 90 | human_date = human(db['created'], precision=1) 91 | table.append([db['template'], db['name'], db['state'], human_date]) 92 | 93 | print tabulate(table, headers) 94 | 95 | @task_method 96 | def templates(self): 97 | response = self.client.get('/templates') 98 | pprint.pprint(response.json()[u'templates']) 99 | 100 | @task_method 101 | def container(self, template, name): 102 | response = self.client.get('/db/{template}/{name}'.format(**locals()), 103 | params={'all': True}) 104 | pprint.pprint(response.json()) 105 | 106 | def _print_single_db(self, json_data): 107 | table = [ 108 | ('Host', json_data['host']), 109 | ('Port', json_data['port']), 110 | ('User', json_data['user']), 111 | ('Password', json_data['password']), 112 | ('Database', json_data['database']), 113 | ('Running', json_data['running']) 114 | ] 115 | 116 | print(tabulate(table, tablefmt='simple')) 117 | -------------------------------------------------------------------------------- /src/myaas/reaper.py: -------------------------------------------------------------------------------- 1 | import signal 2 | import logging 3 | from time import sleep 4 | from datetime import datetime 5 | 6 | import click 7 | 8 | from .settings import DEBUG 9 | from .utils.database import get_myaas_containers, get_enabled_backend 10 | from .utils.container import client 11 | 12 | 13 | logging.basicConfig( 14 | format='%(asctime)s %(name)s %(levelname)s: %(message)s', 15 | level=logging.DEBUG if DEBUG else logging.INFO) 16 | 17 | logger = logging.getLogger("myaas-reaper") 18 | 19 | 20 | def get_container_name(container): 21 | return container['Names'][0].lstrip('/') 22 | 23 | 24 | class SignalHandler: 25 | def __init__(self): 26 | self.__killed = False 27 | signal.signal(signal.SIGTERM, self.stop) 28 | signal.signal(signal.SIGINT, self.stop) 29 | 30 | @property 31 | def exit(self): 32 | return self.__killed 33 | 34 | def stop(self, signum, frame): 35 | self.__killed = True 36 | 37 | 38 | class ContainerFilter(object): 39 | def __init__(self, expired=False, dead=False, unhealthy=False): 40 | self._expired = expired 41 | self._dead = dead 42 | self._unhealthy = unhealthy 43 | 44 | def filter(self, containers): 45 | if self._expired: 46 | logger.info('* Filtering expired') 47 | if self._dead: 48 | logger.info('* Filtering exited') 49 | if self._unhealthy: 50 | logger.info('* Filtering unhealthy') 51 | return filter(self._is_removable, containers) 52 | 53 | def _is_removable(self, container): 54 | name = get_container_name(container) 55 | if self._expired and self._is_expired(container): 56 | logger.info("%s is expired, queued for deletion", name) 57 | return True 58 | 59 | if self._dead and self._is_dead(container): 60 | logger.info("%s is dead, queued for deletion", name) 61 | return True 62 | 63 | if self._unhealthy and self._is_unhealthy(container): 64 | logger.info("%s is unhealthy, queued for deletion", name) 65 | return True 66 | 67 | return False 68 | 69 | def _is_expired(self, container): 70 | if 'com.myaas.expiresAt' in container['Labels']: 71 | expiry_ts = round(float(container['Labels']['com.myaas.expiresAt'])) # noqa 72 | else: 73 | # asume a 24 hours TTL 74 | expiry_ts = int(container['Created']) + 86400 75 | 76 | return datetime.utcnow() >= datetime.utcfromtimestamp(expiry_ts) 77 | 78 | def _is_dead(self, container): 79 | return container['State'] == 'exited' 80 | 81 | def _is_unhealthy(self, container): 82 | return 'unhealthy' in container['Status'] 83 | 84 | 85 | def remove_database(container): 86 | template = container['Labels']['com.myaas.template'] 87 | name = container['Labels']['com.myaas.instance'] 88 | try: 89 | logger.info(f'removing {name}') 90 | backend = get_enabled_backend().Database 91 | backend(client, template, name).remove() 92 | except Exception as e: 93 | logger.exception( 94 | f"Failed to remove database {template} {name}") 95 | 96 | 97 | @click.command() 98 | @click.option('-e', '--expired', is_flag=True, default=False, help='Remove expired containers.') 99 | @click.option('-d', '--dead', is_flag=True, default=False, help='Remove exited containers.') 100 | @click.option('-u', '--unhealthy', is_flag=True, default=False, help='Remove unhealthy containers.') 101 | @click.option('--dry-run', is_flag=True, default=False, help='Only print name of containers that would be removed and exit.') 102 | def cleanup(expired, dead, unhealthy, dry_run): 103 | if not (expired or dead or unhealthy): 104 | raise click.UsageError("at least one filter must be enabled, use --help for more information") 105 | 106 | cf = ContainerFilter(expired, dead, unhealthy) 107 | databases = cf.filter(get_myaas_containers()) 108 | 109 | if dry_run: 110 | logger.info("Started in dry mode") 111 | for d in databases: 112 | name = get_container_name(d) 113 | logger.info("would remove %s", name) 114 | return 115 | 116 | logger.info("Starting myaas ttl reaper...") 117 | sighandler = SignalHandler() 118 | while not sighandler.exit: 119 | databases = cf.filter(get_myaas_containers()) 120 | for d in databases: 121 | remove_database(d) 122 | sleep(1) 123 | 124 | logger.info("Stopped") 125 | 126 | 127 | if __name__ == '__main__': 128 | cleanup() 129 | -------------------------------------------------------------------------------- /src/gunicorn.conf.py: -------------------------------------------------------------------------------- 1 | import multiprocessing 2 | 3 | bind = '0.0.0.0:80' 4 | 5 | # backlog - The number of pending connections. This refers 6 | # to the number of clients that can be waiting to be 7 | # served. Exceeding this number results in the client 8 | # getting an error when attempting to connect. It should 9 | # only affect servers under significant load. 10 | # 11 | # Must be a positive integer. Generally set in the 64-2048 12 | # range. 13 | # 14 | backlog = 128 15 | # 16 | # Worker processes 17 | # 18 | # workers - The number of worker processes that this server 19 | # should keep alive for handling requests. 20 | # 21 | # A positive integer generally in the 2-4 x $(NUM_CORES) 22 | # range. You'll want to vary this a bit to find the best 23 | # for your particular application's work load. 24 | # 25 | workers = min(multiprocessing.cpu_count(), 8) * 2 + 1 26 | # 27 | # worker_class - The type of workers to use. The default 28 | # sync class should handle most 'normal' types of work 29 | # loads. You'll want to read 30 | # http://docs.gunicorn.org/en/latest/design.html#choosing-a-worker-type 31 | # for information on when you might want to choose one 32 | # of the other worker classes. 33 | # 34 | # A string referring to a Python path to a subclass of 35 | # gunicorn.workers.base.Worker. The default provided values 36 | # can be seen at 37 | # http://docs.gunicorn.org/en/latest/settings.html#worker-class 38 | # 39 | worker_class = 'sync' 40 | # 41 | # worker_connections - For the eventlet and gevent worker classes 42 | # this limits the maximum number of simultaneous clients that 43 | # a single process can handle. 44 | # 45 | # A positive integer generally set to around 1000. 46 | # 47 | # worker_connections = 1000 48 | # 49 | # timeout - If a worker does not notify the master process in this 50 | # number of seconds it is killed and a new worker is spawned 51 | # to replace it. 52 | # 53 | # Generally set to thirty seconds. Only set this noticeably 54 | # higher if you're sure of the repercussions for sync workers. 55 | # For the non sync workers it just means that the worker 56 | # process is still communicating and is not tied to the length 57 | # of time required to handle a single request. 58 | # 59 | timeout = 900 60 | # 61 | # keepalive - The number of seconds to wait for the next request 62 | # on a Keep-Alive HTTP connection. 63 | # 64 | # A positive integer. Generally set in the 1-5 seconds range. 65 | # 66 | keepalive = 2 67 | # 68 | # spew - Install a trace function that spews every line of Python 69 | # that is executed when running the server. This is the 70 | # nuclear option. 71 | # 72 | # True or False 73 | # 74 | spew = False 75 | # 76 | # Server mechanics 77 | # 78 | # daemon - Detach the main Gunicorn process from the controlling 79 | # terminal with a standard fork/fork sequence. 80 | # 81 | # True or False 82 | # 83 | daemon = False 84 | # 85 | # pidfile - The path to a pid file to write 86 | # 87 | # A path string or None to not write a pid file. 88 | # 89 | pidfile = None 90 | # 91 | # user - Switch worker processes to run as this user. 92 | # 93 | # A valid user id (as an integer) or the name of a user that 94 | # can be retrieved with a call to pwd.getpwnam(value) or None 95 | # to not change the worker process user. 96 | # 97 | user = None 98 | # 99 | # group - Switch worker process to run as this group. 100 | # 101 | # A valid group id (as an integer) or the name of a user that 102 | # can be retrieved with a call to pwd.getgrnam(value) or None 103 | # to change the worker processes group. 104 | # 105 | group = None 106 | # 107 | # umask - A mask for file permissions written by Gunicorn. Note that 108 | # this affects unix socket permissions. 109 | # 110 | # A valid value for the os.umask(mode) call or a string 111 | # compatible with int(value, 0) (0 means Python guesses 112 | # the base, so values like "0", "0xFF", "0022" are valid 113 | # for decimal, hex, and octal representations) 114 | # 115 | umask = 0 116 | # 117 | # tmp_upload_dir - A directory to store temporary request data when 118 | # requests are read. This will most likely be disappearing soon. 119 | # 120 | # A path to a directory where the process owner can write. Or 121 | # None to signal that Python should choose one on its own. 122 | # 123 | tmp_upload_dir = None 124 | # 125 | # 126 | # Logging 127 | # 128 | # logfile - The path to a log file to write to. 129 | # 130 | # A path string. "-" means log to stdout. 131 | # 132 | # loglevel - The granularity of log output 133 | # 134 | # A string of "debug", "info", "warning", "error", "critical" 135 | # 136 | loglevel = 'info' 137 | errorlog = '-' 138 | accesslog = '-' 139 | access_log_format = '%(h)s %(l)s %(u)s %(t)s "%(r)s" %(s)s %(b)s "%(f)s" "%(a)s"' # noqa 140 | -------------------------------------------------------------------------------- /src/myaas/server.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import os 3 | 4 | from flask import Flask, Response, request, jsonify, abort 5 | 6 | from .settings import HOSTNAME, DEBUG, CONTAINER_TTL 7 | from .utils.container import client 8 | from .utils.database import (get_myaas_containers, get_enabled_backend, 9 | list_databases, list_database_templates) 10 | from .backends.exceptions import (NonExistentDatabase, NonExistentTemplate, 11 | ImportInProgress) 12 | 13 | app = Flask(__name__) 14 | 15 | logger = logging 16 | logger.basicConfig( 17 | format='%(asctime)s {:4} %(levelname)s: %(message)s'.format(os.getpid()), 18 | level=logging.DEBUG if DEBUG else logging.WARNING) 19 | 20 | 21 | @app.route('/', methods=['get']) 22 | def hello_world(): 23 | return jsonify( 24 | status="Service is running", 25 | templates=list_database_templates(), 26 | databases=list_databases()) 27 | 28 | 29 | @app.route('/db', methods=['get']) 30 | def show_databases(): 31 | databases = [] 32 | for c in get_myaas_containers(): 33 | db = { 34 | 'template': c['Labels']['com.myaas.template'], 35 | 'name': c['Labels']['com.myaas.instance'], 36 | 'state': c['Status'], 37 | 'created': c['Created'], 38 | } 39 | if 'com.myaas.expiresAt' in c['Labels']: 40 | db.update({'expires_at': c['Labels']['com.myaas.expiresAt']}) 41 | databases.append(db) 42 | 43 | return jsonify(databases=databases) 44 | 45 | 46 | @app.route('/templates', methods=['get']) 47 | def show_templates(): 48 | return jsonify(templates=list_database_templates()) 49 | 50 | 51 | @app.route('/db/