├── forklift ├── README.md ├── services │ ├── __init__.py │ ├── proxy.py │ ├── satellite.py │ ├── amqp.py │ ├── email.py │ ├── redis.py │ ├── memcached.py │ ├── syslog.py │ ├── elasticsearch.py │ ├── postgres.py │ └── base.py ├── registry.py ├── arguments.py ├── base.py ├── __init__.py └── drivers.py ├── dev_requirements.txt ├── requirements.txt ├── test_requirements.txt ├── MANIFEST.in ├── runtests.sh ├── .gitignore ├── .coveragerc ├── tests ├── test_id_rsa.pub ├── __init__.py ├── forklift │ ├── __init__.py │ ├── test_help.py │ ├── test_providers.py │ ├── test_services_api.py │ ├── test_sshd.py │ ├── test_cleanroom.py │ └── test_forklift.py ├── test_id_rsa ├── test_arguments.py ├── test_util.py ├── base.py └── test_services.py ├── .travis.yml ├── setup.py ├── .githooks └── pre-commit ├── pylintrc ├── README.md └── LICENSE /forklift/README.md: -------------------------------------------------------------------------------- 1 | ../README.md -------------------------------------------------------------------------------- /dev_requirements.txt: -------------------------------------------------------------------------------- 1 | -r requirements.txt 2 | -r test_requirements.txt 3 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | docker-py>=1.1.0 2 | psutil 3 | pyxdg 4 | pyyaml 5 | -------------------------------------------------------------------------------- /test_requirements.txt: -------------------------------------------------------------------------------- 1 | coverage 2 | pep8 3 | pylint 4 | pylint-mccabe 5 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include README.md 2 | include requirements.txt 3 | include test_requirements.txt 4 | -------------------------------------------------------------------------------- /runtests.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh -e 2 | 3 | coverage erase 4 | coverage run -m unittest "$@" 5 | coverage report 6 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | __pycache__ 2 | /*.egg 3 | /.coverage 4 | /build 5 | /dist 6 | /docker_forklift.egg-info 7 | /htmlcov 8 | -------------------------------------------------------------------------------- /.coveragerc: -------------------------------------------------------------------------------- 1 | [run] 2 | branch = True 3 | source = forklift 4 | 5 | [report] 6 | exclude_lines = 7 | pragma: no cover 8 | raise NotImplementedError 9 | -------------------------------------------------------------------------------- /tests/test_id_rsa.pub: -------------------------------------------------------------------------------- 1 | ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDDU/Ta2y0rsORiSzplgd6OBSHFeqqmqIwwFoESQSdPohf6rVBhWrkwDeKZvWxmnX2HKC+nfecwOD2DdXxZV1BfENXxXIj/WepE2fb9cQcaY+zXwP813Q1deiCVuXOgCVXZio4E1FoM2cJuT2uYHgvP8WNJAG1+7EGZN5O6pwmr6610Ho/c/PmrbCU6kQo/it6tXGBQy4NuxRo+zqOaHND4Hz2Sfgzr/5e5lN9imQQvtrmfeQOaiFfVjMByydQg68iglOPoE6rdYamXNrKvnCJJJgKHxWNa2aZfb4Pzli/i91QJGg3ElpWoxf5ubrEd6zDB8TWIoTBhM3CKcxaw68Cl forklift.test.key 2 | -------------------------------------------------------------------------------- /tests/__init__.py: 
-------------------------------------------------------------------------------- 1 | # 2 | # Copyright 2014 Infoxchange Australia 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | """ 17 | Base test module. 18 | """ 19 | -------------------------------------------------------------------------------- /tests/forklift/__init__.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright 2014 Infoxchange Australia 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | """ 17 | Base test module for Forklift. 18 | """ 19 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: python 2 | python: 3 | - "3.5" 4 | - "3.4" 5 | - "3.3" 6 | install: 7 | - pip install -r dev_requirements.txt 8 | - pip install coveralls 9 | script: 10 | - .githooks/pre-commit -f 11 | - ./runtests.sh 12 | after_success: 13 | - coveralls 14 | deploy: 15 | provider: pypi 16 | user: ixa 17 | password: 18 | secure: KotqwXcFnooWKlSp1jM98898loE9ugWpba7tXGcDFuHhXQGT/wXtNgzROa2EhPxs7IYfQtxq4HD3dadYSziuUzHRTTjg5YUiW9Gx4N0gFtfthraroKu4IB62B9k84YPmxIIZZNvkJOSd1Ip5x2/+TJntPcroi0UK4GFRhLmcOKM= 19 | server: https://pypi.python.org/pypi 20 | on: 21 | tags: true 22 | all_branches: true 23 | python: 3.4 24 | condition: "\"$TRAVIS_TAG\" = \"v$(python setup.py --version)\"" 25 | -------------------------------------------------------------------------------- /forklift/services/__init__.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright 2014 Infoxchange Australia 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | """ 17 | Services that can be provided to running applications. 
18 | """ 19 | 20 | from .base import register, Service 21 | 22 | # pylint:disable=unused-import 23 | from .elasticsearch import Elasticsearch 24 | from .email import Email 25 | from .memcached import Memcache 26 | from .postgres import PostgreSQL, PostGIS 27 | from .proxy import Proxy 28 | from .syslog import Syslog 29 | from .redis import Redis 30 | from .amqp import RabbitMQ 31 | -------------------------------------------------------------------------------- /forklift/registry.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright 2014 Infoxchange Australia 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | """ 17 | Helpers related to maintaining a registry of classes 18 | """ 19 | 20 | 21 | class Registry(dict): 22 | """ 23 | A registry class, used for registering services, drivers, etc. 24 | 25 | This is not the registry itself. The registry itself is in 26 | forklift.services, forklift.drivers, etc. 27 | """ 28 | 29 | def __call__(self, name): 30 | """ 31 | Use registry as a decorator to register Forklift services 32 | """ 33 | 34 | def inner(cls): 35 | """ 36 | Decorator 37 | """ 38 | self[name] = cls 39 | 40 | return cls 41 | 42 | return inner 43 | -------------------------------------------------------------------------------- /tests/test_id_rsa: -------------------------------------------------------------------------------- 1 | -----BEGIN RSA PRIVATE KEY----- 2 | MIIEowIBAAKCAQEAw1P02tstK7DkYks6ZYHejgUhxXqqpqiMMBaBEkEnT6IX+q1Q 3 | YVq5MA3imb1sZp19hygvp33nMDg9g3V8WVdQXxDV8VyI/1nqRNn2/XEHGmPs18D/ 4 | Nd0NXXoglblzoAlV2YqOBNRaDNnCbk9rmB4Lz/FjSQBtfuxBmTeTuqcJq+utdB6P 5 | 3Pz5q2wlOpEKP4rerVxgUMuDbsUaPs6jmhzQ+B89kn4M6/+XuZTfYpkEL7a5n3kD 6 | mohX1YzAcsnUIOvIoJTj6BOq3WGplzayr5wiSSYCh8VjWtmmX2+D85Yv4vdUCRoN 7 | xJaVqMX+bm6xHeswwfE1iKEwYTNwinMWsOvApQIDAQABAoIBAAuNV5nqBI3k9Fzr 8 | 3hRKzgLQQuMPEjCxHzlo21FdERogmBJ3VZ2sY+93osM89MBM6DOqKMdbJuUvLlQs 9 | CqKuC07UDBMuSdvwUW6pBXaPzL++0S8hdvaUkEnLA0pXoYMBilZHX4bhmEHSTzF+ 10 | +PoSLNirihezMsai6m5JXy9W2beVKkWN2amOGq4vEI2FmAiwoZa484JTcgAJJIqW 11 | pndM9Lv0Rz52kMPH1KwzuCqsN1U7cpoFEPjNcJpdbhhgrac393ngGga+DnuP++sO 12 | sqPGlA6avAehp8GGTQX4pBlF8XitOM349MbDvbt8SJWdjFKUv0YS6dEXgBSWqj1n 13 | n1HQCrECgYEA4Oytdjm8fRE7Zr9nY04w5kOy/e3noqR6oMUdxsfE1YxZr/AxLIhH 14 | drBnfraen8m4uMgN0zAsvUUtb6f8D9q/uT6z3/vkAQ3uD6fomCcBfB8sSgs007KB 15 | u5yWU3AKJ2flPNmJwBORp9+JKzuWjzIBR6qN3+wofbyUerpoceTPxtsCgYEA3lB7 16 | dI7suRUwCPGkZc0dDmIYuZ3foSebR0wKFtCIU5msghe+yxlxS9UbH8mKxYZNzLL7 17 | gN60zp9L/9So6QDY7S4gKccxsihSFV/w/yifamEBvI6uuCH/xgVQd3Racsa3HIXW 18 | ZXys67lAyIFul73fv7EAiN0cawquXFBQmzESbn8CgYAds2Mcb3dSVIoxgSjX1iVi 19 | 4qGqIlYqBeojfKP38b0uLcBSGTeFF+HH7HdeJiNAGlUIIXKh5oSh4ZakXdxNA6e5 20 | gGwkMZDkCsDa1GNEZDIv/7XLeTtakQHMklrPXcYZegCtTlSARpU/Q6dLTC0Pb5z3 21 | p6/gV8DpCZPZuO+9ymcdbQKBgQCecxJqsut5xuYjmcaoYBbTh4tD0IfxHn+Hw2R1 22 | ek+P9jmDx1TPAOVFPTBF6amkuuJ8gRzMbu7DTbonLvTsjRQPiA6YeEatShum7Gii 23 | E9v7QASy1aclyeW4x1TgrudAYu1jwOhcEQ5WPAU2YznloQj2YTuWULHOgSBg30V0 24 | 
NveCFQKBgHwCE4+jSFcT7lhnJoA8+luxo+2qE02PXJlaJX8XMUJTWMPus4mpjtbu 25 | KcNWag4cSREqbFt2jyK+fdjkIyIccB024jQxSjnDg2AGV/XBNeHkIDb2E1Djp/QW 26 | 08JY0CWWU/wFQOarVqnIjrWa1K4Nse8b0Jt9N/KqtRTMZOTo80jm 27 | -----END RSA PRIVATE KEY----- 28 | -------------------------------------------------------------------------------- /tests/forklift/test_help.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright 2014 Infoxchange Australia 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | """ 17 | Tests for help invocation. 18 | """ 19 | 20 | import io 21 | import tempfile 22 | 23 | from tests.base import ( 24 | TestCase, 25 | TestForklift, 26 | redirect_stream, 27 | ) 28 | 29 | 30 | class HelpTestForklift(TestForklift): 31 | """ 32 | Mock _readme_stream for tests. 33 | """ 34 | 35 | @staticmethod 36 | def _readme_stream(): 37 | """ 38 | Dummy stream. 39 | """ 40 | 41 | return io.BytesIO("Help yourself.".encode()) 42 | 43 | 44 | class HelpTestCase(TestCase): 45 | """ 46 | Test help invocation. 47 | """ 48 | 49 | forklift_class = HelpTestForklift 50 | 51 | def test_help(self): 52 | """ 53 | Test help invocation. 54 | """ 55 | 56 | with tempfile.NamedTemporaryFile() as tmpfile: 57 | with redirect_stream(tmpfile.file.fileno()): 58 | self.run_forklift('help') 59 | 60 | with open(tmpfile.name) as saved_output: 61 | help_text = saved_output.read() 62 | help_text = help_text.replace('()', '').strip() 63 | self.assertEqual("Help yourself.", help_text) 64 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright 2014 Infoxchange Australia 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | """ 17 | Setup script. 18 | """ 19 | 20 | from sys import version_info 21 | 22 | from setuptools import setup, find_packages 23 | 24 | assert version_info >= (3,), "Python 3 is required." 
25 | 26 | with open('requirements.txt') as requirements, \ 27 | open('test_requirements.txt') as test_requirements: 28 | setup( 29 | name='docker-forklift', 30 | version='0.2.52', 31 | description='Utility for running a container', 32 | author='Infoxchange Australia development team', 33 | author_email='devs@infoxchange.net.au', 34 | url='https://github.com/infoxchange/docker-forklift', 35 | license='Apache 2.0', 36 | long_description=open('README.md').read(), 37 | 38 | packages=find_packages(exclude=['tests']), 39 | package_data={ 40 | 'forklift': [ 41 | 'README.md', 42 | 'requirements.txt', 43 | 'test_requirements.txt', 44 | ], 45 | }, 46 | entry_points={ 47 | 'console_scripts': [ 48 | 'forklift = forklift:main', 49 | ], 50 | }, 51 | 52 | install_requires=requirements.read().splitlines(), 53 | 54 | test_suite='tests', 55 | tests_require=test_requirements.read().splitlines(), 56 | ) 57 | -------------------------------------------------------------------------------- /forklift/services/proxy.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright 2014 Infoxchange Australia 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | """ 17 | Proxy service. 18 | """ 19 | 20 | from .base import URLService, port_open, register, transient_provider 21 | 22 | 23 | @register('proxy') 24 | class Proxy(URLService): 25 | """ 26 | Proxy service for the application. 27 | """ 28 | 29 | def __init__(self, host='', port=None): 30 | super().__init__('http://{host}{port}'.format( 31 | host=host or '', 32 | port=':' + str(port) if port else '', 33 | )) 34 | 35 | def available(self): 36 | """ 37 | Check whether the proxy is available. 38 | """ 39 | 40 | if self.host: 41 | return port_open(self.host, self.port) 42 | else: 43 | return True 44 | 45 | def environment(self): 46 | """ 47 | The environment to access the proxy. 48 | """ 49 | 50 | if self.host: 51 | return { 52 | 'HTTP_PROXY': self.url_string() 53 | } 54 | else: 55 | return {} 56 | 57 | @classmethod 58 | @transient_provider 59 | def manual(cls, application_id): 60 | """ 61 | Manually-configured proxy. Will not be available unless parameters 62 | are overridden in configuration. 63 | """ 64 | 65 | return cls() 66 | 67 | providers = ('manual',) 68 | -------------------------------------------------------------------------------- /forklift/services/satellite.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright 2014 Infoxchange Australia 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | """ 17 | Satellite processes started by Forklift itself to provide services. 18 | """ 19 | 20 | import os 21 | from threading import Thread 22 | 23 | from forklift.base import wait_for_parent 24 | 25 | 26 | def start_satellite(target, args=(), kwargs=None, stop=None): 27 | """ 28 | Start a process configured to run the target but kill it after the parent 29 | exits. 30 | """ 31 | 32 | if kwargs is None: 33 | kwargs = {} 34 | 35 | child_pid = os.fork() 36 | if not child_pid: 37 | # Make sure signals sent by the shell aren't propagated to the 38 | # satellite 39 | os.setpgrp() 40 | _satellite(target, args, kwargs, stop) 41 | 42 | 43 | def _satellite(target, args, kwargs, stop): 44 | """ 45 | Run the target, killing it after the parent exits. 46 | """ 47 | 48 | # Run target daemonized. 49 | payload = Thread( 50 | target=target, 51 | args=args, 52 | kwargs=kwargs, 53 | ) 54 | payload.daemon = True 55 | payload.start() 56 | 57 | wait_for_parent() 58 | exit_status = stop() if stop is not None else None 59 | if exit_status is None: 60 | exit_status = 0 61 | 62 | # This is in a child process, so exit without additional cleanup 63 | os._exit(exit_status) # pylint:disable=protected-access 64 | -------------------------------------------------------------------------------- /forklift/arguments.py: -------------------------------------------------------------------------------- 1 | """ 2 | Argument parsing utilities. 3 | """ 4 | 5 | from argparse import Namespace 6 | 7 | 8 | def argument_factory(add_argument, name): 9 | """ 10 | A factory to prepend all argument names with a prefix. 11 | """ 12 | 13 | def wrapped(*args, **kwargs): 14 | """ 15 | Prepend all argument names with a prefix before adding the argument. 16 | """ 17 | 18 | assert all(option[:2] == '--' for option in args) 19 | option_names = [ 20 | '--{0}.{1}'.format(name, option[2:]) 21 | for option in args 22 | ] 23 | return add_argument(*option_names, **kwargs) 24 | 25 | return wrapped 26 | 27 | 28 | def convert_to_args(conf, prefix=None): 29 | """ 30 | Convert hierarchical configuration dictionary to argparse arguments. 31 | 32 | 'environment' at root level is a special case: if it is a hash, 33 | it is converted into an array of VAR=VALUE pairs. 34 | """ 35 | 36 | args = [] 37 | conf = conf or {} 38 | prefix = prefix or () 39 | 40 | if not prefix and 'environment' in conf: 41 | environment = conf['environment'] 42 | if isinstance(environment, dict): 43 | conf['environment'] = [ 44 | '{0}={1}'.format(k, v) 45 | for k, v in environment.items() 46 | ] 47 | 48 | for key, value in conf.items(): 49 | arg_prefix = prefix + (key,) 50 | if isinstance(value, dict): 51 | args.extend(convert_to_args(value, arg_prefix)) 52 | else: 53 | if not isinstance(value, (list, tuple)): 54 | value = (value,) 55 | 56 | if len(value) > 0: 57 | args.append('--' + '.'.join(arg_prefix)) 58 | for val in value: 59 | args.append(str(val)) 60 | 61 | return args 62 | 63 | 64 | def project_args(args, prefix): 65 | """ 66 | Return only keys in the object having the specified prefix, stripping 67 | the prefix. 
68 | """ 69 | 70 | pairs = vars(args).items() 71 | strip_len = len(prefix) + 1 72 | 73 | return Namespace(**dict( 74 | (key[strip_len:], value) 75 | for key, value in pairs 76 | if key.startswith(prefix + '.') 77 | )) 78 | -------------------------------------------------------------------------------- /forklift/services/amqp.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright 2015 Infoxchange 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | """ 17 | Redis service 18 | """ 19 | 20 | import logging 21 | 22 | from .base import ( 23 | ProviderNotAvailable, 24 | URLNameDescriptor, 25 | URLHostInfoDescriptor, 26 | URLService, 27 | port_open, 28 | register, 29 | split_host_port, 30 | ) 31 | 32 | LOGGER = logging.getLogger(__name__) 33 | 34 | 35 | @register('amqp') 36 | class RabbitMQ(URLService): 37 | """ 38 | A RabbitMQ/AMQP service 39 | """ 40 | CONTAINER_IMAGE = 'rabbitmq' 41 | DEFAULT_PORT = 5672 42 | 43 | hosts = URLHostInfoDescriptor(default_port=DEFAULT_PORT, joiner=tuple) 44 | vhost = URLNameDescriptor() 45 | 46 | allow_override = URLService.allow_override + ('vhost',) 47 | allow_override_list = URLService.allow_override_list + ('hosts',) 48 | 49 | providers = ('localhost', 'container') 50 | 51 | def environment(self): 52 | """Environment for AMQP""" 53 | 54 | return { 55 | 'AMQP_URLS': self.url_string(), 56 | } 57 | 58 | def check_available(self): 59 | """Check the service is available""" 60 | for host in self.hosts: 61 | if port_open(*split_host_port(host, self.DEFAULT_PORT)): 62 | return True 63 | 64 | raise ProviderNotAvailable("RabbitMQ not available: none of the hosts " 65 | "are up") 66 | 67 | @classmethod 68 | def localhost(cls, application_id): 69 | """RabbitMQ on the local machine""" 70 | return cls(urls=('amqp://localhost:{port}/{vhost}/'.format( 71 | port=cls.DEFAULT_PORT, 72 | vhost=application_id),)) 73 | 74 | @classmethod 75 | def from_container(cls, application_id, container): 76 | """RabbitMQ as a container""" 77 | 78 | return cls(urls=('amqp://guest:guest@{c.host}:{c.port}//'.format( 79 | c=container),)) 80 | -------------------------------------------------------------------------------- /forklift/services/email.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright 2014 Infoxchange Australia 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | """ 17 | Proxy service. 
18 | """ 19 | 20 | from forklift.base import free_port 21 | from .base import register, transient_provider, try_port, URLService 22 | 23 | 24 | @register('email') 25 | class Email(URLService): 26 | """ 27 | An MTA for the application. 28 | """ 29 | 30 | allow_override = ('host', 'port') 31 | 32 | def __init__(self, host, port=25): 33 | super().__init__(( 34 | 'smtp://{host}:{port}/'.format( 35 | host=host, 36 | port=port, 37 | ), 38 | )) 39 | 40 | def environment(self): 41 | """ 42 | The environment to send email. 43 | """ 44 | 45 | return { 46 | 'EMAIL_HOST': self.host, 47 | 'EMAIL_PORT': str(self.port), 48 | } 49 | 50 | def check_available(self): 51 | """ 52 | Check whether the MTA is available. 53 | """ 54 | 55 | return try_port(self.host, self.port) 56 | 57 | @classmethod 58 | def localhost(cls, application_id): 59 | """ 60 | The MTA on the local machine. 61 | """ 62 | return cls(host='localhost') 63 | 64 | @classmethod 65 | @transient_provider 66 | def stdout(cls, application_id): 67 | """ 68 | Mailer printing all the messages to the standard output of Forklift. 69 | """ 70 | 71 | from forklift.services.satellite import start_satellite 72 | from smtpd import DebuggingServer 73 | 74 | port = free_port() 75 | 76 | def run_server(): 77 | """ 78 | Run the syslog server. 79 | """ 80 | 81 | DebuggingServer(('0.0.0.0', port), None) 82 | import asyncore 83 | asyncore.loop() 84 | 85 | start_satellite(target=run_server) 86 | 87 | instance = cls('localhost', port) 88 | instance.wait_until_available() 89 | return instance 90 | 91 | providers = ('localhost', 'stdout') 92 | -------------------------------------------------------------------------------- /forklift/services/redis.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright 2014 Infoxchange Australia 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | """ 17 | Redis service 18 | """ 19 | 20 | import logging 21 | from telnetlib import Telnet 22 | 23 | from .base import ( 24 | register, 25 | URLNameDescriptor, 26 | URLService, 27 | ) 28 | 29 | LOGGER = logging.getLogger(__name__) 30 | 31 | 32 | @register('redis') 33 | class Redis(URLService): 34 | """ 35 | A Redis service 36 | 37 | This is a single Redis server. 
38 | """ 39 | 40 | allow_override = URLService.allow_override + ('db_index',) 41 | db_index = URLNameDescriptor() 42 | 43 | providers = ('localhost', 'container') 44 | 45 | CONTAINER_IMAGE = 'redis' 46 | 47 | DEFAULT_PORT = 6379 48 | 49 | def __init__(self, host, db_index=0): 50 | # FIXME: we don't support multiple redis servers yet 51 | super().__init__('redis://{host}/{db_index}'.format( 52 | host=host, 53 | db_index=db_index, 54 | )) 55 | 56 | def environment(self): 57 | """ 58 | The environment to access Redis 59 | """ 60 | 61 | return { 62 | 'REDIS_HOSTS': self.host, 63 | 'REDIS_DB_INDEX': str(self.db_index), 64 | } 65 | 66 | def check_available(self): 67 | """ 68 | Check whether Redis is available 69 | """ 70 | 71 | # pylint:disable=invalid-name 72 | nc = Telnet(self.host, self.port) 73 | 74 | try: 75 | nc.write(b'PING') 76 | nc.read_until(b'PONG\n', 77 | timeout=1) 78 | finally: 79 | nc.close() 80 | 81 | return True 82 | 83 | @classmethod 84 | def localhost(cls, application_id): 85 | """ 86 | The default Redis provider 87 | """ 88 | 89 | return cls(host='localhost:{port}'.format(port=cls.DEFAULT_PORT)) 90 | 91 | @classmethod 92 | def from_container(cls, application_id, container): 93 | """ 94 | Redis provided by a container. 95 | """ 96 | 97 | return cls( 98 | host='{host}:{port}'.format(**container.__dict__), 99 | ) 100 | -------------------------------------------------------------------------------- /tests/test_arguments.py: -------------------------------------------------------------------------------- 1 | """ 2 | Tests for argument parsing utilities. 3 | """ 4 | 5 | from argparse import Namespace 6 | from collections import OrderedDict 7 | import unittest 8 | 9 | from forklift.arguments import convert_to_args, project_args 10 | 11 | 12 | class ConvertToArgsTestCase(unittest.TestCase): 13 | """ 14 | Test convert_to_args. 15 | """ 16 | 17 | def test_convert_to_args(self): 18 | """ 19 | Test convert_to_args. 20 | """ 21 | 22 | conf = OrderedDict([ 23 | ('simple', 'value'), 24 | ('number', 10), 25 | ('array', [ 26 | 'one', 27 | 'two', 28 | ]), 29 | ('empty_array', []), 30 | ('nested', OrderedDict([ 31 | ('first', 'deep'), 32 | ('second', 'deeper'), 33 | ])), 34 | ]) 35 | 36 | self.assertEqual(convert_to_args(conf), [ 37 | '--simple', 'value', 38 | '--number', '10', 39 | '--array', 'one', 'two', 40 | '--nested.first', 'deep', 41 | '--nested.second', 'deeper', 42 | ]) 43 | 44 | self.assertEqual(convert_to_args(None), []) 45 | 46 | def test_environment(self): 47 | """ 48 | Test 'environment' conversion. 49 | """ 50 | 51 | conf = OrderedDict([ 52 | # This will be converted into an array 53 | ('environment', OrderedDict([ 54 | ('foo', 'badger'), 55 | ('bar', 'mushroom'), 56 | ])), 57 | ('irrelevant', OrderedDict([ 58 | # This is to stay a hash 59 | ('environment', OrderedDict([ 60 | ('baz', 'snake'), 61 | ])), 62 | ])), 63 | ]) 64 | 65 | self.assertEqual(convert_to_args(conf), [ 66 | '--environment', 'foo=badger', 'bar=mushroom', 67 | '--irrelevant.environment.baz', 'snake', 68 | ]) 69 | 70 | # An array to start with is permitted too 71 | conf = OrderedDict([ 72 | ('environment', [ 73 | 'foo=badger', 74 | 'bar=mushroom', 75 | ]), 76 | ]) 77 | 78 | self.assertEqual(convert_to_args(conf), [ 79 | '--environment', 'foo=badger', 'bar=mushroom', 80 | ]) 81 | 82 | 83 | class ProjectArgsTestCase(unittest.TestCase): 84 | """ 85 | Test project_args. 86 | """ 87 | 88 | def test_project_args(self): 89 | """ 90 | Test project_args. 
91 | """ 92 | 93 | args = Namespace(**{ 94 | 'one.a': '1a', 95 | 'one.b': '1b', 96 | 'two.c': '2c', 97 | }) 98 | 99 | self.assertEqual( 100 | vars(project_args(args, 'one')), 101 | { 102 | 'a': '1a', 103 | 'b': '1b', 104 | } 105 | ) 106 | -------------------------------------------------------------------------------- /tests/forklift/test_providers.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright 2014 Infoxchange Australia 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | """ 17 | Test the base Service choosing from the available providers. 18 | """ 19 | 20 | from forklift.base import ImproperlyConfigured 21 | from forklift.services.base import ( 22 | ProviderNotAvailable, 23 | Service, 24 | ) 25 | 26 | from tests.base import (TestCase) 27 | 28 | 29 | class FussyService(Service): 30 | """ 31 | A service with a few providers which are only sometimes available. 32 | """ 33 | 34 | def __init__(self, value, application_id): 35 | self.application_id = application_id 36 | self.value = value 37 | 38 | providers = ( 39 | 'one', 40 | 'two', 41 | ) 42 | 43 | # An array of values deemed available 44 | nice = ( 45 | 'one', 46 | 'two', 47 | ) 48 | 49 | @classmethod 50 | def one(cls, application_id): 51 | """ 52 | One nice provider. 53 | """ 54 | return cls('one', application_id) 55 | 56 | @classmethod 57 | def two(cls, application_id): 58 | """ 59 | Another nice provider. 60 | """ 61 | return cls('two', application_id) 62 | 63 | def check_available(self): 64 | """ 65 | Only treat nice providers as available. 66 | """ 67 | if self.value in self.nice: 68 | return True 69 | else: 70 | raise ProviderNotAvailable() 71 | 72 | 73 | class TestProvide(TestCase): 74 | """ 75 | Test choosing an available provider. 76 | """ 77 | 78 | def test_first_available(self): 79 | """ 80 | The first provider tried is available. 81 | """ 82 | self.assertEqual(FussyService.provide(application_id='test').value, 83 | 'one') 84 | 85 | def test_second_available(self): 86 | """ 87 | One provider isn't available, the other one is. 88 | """ 89 | FussyService.nice = ('two',) 90 | self.assertEqual(FussyService.provide(application_id='test').value, 91 | 'two') 92 | 93 | def test_none_available(self): 94 | """ 95 | No providers are available. 96 | """ 97 | FussyService.nice = () 98 | with self.assertRaises(ImproperlyConfigured): 99 | FussyService.provide(application_id='test') 100 | -------------------------------------------------------------------------------- /tests/forklift/test_services_api.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright 2014 Infoxchange Australia 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 
6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | """ 17 | Test services API. 18 | """ 19 | 20 | import unittest 21 | 22 | import forklift.services 23 | 24 | from tests.base import docker_image_available 25 | 26 | 27 | class ServiceTestCase(unittest.TestCase): 28 | """ 29 | Generic service tests. 30 | """ 31 | 32 | service_class = None 33 | 34 | def test_api_conformance(self): 35 | """ 36 | Test that the service has the correct API. 37 | """ 38 | 39 | # assert we have at least one provider 40 | self.assertGreaterEqual(len(self.service_class.providers), 1) 41 | 42 | # assert those providers exist on the class 43 | for provider in self.service_class.providers: 44 | assert hasattr(self.service_class, provider) 45 | 46 | # assert can build a provider 47 | service = getattr(self.service_class, 48 | self.service_class.providers[0])('fake') 49 | 50 | # assert we can set the host 51 | # 52 | # Only the Docker driver uses the host property, and it is 53 | # currently optional. However this test is useful because the 54 | # property is useful. If it turns out there are services for 55 | # which host is not useful, then this test should be changed :) 56 | assert hasattr(service, 'host') 57 | service.host = 'badger' 58 | 59 | assert hasattr(service, 'environment') 60 | assert hasattr(service, 'available') 61 | 62 | # Test all attributes in allow_override exist 63 | for attr in service.allow_override: 64 | value = getattr(service, attr) 65 | setattr(service, attr, value) 66 | 67 | def test_available(self): 68 | """ 69 | Test that the service provided by the image is available. 70 | """ 71 | 72 | image = self.service_class.CONTAINER_IMAGE 73 | if image and not docker_image_available(image): 74 | raise unittest.SkipTest( 75 | "Docker image {0} is required.".format(image)) 76 | 77 | service = self.service_class.provide('fake', transient=True) 78 | self.assertTrue(service.available()) 79 | 80 | service.cleanup() 81 | 82 | 83 | def load_tests(loader, tests, pattern): 84 | """ 85 | Generate a test class for each service. 86 | """ 87 | 88 | suite = unittest.TestSuite() 89 | for cls in forklift.services.register.values(): 90 | test_class = type(ServiceTestCase)( 91 | cls.__name__ + 'TestCase', 92 | (ServiceTestCase,), 93 | { 94 | 'service_class': cls, 95 | } 96 | ) 97 | suite.addTests(loader.loadTestsFromTestCase(test_class)) 98 | return suite 99 | -------------------------------------------------------------------------------- /forklift/services/memcached.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright 2014 Infoxchange Australia 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | """ 17 | Memcache service. 18 | """ 19 | 20 | from .base import ( 21 | ProviderNotAvailable, 22 | URLHostInfoDescriptor, 23 | URLNameDescriptor, 24 | URLService, 25 | port_open, 26 | register, 27 | split_host_port, 28 | ) 29 | 30 | 31 | @register('memcache') 32 | class Memcache(URLService): 33 | """ 34 | Memcache service for the application. 35 | """ 36 | 37 | DEFAULT_PORT = 11211 38 | 39 | CONTAINER_IMAGE = 'memcached' 40 | 41 | allow_override = URLService.allow_override + ('key_prefix',) 42 | allow_override_list = URLService.allow_override_list + ('hosts',) 43 | key_prefix = URLNameDescriptor() 44 | hosts = URLHostInfoDescriptor(default_port=DEFAULT_PORT, joiner=tuple) 45 | 46 | providers = ('localhost', 'container') 47 | 48 | def __init__(self, 49 | key_prefix='', 50 | hosts=None): 51 | 52 | super().__init__( 53 | 'memcache://{host}:{port}/{key_prefix}'.format( 54 | host=host, 55 | port=port, 56 | key_prefix=key_prefix, 57 | ) 58 | for host, port in ( 59 | split_host_port(h, self.DEFAULT_PORT) 60 | for h in hosts 61 | ) 62 | ) 63 | 64 | def environment(self): 65 | """ 66 | The environment to access Memcache 67 | """ 68 | 69 | return { 70 | 'MEMCACHE_HOSTS': '|'.join(self.hosts), 71 | 'MEMCACHE_PREFIX': self.key_prefix, 72 | } 73 | 74 | def check_available(self): 75 | """ 76 | Check whether memcache is available 77 | 78 | Do this by connecting to the socket. At least one host must be up 79 | """ 80 | 81 | for host in self.hosts: 82 | if port_open(*split_host_port(host, self.DEFAULT_PORT)): 83 | return True 84 | 85 | raise ProviderNotAvailable("Memcached not available: none of the hosts" 86 | " are up") 87 | 88 | @classmethod 89 | def localhost(cls, application_id): 90 | """ 91 | The default memcached provider 92 | """ 93 | 94 | return cls(key_prefix=application_id, 95 | hosts=['localhost:{0}'.format(cls.DEFAULT_PORT)]) 96 | 97 | @classmethod 98 | def from_container(cls, application_id, container): 99 | """ 100 | Memcached provided by a container. 101 | """ 102 | 103 | return cls( 104 | key_prefix=application_id, 105 | hosts=['{host}:{port}'.format(**container.__dict__)], 106 | ) 107 | -------------------------------------------------------------------------------- /forklift/services/syslog.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright 2014 Infoxchange Australia 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | """ 17 | Syslog service. 18 | """ 19 | 20 | from forklift.base import free_port 21 | from .base import ( 22 | register, 23 | transient_provider, 24 | try_port, 25 | URLDescriptor, 26 | URLService, 27 | ) 28 | 29 | 30 | @register('syslog') 31 | class Syslog(URLService): 32 | """ 33 | Logging facility for the application. 
34 | """ 35 | 36 | DEFAULT_PORT = 514 37 | 38 | allow_override = ('host', 'port', 'proto') 39 | 40 | proto = URLDescriptor('scheme') 41 | 42 | def __init__(self, host=None, port=DEFAULT_PORT, proto='udp'): 43 | super().__init__('{proto}://{host}:{port}'.format( 44 | host=host or '', 45 | port=port, 46 | proto=proto, 47 | )) 48 | 49 | def environment(self): 50 | """ 51 | The environment to provide logging. 52 | """ 53 | 54 | return { 55 | 'SYSLOG_SERVER': self.host, 56 | 'SYSLOG_PORT': str(self.port), 57 | 'SYSLOG_PROTO': self.proto, 58 | } 59 | 60 | def check_available(self): 61 | """ 62 | Check whether syslog is available. 63 | 64 | If the protocol is UDP, assume it is available if any of the other 65 | parameters are set. 66 | """ 67 | 68 | if not self.host: 69 | return False 70 | 71 | if self.proto == 'udp': 72 | return True 73 | else: 74 | return try_port(self.host, self.port) 75 | 76 | @classmethod 77 | def manual(cls, application_id): 78 | """ 79 | Manually-configured syslog. Will not be available unless parameters 80 | are overridden in configuration. 81 | """ 82 | 83 | return cls() 84 | 85 | @classmethod 86 | @transient_provider 87 | def stdout(cls, application_id): 88 | """ 89 | Logger printing all the messages to the standard output of Forklift. 90 | """ 91 | 92 | # Adapted from https://gist.github.com/marcelom/4218010 93 | from forklift.services.satellite import start_satellite 94 | import socketserver 95 | 96 | class SyslogHandler(socketserver.BaseRequestHandler): 97 | """ 98 | Handler outputting logging messages received to stdout. 99 | """ 100 | def handle(self): 101 | data = self.request[0].strip().decode() 102 | print(data) 103 | 104 | port = free_port() 105 | 106 | def run_server(): 107 | """ 108 | Run the syslog server. 109 | """ 110 | 111 | server = socketserver.UDPServer(('0.0.0.0', port), SyslogHandler) 112 | server.serve_forever() 113 | 114 | start_satellite(target=run_server) 115 | 116 | return cls('localhost', port) 117 | 118 | providers = ('manual', 'stdout') 119 | -------------------------------------------------------------------------------- /tests/test_util.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright 2014 Infoxchange Australia 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
15 | 16 | """ 17 | Tests for miscellaneous utilities 18 | """ 19 | 20 | import multiprocessing as mp 21 | import os 22 | import threading 23 | import time 24 | import unittest 25 | 26 | from forklift.base import wait_for_pid 27 | 28 | 29 | # multiprocessing has a LOT of no-member issues 30 | # pylint:disable=no-member 31 | 32 | 33 | def wait_thread(pid, lock): 34 | """ 35 | Lock, wait for pid, unlock 36 | """ 37 | lock.acquire() 38 | wait_for_pid(pid) 39 | lock.release() 40 | 41 | 42 | class WaitForPidTestCase(unittest.TestCase): 43 | """ 44 | Tests for the wait_for_pid function 45 | """ 46 | 47 | def test_wait_for_child(self): 48 | """ 49 | Test for wait_for_pid when the pid we are waiting on is a forked 50 | child 51 | """ 52 | waiting = mp.Lock() 53 | 54 | # Start a subprocess to sleep until killed 55 | proc = mp.Process(target=time.sleep, args=(1000,)) 56 | proc.start() 57 | 58 | # Start a thread to wait for proc to finish 59 | thread = threading.Thread(target=wait_thread, 60 | args=(proc.pid, waiting)) 61 | thread.start() 62 | 63 | # Wait for both fork and thread to start, then make sure that the lock 64 | # is acquired (the thread is waiting) 65 | time.sleep(1) 66 | self.assertFalse(waiting.acquire(False)) 67 | 68 | # Terminate the forked process, wait, then make sure that the thread 69 | # has finished waiting 70 | proc.terminate() 71 | time.sleep(2) 72 | self.assertTrue(waiting.acquire(False)) 73 | 74 | def test_wait_for_parent(self): 75 | """ 76 | Test for wait_for_pid when the pid we are waiting on is the waiting 77 | forks parent 78 | """ 79 | def parent_proc(lock): 80 | """ 81 | Start a process to wait on this one then sleep 82 | """ 83 | # Start a process to watch this PID 84 | child = mp.Process(target=wait_thread, 85 | args=(os.getpid(), lock)) 86 | child.start() 87 | 88 | # Sleep until killed 89 | time.sleep(1000) 90 | 91 | # Start a process (child) to spawn another child (our grandchild) that 92 | # will wait for our child to be killed 93 | waiting = mp.Lock() 94 | proc = mp.Process(target=parent_proc, 95 | args=(waiting,)) 96 | proc.start() 97 | 98 | # Wait for both child and grandchild to have started, then make sure 99 | # that the lock is acquired (the grandchild is waiting) 100 | time.sleep(1) 101 | self.assertFalse(waiting.acquire(False)) 102 | 103 | # Terminate our child, wait, then make sure that the grandchild has 104 | # finished waiting 105 | proc.terminate() 106 | time.sleep(2) 107 | self.assertTrue(waiting.acquire(False)) 108 | -------------------------------------------------------------------------------- /forklift/base.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright 2014 Infoxchange Australia 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | """ 17 | Common declarations. 
18 | """ 19 | 20 | from contextlib import contextmanager 21 | import os 22 | import socket 23 | import subprocess 24 | import tempfile 25 | import time 26 | 27 | import psutil 28 | 29 | 30 | def free_port(): 31 | """ 32 | Find a free TCP port. 33 | """ 34 | 35 | with socket.socket() as sock: 36 | sock.bind(('', 0)) 37 | return sock.getsockname()[1] 38 | 39 | 40 | def wait_for(func, expected_exceptions=(), retries=60): 41 | """ 42 | Wait for a function to return a truthy value, possibly ignoring some 43 | exceptions if they are raised until the very last retry 44 | 45 | Parameters: 46 | func - the function to continually call until truthy 47 | expected_exceptions - list of exceptions to ignore, unless the final 48 | retry is reached (then any exceptions are reraised) 49 | retries - number of times to retry before giving up 50 | 51 | Return value: 52 | The return value of func the last time it was run 53 | """ 54 | 55 | retries = int(retries) 56 | for retry in range(1, retries + 1): 57 | try: 58 | return_value = func() 59 | if return_value: 60 | break 61 | 62 | except expected_exceptions: 63 | if retry == retries: 64 | raise 65 | else: 66 | pass 67 | 68 | time.sleep(1) 69 | 70 | return return_value 71 | 72 | 73 | def wait_for_parent(): 74 | """ 75 | Use wait_for_pid to wait for your parent process 76 | """ 77 | wait_for_pid(os.getppid()) 78 | 79 | 80 | def wait_for_pid(pid): 81 | """ 82 | Wait for a given PID in the best way possible. If PID is a child, we use 83 | os.waitpid. Otherwise, we fall back to a polling approach. 84 | """ 85 | try: 86 | # Try to wait for a child 87 | os.waitpid(pid, 0) 88 | except OSError: 89 | # Fallback to polling process status 90 | try: 91 | proc = psutil.Process(pid) 92 | while proc.status() not in ('zombie', 'dead'): 93 | time.sleep(1) 94 | except psutil.NoSuchProcess: 95 | pass 96 | 97 | 98 | @contextmanager 99 | def open_root_owned(source, *args, **kwargs): 100 | """ 101 | Copy a file as root, open it for writing, then copy it back as root again 102 | when done 103 | """ 104 | with tempfile.NamedTemporaryFile(*args, **kwargs) as dest_fh: 105 | if os.path.isfile(source): 106 | subprocess.check_call(['sudo', 'cp', source, dest_fh.name]) 107 | yield dest_fh 108 | subprocess.check_call(['sudo', 'cp', dest_fh.name, source]) 109 | 110 | 111 | def rm_tree_root_owned(path): 112 | """ 113 | Do an equivalent of shutil.rmtree, but as root 114 | """ 115 | subprocess.check_call(['sudo', 'rm', '-rf', path]) 116 | 117 | 118 | class ImproperlyConfigured(Exception): 119 | """ 120 | The host is not properly configured for running Docker. 121 | """ 122 | 123 | pass 124 | 125 | try: 126 | DEVNULL = subprocess.DEVNULL # pylint:disable=no-member 127 | except AttributeError: 128 | DEVNULL = open(os.devnull) 129 | -------------------------------------------------------------------------------- /tests/forklift/test_sshd.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright 2014 Infoxchange Australia 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | """ 17 | Test SSH daemon setup. 18 | """ 19 | 20 | import os 21 | import re 22 | import subprocess 23 | 24 | from tests.base import ( 25 | requires_docker, 26 | DOCKER_BASE_IMAGE, 27 | merge_dicts, 28 | parse_environment, 29 | TestCase, 30 | TestDriver, 31 | TestForklift, 32 | ) 33 | 34 | from forklift.base import DEVNULL 35 | from forklift.drivers import Docker 36 | 37 | 38 | class SaveSSHDetailsDocker(TestDriver, Docker): 39 | """ 40 | Save SSH command the container ran. 41 | """ 42 | 43 | log = [] 44 | 45 | def ssh_command(self, container, identity=None): 46 | """ 47 | Save the SSH command for later inspection. 48 | """ 49 | 50 | command, available = super().ssh_command(container, identity=identity) 51 | self.log.append((command, available, container)) 52 | return command, available 53 | 54 | @classmethod 55 | def last_details(cls): 56 | """ 57 | Return the next (FIFO) SSH command. 58 | """ 59 | 60 | return cls.log.pop(0) 61 | 62 | 63 | class SSHTestForklift(TestForklift): 64 | """ 65 | Test Forklift saving SSH commands. 66 | """ 67 | 68 | drivers = merge_dicts({ 69 | 'save_ssh_command_docker': SaveSSHDetailsDocker, 70 | }, TestForklift.drivers) 71 | 72 | 73 | @requires_docker 74 | class SSHTestCase(TestCase): 75 | """ 76 | Test setting up an SSH daemon via Docker. 77 | """ 78 | 79 | private_key = 'tests/test_id_rsa' 80 | 81 | forklift_class = SSHTestForklift 82 | 83 | def test_sshd(self): 84 | """ 85 | Test setting up an SSH daemon. 86 | """ 87 | 88 | container = None 89 | try: 90 | os.chmod(self.private_key, 0o600) 91 | 92 | self.assertEqual(0, self.run_forklift( 93 | '--driver', 'save_ssh_command_docker', 94 | DOCKER_BASE_IMAGE, 'sshd', 95 | '--identity', self.private_key, 96 | )) 97 | 98 | command, available, container = SaveSSHDetailsDocker.last_details() 99 | 100 | def in_container(inside_command): 101 | """ 102 | Command line to execute a command inside the container 103 | via SSH. 104 | """ 105 | 106 | # TODO: run commands directly when environment is passed 107 | # properly. 108 | return "echo '{0}' | ".format(inside_command) + \ 109 | command + \ 110 | ' -T' + \ 111 | ' -o NoHostAuthenticationForLocalhost=yes' + \ 112 | ' -o PasswordAuthentication=no' 113 | 114 | self.assertTrue(available) 115 | self.assertEqual( 116 | subprocess.call(in_container('/bin/true'), shell=True), 117 | 0 118 | ) 119 | 120 | ssh_env = parse_environment( 121 | subprocess.check_output(in_container('/usr/bin/env -0'), 122 | shell=True), 123 | ) 124 | 125 | self.assertEqual(ssh_env['DEVNAME'], 'myself') 126 | self.assertEqual(ssh_env['ENVIRONMENT'], 'dev_local') 127 | self.assertEqual(ssh_env['SITE_PROTOCOL'], 'http') 128 | self.assertTrue( 129 | re.match(r'^localhost:\d+$', ssh_env['SITE_DOMAIN'])) 130 | 131 | finally: 132 | # Kill and remove the started container 133 | if container is not None: 134 | for action in ('stop', 'rm'): 135 | subprocess.check_call( 136 | ('docker', action, container), 137 | stdout=DEVNULL, 138 | ) 139 | -------------------------------------------------------------------------------- /tests/forklift/test_cleanroom.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright 2014 Infoxchange Australia 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 
6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | """ 17 | Test the --rm and --transient flags 18 | """ 19 | 20 | import re 21 | 22 | import docker 23 | import docker.errors 24 | 25 | from tests.base import ( 26 | requires_docker_image, 27 | TestCase, 28 | TestDriver, 29 | TestForklift, 30 | ) 31 | 32 | 33 | def assertion_driver(func): 34 | """ 35 | Create a test driver that runs func to assert test conditions 36 | """ 37 | class InnerClass(TestDriver): # pylint:disable=missing-docstring 38 | run = func 39 | 40 | return InnerClass 41 | 42 | 43 | def assertion_forklift_class(func): 44 | """ 45 | Create a test forklift class that has only an assertion driver 46 | """ 47 | class InnerClass(TestForklift): # pylint:disable=missing-docstring 48 | drivers = {'assertion_driver': assertion_driver(func)} 49 | 50 | return InnerClass 51 | 52 | 53 | @requires_docker_image('mdillon/postgis') 54 | class TestRm(TestCase): 55 | """ 56 | Test the --rm flag 57 | """ 58 | 59 | def setUp(self): 60 | self.client = docker.Client() 61 | 62 | def tearDown(self): 63 | self.client.close() 64 | 65 | def test_create_delete(self): 66 | """ 67 | Make sure the container and data dirs are both created and destroyed 68 | """ 69 | test_info = {} 70 | 71 | def assertions_func(driver, *_): 72 | """ 73 | Test container/data dir creation 74 | """ 75 | container_info = driver.services[0].container_info 76 | test_info['container_name'] = container_info.name 77 | test_info['data_dir'] = container_info.data_dir 78 | 79 | # FIXME: data_dir is broken in paintedfox container 80 | # self.assertTrue(os.path.isdir(test_info['data_dir']), 81 | # "Data dir is created") 82 | 83 | container_inspect = self.client.inspect_container( 84 | test_info['container_name']) 85 | self.assertTrue(container_inspect['State']['Running'], 86 | "Container is running") 87 | 88 | return 0 89 | 90 | self.forklift_class = assertion_forklift_class(assertions_func) 91 | self.assertEqual(0, self.run_forklift( 92 | '--driver', 'assertion_driver', 93 | '--service', 'postgis', 94 | '--rm', 95 | '--', 'fake', 96 | )) 97 | 98 | # FIXME: data_dir is broken in paintedfox container 99 | # self.assertFalse(os.path.exists(test_info['data_dir']), 100 | # "Data dir does not exist") 101 | 102 | with self.assertRaises(docker.errors.APIError) as ex: 103 | self.client.inspect_container(test_info['container_name']) 104 | 105 | ex = ex.exception 106 | self.assertEqual(ex.response.status_code, 404) 107 | self.assertRegex(ex.explanation, re.compile(b'no such', re.IGNORECASE)) 108 | 109 | 110 | class TestTransient(TestCase): 111 | """ 112 | Test the --transient flag 113 | """ 114 | 115 | def test_transient_service(self): 116 | """ 117 | Make sure that a transient service is selected 118 | """ 119 | def assertions_func(driver, *_): 120 | """ 121 | Make sure that the transient service is used 122 | """ 123 | self.assertEqual('here_occasionally', 124 | driver.services[0].provided_by) 125 | return 0 126 | 127 | self.forklift_class = assertion_forklift_class(assertions_func) 128 | self.assertEqual(0, self.run_forklift( 129 | '--driver', 'assertion_driver', 130 | '--service', 'test', 131 | 
'--transient', 132 | '--', 'fake', 133 | )) 134 | 135 | def test_non_transient_service(self): 136 | """ 137 | Negative test to make sure the test_transient_service test is valid 138 | """ 139 | def assertions_func(driver, *_): 140 | """ 141 | Make sure that the transient service is not used 142 | """ 143 | self.assertEqual('here', 144 | driver.services[0].provided_by) 145 | return 0 146 | 147 | self.forklift_class = assertion_forklift_class(assertions_func) 148 | self.assertEqual(0, self.run_forklift( 149 | '--driver', 'assertion_driver', 150 | '--service', 'test', 151 | '--', 'fake', 152 | )) 153 | -------------------------------------------------------------------------------- /forklift/services/elasticsearch.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright 2014 Infoxchange Australia 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | """ 17 | Elasticsearch service. 18 | """ 19 | 20 | import json 21 | import http.client 22 | import logging 23 | import os 24 | import urllib.parse 25 | import urllib.request 26 | 27 | from forklift.base import open_root_owned 28 | from .base import (cache_directory, 29 | container_name_for, 30 | ProviderNotAvailable, 31 | pipe_split, 32 | replace_part, 33 | register, 34 | URLNameDescriptor, 35 | URLService, 36 | transient_provider) 37 | 38 | LOGGER = logging.getLogger(__name__) 39 | 40 | 41 | try: 42 | # pylint:disable=undefined-variable,invalid-name 43 | CONNECTION_ISSUES_ERROR = ConnectionError 44 | except NameError: 45 | # pylint:disable=invalid-name 46 | CONNECTION_ISSUES_ERROR = urllib.error.URLError 47 | 48 | 49 | @register('elasticsearch') 50 | class Elasticsearch(URLService): 51 | """ 52 | Elasticsearch service for the application. 53 | """ 54 | 55 | allow_override = URLService.allow_override + ('index_name',) 56 | 57 | providers = ('localhost', 'container') 58 | 59 | CONTAINER_IMAGE = 'elasticsearch' 60 | 61 | DEFAULT_PORT = 9200 62 | 63 | index_name = URLNameDescriptor() 64 | 65 | TEMPORARY_AVAILABILITY_ERRORS = \ 66 | URLService.TEMPORARY_AVAILABILITY_ERRORS + ( 67 | CONNECTION_ISSUES_ERROR, 68 | http.client.HTTPException, 69 | ValueError 70 | ) 71 | PERMANENT_AVAILABILITY_ERRORS = (urllib.request.URLError,) 72 | 73 | def __init__(self, index_name, urls): 74 | super().__init__(tuple( 75 | urllib.parse.urljoin(url, index_name) 76 | for url in pipe_split(urls) 77 | )) 78 | 79 | def environment(self): 80 | """ 81 | The environment to access Elasticsearch. 82 | """ 83 | 84 | hosts = '|'.join( 85 | replace_part(url, path='').geturl() 86 | for url in self.urls 87 | ) 88 | index_name = self.urls[0].path[1:] 89 | 90 | return { 91 | 'ELASTICSEARCH_INDEX_NAME': index_name, 92 | 'ELASTICSEARCH_URLS': hosts, 93 | } 94 | 95 | def check_available(self): 96 | """ 97 | Check whether Elasticsearch is available at a given URL. 
98 | """ 99 | 100 | if not self.urls: 101 | return False 102 | 103 | for url in self.urls: 104 | url = replace_part(url, path='') 105 | es_response = urllib.request.urlopen(url.geturl()) 106 | es_status = json.loads(es_response.read().decode()) 107 | if es_status.get('status', es_response.status) != 200: 108 | raise ProviderNotAvailable( 109 | ("Provider '{}' is not yet available: HTTP response " 110 | "{}\n{}").format(self.__class__.__name__, 111 | es_status['status'], 112 | es_status['error']) 113 | ) 114 | 115 | return True 116 | 117 | @classmethod 118 | def localhost(cls, application_id): 119 | """ 120 | The Elasticsearch environment on the local machine. 121 | """ 122 | return cls(index_name=application_id, 123 | urls=('http://localhost:9200',)) 124 | 125 | @classmethod 126 | def ensure_container(cls, application_id, **kwargs): 127 | """ 128 | Ensure an Elasticsearch container. 129 | """ 130 | 131 | kwargs.setdefault('data_dir', '/data') 132 | return super().ensure_container(application_id, **kwargs) 133 | 134 | @classmethod 135 | def from_container(cls, application_id, container): 136 | """ 137 | The Elasticsearch service provided by the container. 138 | """ 139 | 140 | return cls( 141 | index_name=application_id, 142 | urls=('http://{host}:{port}'.format(**container.__dict__),), 143 | ) 144 | 145 | @classmethod 146 | @transient_provider 147 | def container(cls, application_id): 148 | """ 149 | Elasticsearch provided by a container. 150 | """ 151 | 152 | image_name = cls.CONTAINER_IMAGE 153 | container_name = container_name_for(image_name, application_id) 154 | cache_dir = cache_directory(container_name) 155 | 156 | if not os.path.exists(cache_dir): 157 | LOGGER.debug("Creating cache directory '%s'", cache_dir) 158 | os.makedirs(cache_dir) 159 | 160 | config_path = os.path.join(cache_dir, 'elasticsearch.yml') 161 | LOGGER.debug("Writing ElasticSearch config to '%s'", config_path) 162 | with open_root_owned(config_path, 'w') as config: 163 | print( 164 | """ 165 | path: 166 | data: /data/data 167 | logs: /data/log 168 | """, 169 | file=config, 170 | ) 171 | 172 | return super().container(application_id) 173 | -------------------------------------------------------------------------------- /forklift/services/postgres.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright 2014 Infoxchange Australia 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | """ 17 | PostgreSQL database service. 18 | """ 19 | 20 | import logging 21 | import os 22 | import subprocess 23 | 24 | from forklift.base import DEVNULL 25 | from .base import (ProviderNotAvailable, 26 | URLNameDescriptor, 27 | URLService, 28 | register) 29 | 30 | 31 | LOGGER = logging.getLogger(__name__) 32 | 33 | 34 | @register('postgres') 35 | class PostgreSQL(URLService): 36 | """ 37 | PostgreSQL service provided by the host machine. 
38 | """ 39 | 40 | CHECK_COMMAND = 'select version()' 41 | CONTAINER_IMAGE = 'postgres' 42 | DATABASE_NAME = 'DEFAULT' 43 | DEFAULT_PORT = 5432 44 | URL_SCHEME = 'postgres' 45 | 46 | PERMANENT_AVAILABILITY_ERRORS = (subprocess.CalledProcessError, 47 | OSError) 48 | 49 | allow_override = URLService.allow_override + ('name',) 50 | 51 | name = URLNameDescriptor() 52 | 53 | # pylint:disable=too-many-arguments 54 | def __init__(self, 55 | name, 56 | host='localhost', 57 | port=DEFAULT_PORT, 58 | user=None, 59 | password=None): 60 | super().__init__(( 61 | '{scheme}://{user}{password}@{host}:{port}/{name}'.format( 62 | scheme=self.URL_SCHEME, 63 | user=user, 64 | password=':' + password if password else '', 65 | host=host, 66 | port=port, 67 | name=name, 68 | ), 69 | )) 70 | 71 | def environment(self): 72 | """ 73 | The environment needed for the application to connect to PostgreSQL. 74 | """ 75 | 76 | env_name = 'DB_{0}_URL'.format(self.DATABASE_NAME) 77 | return {env_name: self.urls[0].geturl()} 78 | 79 | def check_available(self): 80 | """ 81 | Check whether PostgreSQL is installed on the host and accessible. Will 82 | raise ProviderNotAvailable or subprocess.CalledProcessError when 83 | unavailable 84 | """ 85 | 86 | stderr = "" 87 | subprocess_kwargs = { 88 | 'stdin': DEVNULL, 89 | 'stdout': DEVNULL, 90 | 'stderr': subprocess.PIPE, 91 | } 92 | 93 | def get_proc_stderr(proc): 94 | """ 95 | Safely read data from stderr into a string and return it 96 | """ 97 | proc_stderr = "" 98 | for stderr_data in proc.communicate(): 99 | proc_stderr += str(stderr_data) 100 | proc.wait() 101 | return proc_stderr 102 | 103 | psql_check_command = ['psql', '--version'] 104 | proc = subprocess.Popen(psql_check_command, **subprocess_kwargs) 105 | stderr = get_proc_stderr(proc) 106 | 107 | if proc.returncode != 0: 108 | raise subprocess.CalledProcessError( 109 | returncode=proc.returncode, 110 | cmd=' '.join(psql_check_command), 111 | output=stderr 112 | ) 113 | 114 | if self.password: 115 | os.environ['PGPASSWORD'] = self.password 116 | 117 | proc = subprocess.Popen([ 118 | 'psql', 119 | '-h', self.host, 120 | '-p', str(self.port), 121 | '-U', self.user, 122 | '-w', 123 | self.name, 124 | '-c', self.CHECK_COMMAND, 125 | ], **subprocess_kwargs) 126 | stderr += get_proc_stderr(proc) 127 | 128 | if proc.returncode != 0: 129 | raise ProviderNotAvailable( 130 | ("Provider '{}' is not yet available: psql exited with status " 131 | "{}\n{}").format(self.__class__.__name__, 132 | proc.returncode, 133 | stderr) 134 | ) 135 | 136 | return True 137 | 138 | @classmethod 139 | def localhost(cls, application_id): 140 | """ 141 | The PostgreSQL environment on the local machine. 142 | """ 143 | return cls( 144 | host='localhost', 145 | name=application_id, 146 | user=application_id, 147 | ) 148 | 149 | @classmethod 150 | def ensure_container(cls, application_id, **kwargs): 151 | """ 152 | Pass custom environment to a PostgreSQL container. 153 | """ 154 | 155 | kwargs.setdefault('environment', {}).update({ 156 | 'POSTGRES_PASSWORD': 'forklift', 157 | }) 158 | 159 | return super().ensure_container(application_id, **kwargs) 160 | 161 | @classmethod 162 | def from_container(cls, application_id, container): 163 | """ 164 | PostgreSQL provided by a container. 
165 | """ 166 | 167 | return cls( 168 | host=container.host, 169 | port=container.port, 170 | name='postgres', 171 | user='postgres', 172 | password='forklift', 173 | ) 174 | 175 | providers = ('localhost', 'container') 176 | 177 | 178 | @register('postgis') 179 | class PostGIS(PostgreSQL): 180 | """ 181 | PostgreSQL database with PostGIS support. 182 | """ 183 | 184 | CHECK_COMMAND = """CREATE EXTENSION IF NOT EXISTS postgis; 185 | SELECT PostGIS_full_version()""" 186 | CONTAINER_IMAGE = 'mdillon/postgis' 187 | URL_SCHEME = 'postgis' 188 | 189 | providers = ('localhost', 'container') 190 | -------------------------------------------------------------------------------- /.githooks/pre-commit: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # pylint:disable=invalid-name 3 | """ 4 | Git pre-commit hook for performing quality checks on python code using pep8 5 | and pylint 6 | """ 7 | 8 | from __future__ import print_function 9 | 10 | import atexit 11 | 12 | import os 13 | import os.path 14 | 15 | from optparse import OptionParser 16 | 17 | import re 18 | 19 | import shutil 20 | 21 | from subprocess import Popen, PIPE 22 | 23 | import sys 24 | 25 | import tempfile 26 | 27 | # 28 | # Threshold for code to pass the Pylint test. 10 is the highest score Pylint 29 | # will give to any peice of code. 30 | # 31 | _PYLINT_PASS_THRESHOLD = 10 32 | 33 | PEP8_CONF = 'conf/pep8.conf' 34 | 35 | PYLINT_CONF = 'conf/pylint.conf' 36 | 37 | 38 | def output(command): 39 | """ 40 | Read the command output regardless of the exit code 41 | """ 42 | command = command.split() 43 | sub = Popen(command, stdout=PIPE) 44 | sub.wait() 45 | return sub.stdout.read().decode() 46 | 47 | 48 | class Main(object): 49 | """ 50 | Check the Python code in the project. 51 | """ 52 | def __init__(self, force=False): 53 | """ 54 | Initialise the checker. Set @force to True to check all the files 55 | and not just the changed ones. 56 | """ 57 | self.force = force 58 | 59 | def pep8(self): 60 | """ 61 | Run pep8 on the project. 62 | Nothing short of perfect will do for this. 63 | """ 64 | 65 | import pep8 66 | 67 | pep_opts = {} 68 | if os.path.exists(PEP8_CONF): 69 | pep_opts['config_file'] = PEP8_CONF 70 | 71 | style = pep8.StyleGuide(**pep_opts) 72 | report = style.check_files((self.index,)) 73 | count = report.total_errors 74 | if count > 0: 75 | print("Project failed pep8 check: %d error(s)." % count) 76 | cmd = "pep8 " 77 | if 'config_file' in pep_opts: 78 | cmd += "--config=%s " % pep_opts['config_file'] 79 | cmd += "." 80 | print("Re-run with:\n%s\n" % cmd) 81 | return False 82 | 83 | return True 84 | 85 | def pylint(self): 86 | """ 87 | Run PyLint on the project. 
88 | """ 89 | 90 | # Build a list of targets 91 | targets = [] 92 | modules = self.modules() 93 | for module in modules: 94 | targets.append(module) 95 | for file_ in self.changed_py_files(): 96 | if not any(file_.startswith(m) for m in modules): 97 | targets.append(file_) 98 | 99 | ok = True 100 | for target in targets: 101 | cmd = 'pylint' 102 | 103 | if os.path.exists(PYLINT_CONF): 104 | cmd += ' --rcfile=' + PYLINT_CONF 105 | 106 | cmd += ' %%s%s' % target 107 | 108 | result = output(cmd % self.index) 109 | 110 | # 111 | # Get the rating from the result 112 | # 113 | rating = pylint_rating(result) 114 | if rating is not None and rating < _PYLINT_PASS_THRESHOLD: 115 | print(result) 116 | ok = False 117 | print( 118 | "%s failed PyLint check (scored %s, min allowed is %s)" 119 | % (target, rating, _PYLINT_PASS_THRESHOLD) 120 | ) 121 | print("Re-run with:\n%s\n" % (cmd % '')) 122 | 123 | return ok 124 | 125 | def copy_index(self): 126 | """ 127 | Create a copy of index in a temporary directory. 128 | """ 129 | # pylint:disable=attribute-defined-outside-init 130 | self.index = tempfile.mkdtemp() + '/' 131 | output('git checkout-index --prefix=%s -af' % self.index) 132 | 133 | # pylint:disable=no-self-use 134 | def changed_files(self): 135 | """ 136 | A list of files changed in the index. 137 | """ 138 | if self.force: 139 | cmd = 'git ls-tree -r --name-only HEAD' 140 | else: 141 | cmd = 'git diff --staged --diff-filter=ACMRTUXB --name-only HEAD' 142 | return output(cmd).split() 143 | 144 | def changed_py_files(self): 145 | """ 146 | A list of Python files changed in the index 147 | """ 148 | def is_py_file(filename): 149 | """ 150 | Determine whether a script is a Python file 151 | """ 152 | if not os.path.exists(filename): 153 | return False 154 | if filename.endswith('.py'): 155 | return True 156 | 157 | with open(filename, 'r') as file_: 158 | first_line = file_.readline().strip() 159 | return '#!' in first_line and 'python' in first_line 160 | 161 | try: 162 | return self._changed_py_files 163 | except AttributeError: 164 | # pylint:disable=attribute-defined-outside-init 165 | self._changed_py_files = \ 166 | [f for f in self.changed_files() if is_py_file(f)] 167 | return self._changed_py_files 168 | 169 | def modules(self): 170 | """ 171 | A list of Python modules in the checkout. 172 | """ 173 | def is_module(module): 174 | """ 175 | Determine whether a directory contains a Python module. 176 | """ 177 | return os.path.exists(os.path.join(module, '__init__.py')) 178 | return list(m for m in os.listdir(self.index) if is_module(m)) 179 | 180 | def cleanup(self): 181 | """ 182 | Delete temporary files. 183 | """ 184 | shutil.rmtree(self.index, ignore_errors=True) 185 | 186 | def main(self): 187 | """ 188 | Run all the necessary checks. 189 | """ 190 | if not self.changed_py_files() and \ 191 | not self.force: 192 | print("No source files changed. Pre-commit tests skipped.") 193 | return True 194 | atexit.register(self.cleanup) 195 | self.copy_index() 196 | good = [func() for func in (self.pep8, self.pylint)] 197 | if all(good): 198 | print("Your code looks good. Continuing with commit.") 199 | return True 200 | else: 201 | print("Pre-commit tests failed.") 202 | return False 203 | 204 | 205 | def pylint_rating(result): 206 | """ 207 | Extract the rating rating from PyLint output. 
208 | """ 209 | 210 | if result == '': 211 | return None 212 | rating = re.search(r'Your code has been rated at ([-\d\.]+)/10', result) 213 | return float(rating.group(1)) 214 | 215 | 216 | if __name__ == '__main__': 217 | parser = OptionParser() 218 | parser.add_option('-f', '--force', action='store_true', dest='force', 219 | help='force a check even if no Python files changed') 220 | (options, args) = parser.parse_args() 221 | hook = Main(**options.__dict__) 222 | success = hook.main() 223 | sys.exit(0 if success else 1) 224 | -------------------------------------------------------------------------------- /tests/base.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright 2014 Infoxchange Australia 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | """ 17 | Base utilities for tests. 18 | """ 19 | 20 | import contextlib 21 | import operator 22 | import os 23 | import sys 24 | import subprocess 25 | import tempfile 26 | import unittest 27 | from functools import reduce 28 | try: 29 | # pylint:disable=ungrouped-imports 30 | from subprocess import DEVNULL # pylint:disable=no-name-in-module 31 | except ImportError: 32 | DEVNULL = open(os.devnull) 33 | 34 | import forklift 35 | import forklift.drivers 36 | import forklift.services 37 | 38 | from forklift.services.base import transient_provider 39 | 40 | 41 | DOCKER_AVAILABLE = False 42 | try: 43 | subprocess.check_call(['docker', 'version'], 44 | stdout=DEVNULL, 45 | stderr=DEVNULL) 46 | DOCKER_AVAILABLE = True 47 | except (subprocess.CalledProcessError, OSError): 48 | pass 49 | 50 | requires_docker = unittest.skipUnless( # pylint:disable=invalid-name 51 | DOCKER_AVAILABLE, "Docker is unavailable") 52 | 53 | 54 | def docker_image_available(image_name): 55 | """ 56 | Check whether a Docker image is available. 57 | """ 58 | 59 | import docker 60 | import requests.exceptions 61 | 62 | try: 63 | with docker.Client() as client: 64 | client.inspect_image(image_name) 65 | return True 66 | except (docker.errors.APIError, requests.exceptions.ConnectionError): 67 | return False 68 | 69 | 70 | def requires_docker_image(image_name): 71 | """ 72 | Mark a test as requiring a Docker image to run. 73 | """ 74 | 75 | return unittest.skipUnless( 76 | docker_image_available(image_name), 77 | "Docker image {0} is required.".format(image_name) 78 | ) 79 | 80 | 81 | DOCKER_BASE_IMAGE = 'debian:wheezy' 82 | 83 | 84 | def merge_dicts(*dicts): 85 | """ 86 | Merge an arbitrary number of dictionaries together. 87 | """ 88 | return dict(reduce(operator.or_, (d.items() for d in dicts))) 89 | 90 | 91 | class TestService(forklift.services.Service): 92 | """ 93 | A test service. 
94 | """ 95 | 96 | def __init__(self, host=None, one=None, two=None, list_=None): 97 | self.host = host 98 | self.one = one 99 | self.two = two 100 | self.list = list_ or [] 101 | 102 | allow_override = ('host', 'one', 'two') 103 | allow_override_list = ('list',) 104 | 105 | def check_available(self): 106 | return True 107 | 108 | def environment(self): 109 | return { 110 | 'FOO': '{host}-{one}-{two}'.format(**self.__dict__), 111 | 'BAR': '|'.join(self.list), 112 | } 113 | 114 | providers = ('here', 'here_occasionally') 115 | 116 | @classmethod 117 | def here(cls, application_id): 118 | """ 119 | A sample provider. 120 | """ 121 | return cls('localhost', application_id, '2') 122 | 123 | @classmethod 124 | @transient_provider 125 | def here_occasionally(cls, application_id): 126 | """ 127 | A sample transient provider. 128 | """ 129 | return cls(None, application_id, '2') 130 | 131 | 132 | class TestDriver(forklift.drivers.Driver): 133 | """ 134 | Mock some driver parameters for ease of testing. 135 | """ 136 | 137 | def base_environment(self): 138 | env = super().base_environment() 139 | env['DEVNAME'] = 'myself' 140 | return env 141 | 142 | 143 | class SaveOutputMixin(forklift.drivers.Driver): 144 | """ 145 | A mixin to drivers to examine the commands output. 146 | """ 147 | 148 | _last_output = [None] 149 | 150 | @classmethod 151 | def last_output(cls): 152 | """ 153 | Return the output of the last command. 154 | """ 155 | return cls._last_output[0] 156 | 157 | def _run(self, command): 158 | """ 159 | Run the command, saving the output. 160 | """ 161 | 162 | with tempfile.NamedTemporaryFile() as tmpfile: 163 | with redirect_stream(tmpfile.file.fileno()): 164 | pid = os.fork() 165 | assert pid >= 0 166 | if pid == 0: 167 | super()._run(command) 168 | 169 | # We MUST exit here, or the test suite will continue to 170 | # run in the child process. 171 | # Also must be os._exit rather than sys.exit because this 172 | # is a child process so should not clean up/etc 173 | os._exit(0) # pylint:disable=protected-access 174 | else: 175 | _, status = os.waitpid(pid, 0) 176 | retcode = status >> 8 177 | 178 | with open(tmpfile.name) as saved_output: 179 | self._last_output[0] = saved_output.read() 180 | 181 | return retcode 182 | 183 | 184 | class SaveOutputDirect(SaveOutputMixin, TestDriver, forklift.drivers.Direct): 185 | """ 186 | A direct driver augmented for testing. 187 | """ 188 | 189 | pass 190 | 191 | 192 | class SaveOutputDocker(SaveOutputMixin, TestDriver, forklift.drivers.Docker): 193 | """ 194 | A Docker driver augmented for testing. 195 | """ 196 | 197 | pass 198 | 199 | 200 | class TestForklift(forklift.Forklift): 201 | """ 202 | Forklift with a test service. 203 | """ 204 | 205 | drivers = { 206 | # Drivers MUST always inherit from SaveOutputMixin. See the comment in 207 | # the fork code of _run there for reasoning 208 | 'direct': SaveOutputDirect, 209 | 'docker': SaveOutputDocker, 210 | } 211 | 212 | def get_driver(self, conf): 213 | """ 214 | Use the driver specified in a test as default. 215 | """ 216 | 217 | return getattr(self, '_driver', None) \ 218 | or super().get_driver(conf) 219 | 220 | @contextlib.contextmanager 221 | def set_driver(self, driver): 222 | """ 223 | Set the default driver to use in context. 
224 | """ 225 | 226 | setattr(self, '_driver', driver) 227 | 228 | try: 229 | yield 230 | finally: 231 | delattr(self, '_driver') 232 | 233 | services = merge_dicts({'test': TestService}, 234 | forklift.Forklift.services) 235 | 236 | configuration_file_list = [] 237 | 238 | def configuration_files(self, conf): 239 | """ 240 | Override the configuration files. 241 | """ 242 | return self.configuration_file_list 243 | 244 | def implicit_configuration(self): 245 | """ 246 | Override application ID. 247 | """ 248 | return [ 249 | '--application_id', 'test_app', 250 | ] 251 | 252 | 253 | class TestCase(unittest.TestCase): 254 | """ 255 | Base test case. 256 | """ 257 | 258 | forklift_class = TestForklift 259 | 260 | default_driver = None 261 | 262 | def run_forklift(self, *args): 263 | """ 264 | Run Forklift with specified arguments. 265 | """ 266 | 267 | instance = self.forklift_class(['forklift'] + list(args)) 268 | if self.default_driver: 269 | with instance.set_driver(self.default_driver): 270 | return instance.main() 271 | else: 272 | return instance.main() 273 | 274 | 275 | @contextlib.contextmanager 276 | def redirect_stream(target_fd, stream=None): 277 | """ 278 | Redirect the standard output to the target, including from child processes. 279 | 280 | If 'stream' is specified, redirect that stream instead (e.g. sys.stderr). 281 | """ 282 | 283 | stream = stream or sys.stdout 284 | 285 | stream_fileno = stream.fileno() 286 | saved_stream = os.dup(stream_fileno) 287 | os.close(stream_fileno) 288 | os.dup2(target_fd, stream_fileno) 289 | 290 | yield 291 | 292 | os.close(stream_fileno) 293 | os.dup2(saved_stream, stream_fileno) 294 | 295 | 296 | def parse_environment(env_string): 297 | """ 298 | Parse the output of 'env -0' into a dictionary. 299 | """ 300 | 301 | if isinstance(env_string, bytes): 302 | env_string = env_string.decode() 303 | 304 | return dict( 305 | item.split('=', 1) 306 | for item in env_string.rstrip('\0').split('\0') 307 | ) 308 | -------------------------------------------------------------------------------- /pylintrc: -------------------------------------------------------------------------------- 1 | [MASTER] 2 | 3 | # Specify a configuration file. 4 | #rcfile= 5 | 6 | # Python code to execute, usually for sys.path manipulation such as 7 | # pygtk.require(). 8 | #init-hook= 9 | 10 | # Add files or directories to the blacklist. They should be base names, not 11 | # paths. 12 | ignore= 13 | 14 | # Pickle collected data for later comparisons. 15 | persistent=yes 16 | 17 | # List of plugins (as comma separated values of python modules names) to load, 18 | # usually to register additional checkers. 19 | load-plugins=pylint_mccabe 20 | 21 | 22 | [REPORTS] 23 | 24 | # Set the output format. Available formats are text, parseable, colorized, msvs 25 | # (visual studio) and html. You can also give a reporter class, eg 26 | # mypackage.mymodule.MyReporterClass. 27 | output-format=colorized 28 | 29 | # Put messages in a separate file for each module / package specified on the 30 | # command line instead of printing them on stdout. Reports (if any) will be 31 | # written in a file name "pylint_global.[txt|html]". 32 | files-output=no 33 | 34 | # Tells whether to display a full report or only the messages 35 | reports=yes 36 | 37 | # Python expression which should return a note less than 10 (10 is the highest 38 | # note). 
You have access to the variables errors warning, statement which 39 | # respectively contain the number of errors / warnings messages and the total 40 | # number of statements analyzed. This is used by the global evaluation report 41 | # (RP0004). 42 | evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10) 43 | 44 | # Template used to display messages. This is a python new-style format string 45 | # used to format the message information. See doc for all details 46 | #msg-template= 47 | 48 | 49 | [MESSAGES CONTROL] 50 | 51 | # Enable the message, report, category or checker with the given id(s). You can 52 | # either give multiple identifier separated by comma (,) or put this option 53 | # multiple time. See also the "--disable" option for examples. 54 | #enable= 55 | 56 | # Disable the message, report, category or checker with the given id(s). You 57 | # can either give multiple identifiers separated by comma (,) or put this 58 | # option multiple times (only on the command line, not in the configuration 59 | # file where it should appear only once).You can also use "--disable=all" to 60 | # disable everything first and then reenable specific checks. For example, if 61 | # you want to run only the similarities checker, you can use "--disable=all 62 | # --enable=similarities". If you want to run only the classes checker, but have 63 | # no Warning level messages displayed, use"--disable=all --enable=classes 64 | # --disable=W" 65 | disable= 66 | abstract-method, 67 | duplicate-code, 68 | fixme, 69 | locally-disabled, 70 | locally-enabled, 71 | no-self-use, 72 | star-args, 73 | super-on-old-class, 74 | too-few-public-methods, 75 | too-many-public-methods, 76 | unused-argument, 77 | RP0101, 78 | RP0401, 79 | RP0402, 80 | RP0701, 81 | RP0801, 82 | 83 | 84 | [BASIC] 85 | 86 | # List of builtins function names that should not be used, separated by a comma 87 | bad-functions=map,filter,apply 88 | 89 | # Regular expression which should only match correct module names 90 | module-rgx=(([a-z_][a-z0-9_]*)|([A-Z][a-zA-Z0-9]+))$ 91 | 92 | # Regular expression which should only match correct module level names 93 | const-rgx=(([A-Z_][A-Z0-9_]*)|(__.*__))$ 94 | 95 | # Regular expression which should only match correct class names 96 | class-rgx=[A-Z_][a-zA-Z0-9]+$ 97 | 98 | # Regular expression which should only match correct function names 99 | function-rgx=[a-z_][a-z0-9_]{2,30}$ 100 | 101 | # Regular expression which should only match correct method names 102 | method-rgx=[a-z_][a-z0-9_]{2,30}$ 103 | 104 | # Regular expression which should only match correct instance attribute names 105 | attr-rgx=[a-z_][a-z0-9_]{2,30}$ 106 | 107 | # Regular expression which should only match correct argument names 108 | argument-rgx=[a-z_][a-z0-9_]{2,30}$ 109 | 110 | # Regular expression which should only match correct variable names 111 | variable-rgx=[a-z_][a-z0-9_]{2,30}$ 112 | 113 | # Regular expression which should only match correct attribute names in class 114 | # bodies 115 | class-attribute-rgx=([A-Za-z_][A-Za-z0-9_]{2,30}|(__.*__))$ 116 | 117 | # Regular expression which should only match correct list comprehension / 118 | # generator expression variable names 119 | inlinevar-rgx=[A-Za-z_][A-Za-z0-9_]*$ 120 | 121 | # Good variable names which should always be accepted, separated by a comma 122 | good-names=i,j,k,ex,Run,_ 123 | 124 | # Bad variable names which should always be refused, separated by a comma 125 | bad-names=foo,bar,baz,toto,tutu,tata 126 | 127 | # Regular 
expression which should only match function or class names that do 128 | # not require a docstring. 129 | no-docstring-rgx=__.*__ 130 | 131 | # Minimum line length for functions/classes that require docstrings, shorter 132 | # ones are exempt. 133 | docstring-min-length=-1 134 | 135 | 136 | [TYPECHECK] 137 | 138 | # Tells whether missing members accessed in mixin class should be ignored. A 139 | # mixin class is detected if its name ends with "mixin" (case insensitive). 140 | ignore-mixin-members=yes 141 | 142 | # List of classes names for which member attributes should not be checked 143 | # (useful for classes with attributes dynamically set). 144 | ignored-classes=SQLObject 145 | 146 | # List of members which are set dynamically and missed by pylint inference 147 | # system, and so shouldn't trigger E0201 when accessed. Python regular 148 | # expressions are accepted. 149 | generated-members=REQUEST,acl_users,aq_parent 150 | 151 | 152 | [FORMAT] 153 | 154 | # Maximum number of characters on a single line. 155 | max-line-length=80 156 | 157 | # Regexp for a line that is allowed to be longer than the limit. 158 | ignore-long-lines=^\s*(# )??$ 159 | 160 | # Allow the body of an if to be on the same line as the test if there is no 161 | # else. 162 | single-line-if-stmt=no 163 | 164 | # List of optional constructs for which whitespace checking is disabled 165 | no-space-check=trailing-comma,dict-separator 166 | 167 | # Maximum number of lines in a module 168 | max-module-lines=1000 169 | 170 | # String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 171 | # tab). 172 | indent-string=' ' 173 | 174 | 175 | [SIMILARITIES] 176 | 177 | # Minimum lines number of a similarity. 178 | min-similarity-lines=4 179 | 180 | # Ignore comments when computing similarities. 181 | ignore-comments=yes 182 | 183 | # Ignore docstrings when computing similarities. 184 | ignore-docstrings=yes 185 | 186 | # Ignore imports when computing similarities. 187 | ignore-imports=yes 188 | 189 | 190 | [VARIABLES] 191 | 192 | # Tells whether we should check for unused import in __init__ files. 193 | init-import=no 194 | 195 | # A regular expression matching the beginning of the name of dummy variables 196 | # (i.e. not used). 197 | dummy-variables-rgx=_$|dummy 198 | 199 | # List of additional names supposed to be defined in builtins. Remember that 200 | # you should avoid to define new builtins when possible. 201 | additional-builtins= 202 | 203 | 204 | [MISCELLANEOUS] 205 | 206 | # List of note tags to take in consideration, separated by a comma. 207 | notes=FIXME,XXX,TODO 208 | 209 | 210 | [DESIGN] 211 | 212 | # Maximum number of arguments for function / method 213 | max-args=5 214 | 215 | # Argument names that match this expression will be ignored. Default to name 216 | # with leading underscore 217 | ignored-argument-names=_.* 218 | 219 | # Maximum number of locals for function / method body 220 | max-locals=15 221 | 222 | # Maximum number of return / yield for function / method body 223 | max-returns=6 224 | 225 | # Maximum number of branch for function / method body 226 | max-branches=12 227 | 228 | # Maximum number of statements in function / method body 229 | max-statements=50 230 | 231 | # Maximum number of parents for a class (see R0901). 232 | max-parents=7 233 | 234 | # Maximum number of attributes for a class (see R0902). 235 | max-attributes=7 236 | 237 | # Minimum number of public methods for a class (see R0903). 
238 | min-public-methods=2 239 | 240 | # Maximum number of public methods for a class (see R0904). 241 | max-public-methods=20 242 | 243 | 244 | [CLASSES] 245 | 246 | # List of method names used to declare (i.e. assign) instance attributes. 247 | defining-attr-methods=__init__,__new__,setUp 248 | 249 | # List of valid names for the first argument in a class method. 250 | valid-classmethod-first-arg=cls 251 | 252 | # List of valid names for the first argument in a metaclass class method. 253 | valid-metaclass-classmethod-first-arg=mcs 254 | 255 | 256 | [IMPORTS] 257 | 258 | # Deprecated modules which should not be used, separated by a comma 259 | deprecated-modules=regsub,TERMIOS,Bastion,rexec 260 | 261 | # Create a graph of every (i.e. internal and external) dependencies in the 262 | # given file (report RP0402 must not be disabled) 263 | import-graph= 264 | 265 | # Create a graph of external dependencies in the given file (report RP0402 must 266 | # not be disabled) 267 | ext-import-graph= 268 | 269 | # Create a graph of internal dependencies in the given file (report RP0402 must 270 | # not be disabled) 271 | int-import-graph= 272 | 273 | 274 | [EXCEPTIONS] 275 | 276 | # Exceptions that will emit a warning when being caught. Defaults to 277 | # "Exception" 278 | overgeneral-exceptions=Exception 279 | -------------------------------------------------------------------------------- /tests/forklift/test_forklift.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright 2014 Infoxchange Australia 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | """ 17 | Tests for Forklift. 18 | """ 19 | 20 | import contextlib 21 | import re 22 | import sys 23 | import tempfile 24 | import yaml 25 | 26 | import forklift 27 | from forklift.drivers import ip_address 28 | from tests.base import ( 29 | requires_docker, 30 | parse_environment, 31 | redirect_stream, 32 | DOCKER_BASE_IMAGE, 33 | SaveOutputMixin, 34 | TestCase, 35 | ) 36 | 37 | 38 | class UsageTestCase(TestCase): 39 | """ 40 | Test running forklift with no arguments. 41 | """ 42 | 43 | # Do not override any drivers or services 44 | forklift_class = forklift.Forklift 45 | 46 | def test_usage(self): 47 | """ 48 | Test usage message with no arguments. 49 | """ 50 | with tempfile.NamedTemporaryFile() as tmpfile: 51 | with redirect_stream(tmpfile.file.fileno(), stream=sys.stderr): 52 | with self.assertRaises(SystemExit): 53 | self.run_forklift() 54 | 55 | with open(tmpfile.name) as saved_stderr: 56 | usage_test = saved_stderr.read() 57 | self.assertIn("usage: ", usage_test) 58 | 59 | 60 | class SmokeTestCase(TestCase): 61 | """ 62 | Test running basic commands. 63 | """ 64 | 65 | def test_commands(self): 66 | """ 67 | Test running basic commands. 
68 | """ 69 | 70 | self.assertEqual(0, self.run_forklift('true')) 71 | self.assertNotEqual(0, self.run_forklift('false')) 72 | 73 | 74 | class CommandsMixin(object): 75 | """ 76 | Mixin with tests to ensure commands are run correctly. 77 | """ 78 | 79 | def run_command(self, *command): 80 | """ 81 | Run a command in Forklift. 82 | 83 | Override to pass extra options. 84 | """ 85 | 86 | return self.run_forklift(*command) 87 | 88 | def test_exit_code(self): 89 | """ 90 | Test command exit codes. 91 | """ 92 | 93 | self.assertEqual(0, self.run_command('/bin/true')) 94 | self.assertNotEqual(0, self.run_command('/bin/false')) 95 | 96 | def test_output(self): 97 | """ 98 | Test echoing things. 99 | """ 100 | 101 | self.assertEqual(0, self.run_command('/bin/echo', 'apple', 'orange')) 102 | self.assertEqual('apple orange\n', SaveOutputMixin.last_output()) 103 | 104 | self.assertEqual( 105 | 0, 106 | self.run_command('--', '/bin/echo', '--apple', '--orange') 107 | ) 108 | self.assertEqual('--apple --orange\n', SaveOutputMixin.last_output()) 109 | 110 | 111 | class DirectCommandsTestCase(CommandsMixin, TestCase): 112 | """ 113 | Test running commands directly. 114 | """ 115 | 116 | default_driver = 'direct' 117 | 118 | 119 | @requires_docker 120 | class DockerCommandsTestCase(CommandsMixin, TestCase): 121 | """ 122 | Test running commands via Docker. 123 | """ 124 | 125 | default_driver = 'docker' 126 | 127 | def run_command(self, *command): 128 | """ 129 | Run a command via Docker. 130 | """ 131 | 132 | return self.run_forklift( 133 | '--rm', 134 | DOCKER_BASE_IMAGE, 135 | *command 136 | ) 137 | 138 | 139 | class CaptureEnvironmentMixin(object): 140 | """ 141 | Mixin with tests to ensure environment is passed to commands correctly. 142 | """ 143 | 144 | def capture_env(self, *args, prepend_args=None): 145 | """ 146 | Run Forklift to capture the environment. 147 | """ 148 | 149 | prepend_args = prepend_args or [] 150 | 151 | if any(arg.startswith('--') for arg in prepend_args): 152 | prepend_args.insert(0, '--') 153 | 154 | forklift_args = \ 155 | list(args) + \ 156 | list(prepend_args) + \ 157 | ['/usr/bin/env', '-0'] 158 | 159 | self.assertEqual(0, self.run_forklift(*forklift_args)) 160 | 161 | return parse_environment(SaveOutputMixin.last_output()) 162 | 163 | @contextlib.contextmanager 164 | def configuration_file(self, configuration): 165 | """ 166 | Run a command with configuration written to the configuration file. 167 | """ 168 | 169 | with tempfile.NamedTemporaryFile() as conffile: 170 | self.forklift_class.configuration_file_list.append(conffile.name) 171 | if isinstance(configuration, str): 172 | conffile.write(configuration.encode()) 173 | else: 174 | yaml.dump(configuration, conffile, encoding='utf-8') 175 | 176 | try: 177 | yield 178 | finally: 179 | self.forklift_class.configuration_file_list.pop() 180 | 181 | @staticmethod 182 | def localhost_reference(): 183 | """ 184 | The local host, as seen from inside the driver. 185 | """ 186 | return 'localhost' 187 | 188 | def test_basic_environment(self): 189 | """ 190 | Test passing basic environment to the command. 
191 | """ 192 | 193 | env = self.capture_env() 194 | self.assertEqual(env['DEVNAME'], 'myself') 195 | self.assertEqual(env['ENVIRONMENT'], 'dev_local') 196 | self.assertEqual(env['SITE_PROTOCOL'], 'http') 197 | self.assertTrue(re.match(r'^localhost:\d+$', env['SITE_DOMAIN'])) 198 | 199 | env = self.capture_env('--serve_port', '9998') 200 | self.assertEqual(env['SITE_DOMAIN'], 'localhost:9998') 201 | 202 | def test_service_environment(self): 203 | """ 204 | Test passing service environment to the command. 205 | """ 206 | 207 | with self.configuration_file({'services': ['test']}): 208 | self.assertEqual( 209 | self.capture_env()['FOO'], 210 | '{0}-test_app-2'.format(self.localhost_reference()) 211 | ) 212 | 213 | empty_file = \ 214 | """ 215 | # An empty YAML file. 216 | """ 217 | with self.configuration_file(empty_file): 218 | self.assertEqual( 219 | self.capture_env()['FOO'], 220 | '{0}-test_app-2'.format(self.localhost_reference()) 221 | ) 222 | 223 | with self.configuration_file({ 224 | 'services': ['test'], 225 | 'test': { 226 | 'one': '111', 227 | }, 228 | }): 229 | self.assertEqual(self.capture_env()['FOO'], 230 | '{0}-111-2'.format(self.localhost_reference())) 231 | 232 | with self.configuration_file({ 233 | 'test': { 234 | 'two': '222', 235 | }, 236 | }): 237 | self.assertEqual( 238 | self.capture_env()['FOO'], 239 | '{0}-111-222'.format(self.localhost_reference())) 240 | 241 | self.assertEqual( 242 | self.capture_env('--test.host', 'otherhost')['FOO'], 243 | 'otherhost-111-222' 244 | ) 245 | 246 | def test_nargs(self): 247 | """ 248 | Test multiple arguments 249 | """ 250 | 251 | with self.configuration_file({ 252 | 'services': ['test'], 253 | 'test': { 254 | 'list': ['1', '2'], 255 | }, 256 | }): 257 | self.assertEqual( 258 | self.capture_env()['BAR'], 259 | '1|2') 260 | 261 | def test_added_environment(self): 262 | """ 263 | Test passing additional environment to the command. 264 | """ 265 | 266 | with self.configuration_file({ 267 | 'environment': [ 268 | 'BAR=additional', 269 | ], 270 | }): 271 | self.assertEqual(self.capture_env()['BAR'], 'additional') 272 | 273 | # Environment can be passed as a hash 274 | with self.configuration_file({ 275 | 'environment': { 276 | 'BAR': 'additional', 277 | }, 278 | }): 279 | self.assertEqual(self.capture_env()['BAR'], 'additional') 280 | 281 | 282 | class DirectEnvironmentTestCase(CaptureEnvironmentMixin, TestCase): 283 | """ 284 | Test that environment is passed to the commands using direct driver. 285 | """ 286 | 287 | default_driver = 'direct' 288 | 289 | 290 | @requires_docker 291 | class DockerEnvironmentTestCase(CaptureEnvironmentMixin, TestCase): 292 | """ 293 | Test environment passed to the commands using Docker. 294 | """ 295 | 296 | default_driver = 'docker' 297 | 298 | @staticmethod 299 | def localhost_reference(): 300 | # TODO: Can this change? 301 | return ip_address('docker0') 302 | 303 | def capture_env(self, *args, prepend_args=None): 304 | """ 305 | Run Forklift to capture the environment. 
306 | """ 307 | 308 | prepend_args = prepend_args or [] 309 | prepend_args.append(DOCKER_BASE_IMAGE) 310 | 311 | args = ['--rm'] + list(args) 312 | 313 | return super().capture_env(*args, prepend_args=prepend_args) 314 | -------------------------------------------------------------------------------- /tests/test_services.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright 2014 Infoxchange Australia 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | """ 17 | Tests for services provided by Forklift. 18 | """ 19 | 20 | import unittest 21 | 22 | import socket 23 | import socketserver 24 | import threading 25 | import tempfile 26 | from time import sleep 27 | from urllib.parse import urlparse 28 | 29 | import forklift.services 30 | import forklift.services.base as base 31 | from forklift.base import free_port 32 | 33 | from tests.base import redirect_stream 34 | 35 | 36 | class ElasticsearchTestCase(unittest.TestCase): 37 | """ 38 | Test Elasticsearch service. 39 | """ 40 | 41 | def test_host(self): 42 | """ 43 | Test host get/set. 44 | """ 45 | 46 | service = forklift.services.Elasticsearch( 47 | 'index', 48 | ('http://alpha:9200|http://beta:9200',)) 49 | 50 | self.assertEqual(service.urls, ( 51 | urlparse('http://alpha:9200/index'), 52 | urlparse('http://beta:9200/index'), 53 | )) 54 | self.assertEqual(service.environment(), { 55 | 'ELASTICSEARCH_URLS': 'http://alpha:9200|http://beta:9200', 56 | 'ELASTICSEARCH_INDEX_NAME': 'index', 57 | }) 58 | 59 | service.host = 'other' 60 | 61 | self.assertEqual(service.urls, ( 62 | urlparse('http://other:9200/index'), 63 | )) 64 | self.assertEqual(service.environment(), { 65 | 'ELASTICSEARCH_URLS': 'http://other:9200', 66 | 'ELASTICSEARCH_INDEX_NAME': 'index', 67 | }) 68 | 69 | service = forklift.services.Elasticsearch( 70 | 'index', 71 | ('http://localhost:9200',)) 72 | 73 | self.assertEqual(service.urls, ( 74 | urlparse('http://localhost:9200/index'), 75 | )) 76 | self.assertEqual(service.host, 'localhost') 77 | 78 | service = forklift.services.Elasticsearch( 79 | 'index', 80 | ('http://alpha:9200', 81 | 'http://beta:9200')) 82 | 83 | self.assertEqual(service.urls, ( 84 | urlparse('http://alpha:9200/index'), 85 | urlparse('http://beta:9200/index'), 86 | )) 87 | self.assertEqual(service.environment(), { 88 | 'ELASTICSEARCH_URLS': 'http://alpha:9200|http://beta:9200', 89 | 'ELASTICSEARCH_INDEX_NAME': 'index', 90 | }) 91 | 92 | 93 | class MemcacheTestCase(unittest.TestCase): 94 | """ 95 | Test Memcache service. 96 | """ 97 | 98 | def test_host(self): 99 | """ 100 | Test host get/set. 
101 | """ 102 | 103 | service = forklift.services.Memcache( 104 | 'index', 105 | ['alpha', 'beta:11222']) 106 | 107 | self.assertEqual(service.hosts, ( 108 | 'alpha', 109 | 'beta:11222', 110 | )) 111 | 112 | service.host = 'other' 113 | 114 | self.assertEqual(service.hosts, ( 115 | 'other', 116 | )) 117 | 118 | service = forklift.services.Memcache( 119 | 'index', 120 | ['localhost', 'localhost:22111', 'alpha', 'beta:11222']) 121 | service.host = '2.2.2.2|3.3.3.3|gamma|delta' 122 | self.assertEqual(service.hosts, ( 123 | '2.2.2.2', '3.3.3.3', 'gamma', 'delta' 124 | )) 125 | 126 | 127 | class SyslogTestCase(unittest.TestCase): 128 | """ 129 | Test Syslog service. 130 | """ 131 | 132 | def test_stdout(self): 133 | """ 134 | Test printing to stdout with the fallback Syslog provider. 135 | """ 136 | 137 | with tempfile.NamedTemporaryFile() as tmpfile: 138 | with redirect_stream(tmpfile.file.fileno()): 139 | syslog = forklift.services.Syslog.stdout('fake_app') 140 | self.assertTrue(syslog.available()) 141 | env = syslog.environment() 142 | 143 | import logging 144 | from logging.handlers import SysLogHandler 145 | 146 | handler = SysLogHandler( 147 | address=(env['SYSLOG_SERVER'], int(env['SYSLOG_PORT'])), 148 | socktype=socket.SOCK_DGRAM 149 | if env['SYSLOG_PROTO'] == 'udp' 150 | else socket.SOCK_STREAM, 151 | ) 152 | 153 | handler.handle(logging.LogRecord( 154 | name='logname', 155 | level=logging.INFO, 156 | pathname='/fake/file', 157 | lineno=314, 158 | msg="Logging %s", 159 | args="message", 160 | exc_info=None, 161 | )) 162 | handler.close() 163 | 164 | # Give the server a chance to process the message 165 | sleep(1) 166 | 167 | with open(tmpfile.name) as saved_output: 168 | log = saved_output.read() 169 | self.assertEqual("<14>Logging message\x00\n", log) 170 | 171 | 172 | class EmailTestCase(unittest.TestCase): 173 | """ 174 | Test email service. 175 | """ 176 | 177 | def test_stdout(self): 178 | """ 179 | Test printing to stdout with the fallback provider. 180 | """ 181 | 182 | with tempfile.NamedTemporaryFile() as tmpfile: 183 | with redirect_stream(tmpfile.file.fileno()): 184 | email = forklift.services.Email.stdout('fake_app') 185 | 186 | self.assertTrue(email.available()) 187 | env = email.environment() 188 | 189 | import smtplib 190 | 191 | smtp = smtplib.SMTP(host=env['EMAIL_HOST'], 192 | port=env['EMAIL_PORT']) 193 | smtp.sendmail( 194 | from_addr='forklift@example.com', 195 | to_addrs=('destination@example.com',), 196 | msg='Email message', 197 | ) 198 | smtp.quit() 199 | 200 | # Give the server a chance to process the message 201 | sleep(1) 202 | 203 | with open(tmpfile.name) as saved_output: 204 | log = saved_output.read().splitlines() 205 | self.assertEqual([ 206 | '---------- MESSAGE FOLLOWS ----------', 207 | 'Email message', 208 | '------------ END MESSAGE ------------', 209 | ], log) 210 | 211 | 212 | class BaseTestCase(unittest.TestCase): 213 | """ 214 | Test base services functions. 215 | """ 216 | 217 | def test_port_open(self): 218 | """ 219 | Test port_open. 220 | """ 221 | 222 | class DummyHandler(socketserver.BaseRequestHandler): 223 | """ 224 | A do-nothing handler. 225 | """ 226 | 227 | def handle(self): 228 | pass 229 | 230 | class DummyServer(socketserver.ThreadingMixIn, 231 | socketserver.TCPServer): 232 | """ 233 | A do-nothing server. 
234 | """ 235 | 236 | pass 237 | 238 | port = free_port() 239 | self.assertFalse(base.port_open('localhost', port)) 240 | 241 | server = DummyServer(('0.0.0.0', port), DummyHandler) 242 | server_thread = threading.Thread(target=server.serve_forever) 243 | server_thread.start() 244 | 245 | try: 246 | self.assertTrue(base.port_open('localhost', port)) 247 | finally: 248 | server.shutdown() 249 | 250 | 251 | class MockLogger(object): 252 | """ 253 | A mock for Python logger 254 | """ 255 | 256 | def __init__(self): 257 | self.logs = {} 258 | 259 | # pylint:disable=missing-docstring, invalid-name 260 | def isEnabledFor(self, *args): 261 | return True 262 | 263 | def debug(self, fstring, *args): 264 | self.logs.setdefault('debug', []).append(fstring % args) 265 | 266 | def info(self, fstring, *args): 267 | self.logs.setdefault('info', []).append(fstring % args) 268 | 269 | def warning(self, fstring, *args): 270 | self.logs.setdefault('warning', []).append(fstring % args) 271 | 272 | def error(self, fstring, *args): 273 | self.logs.setdefault('error', []).append(fstring % args) 274 | 275 | def critical(self, fstring, *args): 276 | self.logs.setdefault('critical', []).append(fstring % args) 277 | 278 | 279 | class SettingsLogTestCase(unittest.TestCase): 280 | """ 281 | Test the log_service_settings function 282 | """ 283 | def setUp(self): 284 | self.logger = MockLogger() 285 | 286 | def test_basic(self): 287 | """ 288 | Check that basic logging of properties works 289 | """ 290 | setattr(self, 'containers', 'pretty great') 291 | setattr(self, 'score', 9001) 292 | 293 | base.log_service_settings(self.logger, self, 294 | 'containers', 'score') 295 | self.assertEqual(self.logger.logs, { 296 | 'debug': [ 297 | 'SettingsLogTestCase containers: pretty great', 298 | 'SettingsLogTestCase score: 9001', 299 | ], 300 | }) 301 | 302 | def test_callable(self): 303 | """ 304 | Check that if attrs are callable, they are correctly called to get 305 | the value 306 | """ 307 | setattr(self, 'the_callable', lambda: 'the value') 308 | setattr(self, 'not_callable', 9001) 309 | 310 | base.log_service_settings(self.logger, self, 311 | 'the_callable', 'not_callable') 312 | self.assertEqual(self.logger.logs, { 313 | 'debug': [ 314 | 'SettingsLogTestCase the_callable: the value', 315 | 'SettingsLogTestCase not_callable: 9001', 316 | ], 317 | }) 318 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | [![Build Status](https://travis-ci.org/infoxchange/docker-forklift.svg?branch=master)](https://travis-ci.org/infoxchange/docker-forklift) [![Coverage Status](https://coveralls.io/repos/infoxchange/docker-forklift/badge.svg?branch=master)](https://coveralls.io/r/infoxchange/docker-forklift?branch=master) [![PyPI](https://img.shields.io/pypi/v/docker-forklift.svg)](https://pypi.python.org/pypi/docker-forklift) 2 | 3 | Forklift - when you need to handle just one container 4 | ===================================================== 5 | 6 | Utilities to develop a containerised application. 7 | 8 | The standard containers at InfoXchange require a number of environment 9 | variables to run properly. With Forklift, they can be inferred automatically 10 | and/or specified in the codebase. 11 | 12 | Furthermore, it is often necessary to experiment within a running container. 
13 | Forklift includes a special 'sshd' mode to start the SSH daemon instead of the
14 | original command, so that one can run arbitrary commands inside.
15 | 
16 | Installation
17 | ------------
18 | 
19 | Forklift requires Python 3. Use the corresponding `pip` to install from PyPI,
20 | for example, `pip-3.2` on Debian and `pip3` on Ubuntu. You will also need
21 | the Python header files installed; they are typically found in the `python3-dev`
22 | or `python3-devel` package.
23 | 
24 | Install the package system-wide:
25 | 
26 |     sudo pip install docker-forklift
27 | 
28 | Or in a virtualenv:
29 | 
30 |     pip install docker-forklift
31 | 
32 | Running Forklift
33 | ----------------
34 | 
35 | The basic invocation is:
36 | 
37 |     forklift APPLICATION ARGUMENT...
38 | 
39 | What happens is:
40 | 
41 | * The configuration files are searched for a list of services to provide
42 |   to the command.
43 | * For each of those services, an available provider is searched for.
44 | * The found services, along with any additional configured environment, are
45 |   passed to the command as environment variables.
46 | 
47 | For example, if the project specifies:
48 | 
49 |     services:
50 |         - postgres
51 | 
52 | Forklift will check whether a PostgreSQL server is running on the local machine,
53 | and pass the database URL to the application.
54 | 
55 | Docker
56 | ------
57 | 
58 | Forklift can run commands directly or in Docker containers. By default, if the
59 | application given is an existing file (e.g. `./manage.py`), it is run directly.
60 | Otherwise it is assumed to be a Docker image to create a container from.
61 | The environment is passed to the application in either case.
62 | 
63 | To override the choice, set the `driver` parameter to either `docker` or `direct`.
64 | 
65 | The Docker driver has specific parameters:
66 | 
67 | * `serve_port`: services that the container exposes on port 8000 (for
68 |   example, a web server running inside the container) will be available on
69 |   this port locally.
70 | * `rm`: Automatically remove containers after they've stopped.
71 | * `privileged`: Run containers in privileged mode.
72 | * `interactive`: Run containers in interactive mode (`-i -t`).
73 | * `storage`: Run the container with `/storage` mounted as a volume under the
74 |   specified path.
75 | * `detach`: Run detached (`-d`).
76 | * `mount-root`: Bind mount the root directory of the container filesystem to
77 |   the specified path (for *reasons* `mount-root` is only supported with
78 |   `detach` or SSH daemon mode).
79 | 
80 | ### SSH daemon mode
81 | 
82 | Forklift can set up an SSH server inside the container, passing in all the
83 | environment and adding the user's public key. To use this mode, pass `sshd` as
84 | the command (e.g. `forklift ubuntu sshd`).
85 | 
86 | The following additional options apply in SSH daemon mode:
87 | 
88 | * `user` - the user to set up for SSH in the container, defaults to `app`.
89 | * `identity` - the public key file to authorise for logging in. Can be specified
90 |   as a full path or as a file in `~/.ssh`.
91 | * `host-uid` - for ease of use with `mount-root`, the UID of the user inside
92 |   the container is changed to that of the host user; override if needed.
93 | 
94 | When running in SSH daemon mode, Forklift starts the container in the
95 | background and prints a command to SSH to it. It is up to the user to stop
96 | the container when no longer needed.
97 | 
98 | Because the host keys of containers will be different every time, `ssh` will
99 | warn about the mismatch. To disable host checking for `localhost` only, put
100 | `NoHostAuthenticationForLocalhost yes` into the SSH configuration
101 | (`~/.ssh/config`).
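
For example, to start a container from a stock image with SSH access (the
image and key file names here are only illustrative):

    forklift ubuntu sshd --identity ~/.ssh/id_rsa.pub

Forklift will print the `ssh` command to connect with; once the container is
no longer needed, clean it up with `docker stop` and `docker rm` (or see the
Recycler below).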
To disable host checking for `localhost` only, put 100 | `NoHostAuthenticationForLocalhost yes` into the SSH configuration 101 | (`~/.ssh/config`). 102 | 103 | ### Recycler 104 | 105 | Forklift can clean up old containers and images on your system. By default 106 | it will clean up all stopped containers, and all untagged images. 107 | 108 | Run `forklift recycle` 109 | 110 | The following flags can also be passed: 111 | 112 | * `--include-running` - also remove running containers 113 | * `--include-tagged` - also include tagged images 114 | 115 | Services and environment 116 | ------------------------ 117 | 118 | The following environment is always available to the running application: 119 | 120 | * `ENVIRONMENT`: `dev_local` 121 | * `DEVNAME`: the current user name 122 | * `SITE_DOMAIN` and `SITE_PROTOCOL`: The URL where the application will be 123 | accessible to the outside world if it listens on port 8000 locally. 124 | * Any environment variables from configured services. 125 | * All extra environment passed as `environment` (e.g. `--environment FOO=bar` 126 | will set environment variable `FOO` to `bar`). 127 | 128 | Most of the services which provide per-application resources (e.g. a database) 129 | need to distinguish between different applications running on the same host. 130 | To do that, they are supplied with an application ID, which defaults to the 131 | base name of the current directory. If needed, this can be overridden in the 132 | configuration by the key `application_id`. 133 | 134 | The services to provide to the application are taken from the `services` array 135 | in the configuration file. The following services are known to Forklift: 136 | 137 | ### PostgreSQL 138 | 139 | Provides access to the database. The environment variable, `DB_DEFAULT_URL`, 140 | contains a [Database URL][dj-database-url] for the application to use. 141 | 142 | By default, Forklift checks if there is a PostgreSQL server running on the 143 | machine, and if yes, provides the application with its details, taking the 144 | application ID for the database name. 145 | 146 | The following parameters can be overridden: `host`, `port`, `user`, `password`, 147 | `name`. 148 | 149 | ### Elasticsearch 150 | 151 | Provides an URL to access Elasticsearch at as environment variables 152 | `ELASTICSEARCH_URLS` (the `|`-separated list of URLs to try at round robin) 153 | and `ELASTICSEARCH_INDEX_NAME` (the index to use). 154 | 155 | By default, the localhost is checked for a running instance of Elasticsearch 156 | and if successful, the application ID is provided to use as the index name. 157 | 158 | The following parameters can be overridden: `urls`, `index_name`. 159 | 160 | ### HTTP Proxy 161 | 162 | Provides an HTTP proxy as an URL in `HTTP_PROXY`. 163 | 164 | The following parameters can be overridden: `host`, `port`. 165 | 166 | ### Email (SMTP) 167 | 168 | Provides an MTA for the application to connect to. 169 | 170 | Defaults to `localhost` port 25. 171 | 172 | The following parameters can be overridden: `host`, `port`. 173 | 174 | ### Logging (syslog) 175 | 176 | Provides a syslog instance to log events to. 177 | 178 | If not overridden, Forklift will start a daemon to print out all messages to 179 | standard output and provide its address to the application. 180 | 181 | The following parameters can be specified: `host`, `port`, `proto` (`tcp` or 182 | `udp`). 
183 | 
184 | ### Memcache
185 | 
186 | Provides the settings `MEMCACHE_HOSTS`, a pipe-separated list of hosts
187 | running Memcache, and `MEMCACHE_PREFIX`, a prefix to use for keys passed
188 | to Memcache.
189 | 
190 | By default, localhost is checked for a running instance of Memcache
191 | and, if successful, the application ID is provided to use as the key prefix.
192 | 
193 | The following parameters can be overridden: `hosts`, `key_prefix`.
194 | 
195 | ### Redis
196 | 
197 | Provides the settings `REDIS_HOSTS`, a pipe-separated list of hosts running
198 | Redis, and `REDIS_DB_INDEX`, the database index to use.
199 | 
200 | The following parameters can be overridden: `host`, `db_index`.
201 | 
202 | By default, `db_index` is 0 unless overridden in your configuration.
203 | 
204 | 
205 | Configuration
206 | -------------
207 | 
208 | Forklift has a hierarchy of configuration options. For example, the `services`
209 | parameter is an array of services the application needs, `environment` is a
210 | dictionary of extra environment variables to provide, `postgres` overrides
211 | options for the PostgreSQL service, and so on.
212 | 
213 | Every parameter value is searched for, in order, in the following locations:
214 | 
215 | * Command line, e.g. `--driver direct` or `--postgres-port 5433` (note the
216 | nested parameter syntax).
217 | * User per-project configuration file in `forklift/PROJECT.yaml` inside the
218 | [XDG configuration directory][xdg] (usually `$HOME/.config`), where
219 | `PROJECT` is the application ID.
220 | * Global user configuration file in `forklift/_default.yaml` in the same
221 | directory.
222 | * Project configuration file - `forklift.yaml` in the current directory.
223 | 
224 | The project configuration file is a place to store settings which the project
225 | always needs, such as a list of required services, and is intended to be
226 | checked into the version control system. (As such, sensitive settings such
227 | as passwords should not go here.) For example, a project depending on a
228 | database might have:
229 | 
230 |     services:
231 |     - postgres
232 | 
233 | User configuration files allow people to override project settings to adapt
234 | them to their local setup. For example, if the PostgreSQL database server on a
235 | particular machine runs on port 5433, the `_default.yaml` can contain:
236 | 
237 |     postgres:
238 |       port: 5433
239 | 
240 | This setting will be applied to all projects which are run through Forklift,
241 | as long as they use a PostgreSQL database. An exotic setting only a specific
242 | project needs can be overridden in a per-project user configuration file, for
243 | example, `foo.yaml`:
244 | 
245 |     environment:
246 |       # Only the foo project needs this other database connection
247 |       DB_ANOTHER_URL: postgres://alice:rabbit@test.server/foo_test_db
248 | 
249 | Finally, the command line options can be used to quickly alter settings while
250 | developing.
251 | 
252 | [dj-database-url]: https://github.com/kennethreitz/dj-database-url
253 | [xdg]: http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html
254 | 
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | 
2 | Apache License
3 | Version 2.0, January 2004
4 | http://www.apache.org/licenses/
5 | 
6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
7 | 
8 | 1. Definitions.
9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. 
Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 
123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | 179 | APPENDIX: How to apply the Apache License to your work. 
180 | 181 | To apply the Apache License to your work, attach the following 182 | boilerplate notice, with the fields enclosed by brackets "[]" 183 | replaced with your own identifying information. (Don't include 184 | the brackets!) The text should be enclosed in the appropriate 185 | comment syntax for the file format. We also recommend that a 186 | file or class name and description of purpose be included on the 187 | same "printed page" as the copyright notice for easier 188 | identification within third-party archives. 189 | 190 | Copyright [yyyy] [name of copyright owner] 191 | 192 | Licensed under the Apache License, Version 2.0 (the "License"); 193 | you may not use this file except in compliance with the License. 194 | You may obtain a copy of the License at 195 | 196 | http://www.apache.org/licenses/LICENSE-2.0 197 | 198 | Unless required by applicable law or agreed to in writing, software 199 | distributed under the License is distributed on an "AS IS" BASIS, 200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 201 | See the License for the specific language governing permissions and 202 | limitations under the License. 203 | -------------------------------------------------------------------------------- /forklift/__init__.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # 3 | # Copyright 2014 Infoxchange Australia 4 | # 5 | # Licensed under the Apache License, Version 2.0 (the "License"); 6 | # you may not use this file except in compliance with the License. 7 | # You may obtain a copy of the License at 8 | # 9 | # http://www.apache.org/licenses/LICENSE-2.0 10 | # 11 | # Unless required by applicable law or agreed to in writing, software 12 | # distributed under the License is distributed on an "AS IS" BASIS, 13 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14 | # See the License for the specific language governing permissions and 15 | # limitations under the License. 16 | 17 | """ 18 | A script to install and start an SSH daemon in a Docker image, enabling the 19 | user to log on to it. 20 | """ 21 | 22 | import argparse 23 | import logging 24 | import os 25 | import pwd 26 | import socket 27 | import subprocess 28 | import sys 29 | import time 30 | import uuid 31 | import yaml 32 | # pylint:disable=no-name-in-module,import-error 33 | from distutils.spawn import find_executable 34 | # pylint:enable=no-name-in-module,import-error 35 | 36 | from xdg.BaseDirectory import xdg_config_home 37 | import pkg_resources 38 | 39 | from forklift.arguments import argument_factory, convert_to_args, project_args 40 | from forklift.base import DEVNULL, ImproperlyConfigured 41 | import forklift.drivers 42 | import forklift.services 43 | 44 | LOG_LEVELS = ('DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL') 45 | LOGGER = logging.getLogger(__name__) 46 | 47 | try: 48 | # pylint:disable=maybe-no-member 49 | __version__ = pkg_resources.get_distribution('docker-forklift').version 50 | except pkg_resources.DistributionNotFound: 51 | __version__ = 'dev' 52 | 53 | 54 | def create_parser(services, drivers, command_required=True): 55 | """ 56 | Collect all options from services and drivers in an argparse format. 
57 | """ 58 | 59 | parser = argparse.ArgumentParser( 60 | usage="%(prog)s [options]", 61 | ) 62 | add_argument = parser.add_argument 63 | 64 | add_argument('--application_id', 65 | help="Application name to derive resource names from") 66 | add_argument('--driver', default=None, choices=drivers.keys(), 67 | help="Driver to execute the application with") 68 | add_argument('--services', default=[], nargs='*', choices=services.keys(), 69 | help="Services to provide to the application") 70 | add_argument('--transient', action='store_true', 71 | help="Force services to use a transisent provider, where " 72 | "one is available") 73 | add_argument('--rm', action='store_true', 74 | help="When done, clean up and transient providers that were " 75 | "created") 76 | add_argument('--unique', action='store_true', 77 | help="Add to the application ID to make it unique for this" 78 | "invocation") 79 | add_argument('--cleanroom', action='store_true', 80 | help="Synonym for --unique --transient --rm") 81 | add_argument('--environment', default=[], nargs='*', 82 | type=lambda pair: pair.split('=', 1), 83 | help="Additional environment variables to pass") 84 | add_argument('--loglevel', default='WARNING', choices=LOG_LEVELS, 85 | metavar='LEVEL', type=lambda strlevel: strlevel.upper(), 86 | help="Set the minimum log level to ouput") 87 | add_argument('--version', '-v', action='version', version=__version__) 88 | 89 | for name, service in services.items(): 90 | service_options = parser.add_argument_group(name) 91 | service.add_arguments( 92 | argument_factory(service_options.add_argument, name)) 93 | 94 | add_argument('command', nargs='+' if command_required else '*', 95 | help="Command to run") 96 | 97 | # Drivers inherit all the common options from their base class, so 98 | # allow conflicts for this group of options 99 | driver_options = parser.add_argument_group('Driver options') 100 | driver_options.conflict_handler = 'resolve' 101 | for name, driver in drivers.items(): 102 | driver.add_arguments(driver_options.add_argument) 103 | 104 | # Dummy option to separate command line arguments from the ones 105 | # generated from configuration files 106 | add_argument('--zzzz', action='store_const', const=None, 107 | help=argparse.SUPPRESS) 108 | 109 | return parser 110 | 111 | 112 | class Forklift(object): 113 | """ 114 | The main class. 115 | """ 116 | 117 | services = forklift.services.register 118 | drivers = forklift.drivers.register 119 | 120 | CONFIG_DIR = os.path.join(xdg_config_home, 'forklift') 121 | 122 | def __init__(self, argv): 123 | """ 124 | Parse the command line and set up the class. 
125 | """ 126 | 127 | # Parse the configuration from: 128 | # - implicit defaults 129 | # - project configuration file 130 | # - user configuration file 131 | # - user per-project configuration file 132 | # - command line 133 | 134 | options = self.implicit_configuration() 135 | 136 | # Get application_id 137 | initial_parser = create_parser({}, {}, command_required=False) 138 | conf, _ = initial_parser.parse_known_args(options) 139 | 140 | for conffile in self.configuration_files(conf): 141 | options.extend(self.file_configuration(conffile)) 142 | 143 | options.append('--zzzz') 144 | options.extend(argv[1:]) 145 | 146 | parser = create_parser(self.services, self.drivers) 147 | 148 | conf = parser.parse_args(options) 149 | 150 | if conf.cleanroom: 151 | args_idx = options.index('--zzzz') 152 | left, right = (options[:args_idx], options[args_idx:]) 153 | options = left + ['--unique', '--transient', '--rm'] + right 154 | 155 | # Once the driver and services are known, parse the arguments again 156 | # with only the needed options 157 | 158 | driver = self.get_driver(conf) 159 | # enabled_services = { 160 | # name: service 161 | # for name, service in self.services.items() 162 | # if name in conf.services 163 | # } 164 | 165 | # FIXME: creating a parser with only the enabled_services (see above) 166 | # causes problems because we then cannot parse the arguments for 167 | # disabled services. Because services are separately namespaced 168 | # including arguments for non-enabled services is sufficient for now 169 | parser = create_parser(self.services, # FIXME: enabled_services 170 | {driver: self.drivers[driver]}) 171 | 172 | self.conf = parser.parse_args(options) 173 | 174 | # As soon as we have parsed conf 175 | self.setup_logging() 176 | 177 | if self.conf.unique: 178 | self.unique_application_id() 179 | 180 | def implicit_configuration(self): 181 | """ 182 | Implicit configuration based on the current directory. 183 | """ 184 | 185 | application_id = os.path.basename(os.path.abspath(os.curdir)) 186 | return [ 187 | '--application_id', application_id, 188 | ] 189 | 190 | def configuration_files(self, conf): 191 | """ 192 | A list of configuration files to look for settings in. 193 | """ 194 | 195 | application_id = conf.application_id 196 | return ( 197 | 'forklift.yaml', 198 | os.path.join(self.CONFIG_DIR, '_default.yaml'), 199 | os.path.join(self.CONFIG_DIR, '{0}.yaml'.format(application_id)), 200 | ) 201 | 202 | def file_configuration(self, name): 203 | """ 204 | Parse settings from a configuration file. 205 | """ 206 | try: 207 | with open(name) as conffile: 208 | return convert_to_args(yaml.load(conffile)) 209 | except IOError: 210 | return [] 211 | 212 | def unique_application_id(self): 213 | """ 214 | Set the application id in config to a (probably) unique value 215 | """ 216 | self.conf.application_id += '-%s' % uuid.uuid4() 217 | LOGGER.info("New application ID is '%s'", self.conf.application_id) 218 | 219 | @staticmethod 220 | def _readme_stream(): 221 | """ 222 | Get the README file as a stream. 223 | """ 224 | 225 | # pylint:disable=no-name-in-module,import-error 226 | from pkg_resources import resource_stream 227 | # pylint:enable=no-name-in-module,import-error 228 | return resource_stream(__name__, 'README.md') 229 | 230 | def help(self): 231 | """ 232 | Render the help file. 
233 | """ 234 | 235 | readme = self._readme_stream() 236 | 237 | # Try to format the README nicely if Pandoc is installed 238 | pagers = [ 239 | 'pandoc -s -f markdown -t man | man -l -', 240 | os.environ.get('PAGER', ''), 241 | 'less', 242 | 'more', 243 | ] 244 | 245 | pager = None 246 | 247 | for pager in pagers: 248 | if find_executable(pager.split(' ')[0]): 249 | break 250 | 251 | process = subprocess.Popen(pager, shell=True, stdin=subprocess.PIPE) 252 | process.communicate(input=readme.read()) 253 | readme.close() 254 | process.wait() 255 | 256 | def get_driver(self, conf): 257 | """ 258 | Find out what driver to use given the configuration. 259 | 260 | If no driver is explicitly specified, choose one which states 261 | the command is its valid target or fall back to Docker driver. 262 | """ 263 | 264 | if conf.driver: 265 | return conf.driver 266 | 267 | target = conf.command[0] 268 | for driver_name, driver_class in self.drivers.items(): 269 | if driver_class.valid_target(target): 270 | return driver_name 271 | 272 | return 'docker' 273 | 274 | def main(self): 275 | """ 276 | Run the specified application command. 277 | """ 278 | 279 | if self.conf.command == ['help']: 280 | self.help() 281 | return 0 282 | 283 | driver_name = self.get_driver(self.conf) 284 | driver_class = self.drivers[driver_name] 285 | 286 | (target, *command) = self.conf.command 287 | 288 | services = [] 289 | try: 290 | try: 291 | # This strange loop is so that even if we get an exception 292 | # mid-loop, we still get the list of services that have been 293 | # successfully started (otherwise we get empty array) 294 | services_gen = ( 295 | self.services[service].provide( 296 | self.conf.application_id, 297 | overrides=project_args(self.conf, service), 298 | transient=self.conf.transient, 299 | ) 300 | for service in self.conf.services 301 | ) 302 | for service in services_gen: 303 | services.append(service) 304 | 305 | environment = dict(self.conf.environment) 306 | 307 | driver = driver_class( 308 | target=target, 309 | services=services, 310 | environment=environment, 311 | conf=self.conf, 312 | ) 313 | 314 | except ImproperlyConfigured as ex: 315 | print(ex) 316 | return 1 317 | 318 | return driver.run(*command) 319 | finally: 320 | if self.conf.rm: 321 | for service in services: 322 | # pylint:disable=undefined-loop-variable 323 | service.cleanup() 324 | 325 | def setup_logging(self): 326 | """ 327 | Setup the root logger 328 | """ 329 | logging.basicConfig(level=self.conf.loglevel) 330 | 331 | 332 | def main(): 333 | """ 334 | Main entry point. 335 | """ 336 | 337 | return Forklift(sys.argv).main() 338 | 339 | 340 | if __name__ == '__main__': 341 | main() 342 | -------------------------------------------------------------------------------- /forklift/drivers.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright 2014 Infoxchange Australia 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 
15 | 16 | """ 17 | Drivers that can execute applications. 18 | """ 19 | 20 | 21 | import fcntl 22 | import logging 23 | import os 24 | import pwd 25 | import re 26 | import socket 27 | import struct 28 | import subprocess 29 | import sys 30 | 31 | # pylint:disable=no-name-in-module,import-error 32 | from distutils.spawn import find_executable 33 | # pylint:enable=no-name-in-module,import-error 34 | 35 | import docker 36 | 37 | from forklift.base import ( 38 | DEVNULL, 39 | free_port, 40 | ImproperlyConfigured, 41 | wait_for, 42 | wait_for_parent, 43 | ) 44 | from forklift.registry import Registry 45 | 46 | 47 | register = Registry() # pylint:disable=invalid-name 48 | 49 | LOGGER = logging.getLogger(__name__) 50 | 51 | 52 | class Driver(object): 53 | """ 54 | A method of executing the application with supplied services. 55 | """ 56 | 57 | def __init__(self, target, services, environment, conf): 58 | """ 59 | Initialise the driver with the specified target and services. 60 | """ 61 | 62 | self.target = target 63 | self.services = services 64 | self.added_environment = environment 65 | self.conf = conf 66 | 67 | @staticmethod 68 | def valid_target(target): 69 | """ 70 | Guess whether a target is valid for the given driver. 71 | 72 | Override to have driver chosen automatically based on target 73 | given. 74 | """ 75 | 76 | return False 77 | 78 | def run(self, *command): 79 | """ 80 | Run the command on the target. 81 | """ 82 | 83 | raise NotImplementedError("Please override run().") 84 | 85 | def _run(self, command): 86 | """ 87 | Run the command AS THE CURRENT PROCESS (you will be replaced). 88 | 89 | We also fork and add the child to its own process group. This means 90 | that the child will persist after the parent completes and will not 91 | receive signals (SIGTERM, SIGKILL, SIGSTOP, etc) from the parent 92 | pgroup, and will also not grab standard input from the parent process. 93 | 94 | When the parent process finishes, we return (in the child process) so 95 | that the main code path can complete 96 | """ 97 | 98 | child_pid = os.fork() 99 | if child_pid: 100 | os.execvp(command[0], command) 101 | else: 102 | os.setpgrp() 103 | wait_for_parent() 104 | 105 | def base_environment(self): 106 | """ 107 | The service-independent environment to supply to the application. 108 | """ 109 | 110 | return { 111 | 'ENVIRONMENT': 'dev_local', 112 | 'DEVNAME': pwd.getpwuid(os.getuid())[0], 113 | # TODO: TZ 114 | 'SITE_PROTOCOL': 'http', 115 | 'SITE_DOMAIN': 'localhost:{0}'.format(self.serve_port()), 116 | } 117 | 118 | def environment(self): 119 | """ 120 | The environment to supply to the application. 121 | """ 122 | 123 | env = self.base_environment() 124 | 125 | for service in self.services: 126 | env.update(service.environment()) 127 | 128 | env.update(self.added_environment) 129 | 130 | return env 131 | 132 | @staticmethod 133 | def _free_port(): 134 | """ 135 | Find a free port to serve on. 136 | 137 | Overridden in tests for reproducibility. 138 | """ 139 | 140 | return free_port() 141 | 142 | def serve_port(self): 143 | """ 144 | The port for application to serve in. 145 | 146 | If not explicitly given in the configuration, pick a free one. 
147 | """ 148 | 149 | if self.conf.serve_port: 150 | return self.conf.serve_port 151 | 152 | # pylint:disable=access-member-before-definition 153 | # pylint:disable=attribute-defined-outside-init 154 | if hasattr(self, '_serve_port'): 155 | return self._serve_port 156 | 157 | self._serve_port = self._free_port() 158 | return self._serve_port 159 | 160 | @classmethod 161 | def add_arguments(cls, add_argument): 162 | """ 163 | Add driver configuration arguments to the parser. 164 | """ 165 | 166 | add_argument('--serve_port', type=int, default=None, 167 | help="The port to expose the application on") 168 | 169 | 170 | def ip_address(ifname): 171 | """ 172 | Get the IP address associated with an interface. 173 | """ 174 | 175 | with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as tmp_socket: 176 | return socket.inet_ntoa(fcntl.ioctl( 177 | tmp_socket.fileno(), 178 | 0x8915, # SIOCGIFADDR 179 | struct.pack('256s', ifname[:15].encode()) 180 | )[20:24]) 181 | 182 | 183 | @register('docker') 184 | class Docker(Driver): 185 | """ 186 | Execute the application packaged as a Docker container. 187 | """ 188 | 189 | @classmethod 190 | def add_arguments(cls, add_argument): 191 | """ 192 | Add Docker-specific options. 193 | """ 194 | 195 | super().add_arguments(add_argument) 196 | 197 | add_argument('--rm', default=False, action='store_true', 198 | help="Remove the container after the command exit") 199 | add_argument('--privileged', default=False, action='store_true', 200 | help="Run the container in privileged mode") 201 | add_argument('--interactive', default=False, action='store_true', 202 | help="Run the command in interactive mode") 203 | add_argument('--mount-root', 204 | help="The directory to bind the root directory of the " + 205 | "container to") 206 | add_argument('--storage', 207 | help="The directory to mount under /storage in the " + 208 | "container") 209 | add_argument('--user', default='app', 210 | help="The user to set up for SSH in the container") 211 | add_argument('--host_uid', default=os.getuid(), 212 | help="The UID for the user inside the container") 213 | add_argument('--identity', 214 | help="The public key to authorise logging in as") 215 | add_argument('--detach', default=False, action='store_true', 216 | help="Detach docker container from terminal") 217 | 218 | def run(self, *command): 219 | """ 220 | Run the command in Docker container. 221 | """ 222 | 223 | # Check resolv.conf for local nameservers 224 | with open('/etc/resolv.conf') as rcfile: 225 | nameservers = [l for l in rcfile.read().splitlines() 226 | if 'nameserver' in l and not l.startswith('#')] 227 | if any('127.0.0.1' in l or '127.0.1.1' in l 228 | for l in nameservers): 229 | print( 230 | "/etc/resolv.conf on the host specifies localhost " 231 | "as the name server. 
This will make Docker use Google " 232 | "Public DNS inside the container, and accessing Intranet " 233 | "resources will fail.\n" 234 | "Please fix /etc/resolv.conf on the host before " 235 | "continuing.", 236 | file=sys.stderr, 237 | ) 238 | 239 | if list(command) == ['sshd']: 240 | return self.run_sshd() 241 | elif self.conf.detach: 242 | command = self.docker_command(*command) 243 | container = subprocess.check_output(command).decode().strip() 244 | self.mount_root(container) 245 | 246 | print("Running as %s" % container) 247 | print("docker exec -t -i %s bash" % container) 248 | return 0 249 | else: 250 | command = self.docker_command(*command) 251 | return self._run(command) 252 | 253 | def environment(self): 254 | """ 255 | Change every service's host attribute from localhost. 256 | """ 257 | 258 | for service in self.services: 259 | if 'host' in service.allow_override: 260 | if service.host == 'localhost': 261 | service.host = ip_address('docker0') 262 | 263 | return super().environment() 264 | 265 | def docker_command(self, *command, use_sshd=False): 266 | """ 267 | The Docker command to start a container. 268 | """ 269 | 270 | docker_command = [ 271 | 'docker', 'run', 272 | '-p', '{0}:8000'.format(self.serve_port()), 273 | ] 274 | 275 | if self.conf.rm: 276 | docker_command += ['--rm'] 277 | 278 | if self.conf.detach: 279 | docker_command += ['-d'] 280 | 281 | if use_sshd: 282 | docker_command += [ 283 | '-d', 284 | '-p', '22', 285 | '--entrypoint=/bin/bash', 286 | '-u=root', 287 | ] 288 | else: 289 | for key, value in self.environment().items(): 290 | docker_command += ['-e', '{0}={1}'.format(key, value)] 291 | 292 | if self.conf.privileged: 293 | docker_command += [ 294 | '--privileged', 295 | ] 296 | 297 | if self.conf.interactive: 298 | docker_command += [ 299 | '-i', '-t', 300 | ] 301 | 302 | if self.conf.storage: 303 | storage = self.conf.storage 304 | subprocess.check_call(['mkdir', '-p', storage]) 305 | docker_command += [ 306 | '-v', '{0}:/storage'.format(storage), 307 | ] 308 | docker_command += [self.target] 309 | docker_command += command 310 | 311 | if docker_command: 312 | LOGGER.debug( 313 | 'docker command:\n%s', 314 | ' '.join(docker_command).replace('&&', '&& \\ \n') 315 | ) 316 | 317 | return docker_command 318 | 319 | def mount_root(self, container): 320 | """ 321 | Mount the container's root directory on the host. 322 | """ 323 | 324 | # If requested, mount the working directory 325 | if self.conf.mount_root: 326 | mount_root = self.conf.mount_root 327 | 328 | subprocess.call(['sudo', 'umount', mount_root], 329 | stderr=DEVNULL) 330 | subprocess.check_call(['mkdir', '-p', mount_root]) 331 | 332 | with docker.Client(version='auto') as client: 333 | driver = client.inspect_container(container)['Driver'] 334 | 335 | # Different drivers use different paths 336 | rootfs_rel_path = { 337 | 'aufs': 'aufs/mnt/{container}', 338 | 'devicemapper': 'devicemapper/mnt/{container}/rootfs', 339 | 'btrfs': 'btrfs/subvolumes/{container}', 340 | 'overlay': 'overlay/{container}/merged', 341 | } 342 | 343 | if driver == 'devicemapper': 344 | # we need to actually mount the devicemapper device 345 | # using the correct security context 346 | # 347 | # Derive the path of the DM device 348 | pool_name = dict(client.info()['DriverStatus'])['Pool Name'] 349 | mapper_path = '/dev/mapper/' + pool_name[:-4] + container 350 | 351 | # Somewhere to mount the container. It doesn't actually matter 352 | # it just needs to align with rootfs_rel_path above. 
353 | container_path = \ 354 | '/var/lib/docker/devicemapper/mnt/{container}/'.format( 355 | container=container, 356 | ) 357 | 358 | # The SELinux security context 359 | context = client.inspect_container(container)['MountLabel'] 360 | 361 | # Mount the DM device 362 | subprocess.check_call(['sudo', 'mount', '-o', 363 | 'context="%s"' % context, 364 | mapper_path, 365 | container_path]) 366 | 367 | rootfs_path = '/var/lib/docker/' + rootfs_rel_path[driver].format( 368 | driver=driver, 369 | container=container, 370 | ) 371 | 372 | subprocess.check_call(['sudo', 'mount', '-o', 'bind', 373 | rootfs_path, 374 | mount_root]) 375 | print("Container filesystem mounted on {mount_root}".format( 376 | mount_root=mount_root)) 377 | 378 | if driver == 'devicemapper': 379 | # clean up after ourselves 380 | subprocess.check_call(['sudo', 'umount', container_path]) 381 | 382 | def run_sshd(self): 383 | """ 384 | Run SSH server in a container. 385 | """ 386 | 387 | # determine the user's SSH key(s) 388 | identity = None 389 | if not self.conf.identity: 390 | # provide the entire set of keys 391 | # pylint:disable=no-member 392 | ssh_key = (subprocess 393 | .check_output(['ssh-add', '-L']) 394 | .decode() 395 | .strip()) 396 | if ssh_key == '': 397 | raise ImproperlyConfigured( 398 | "You don't seem to have any SSH keys! " 399 | "How do you do any work?") 400 | else: 401 | identity = self.conf.identity 402 | if not os.path.exists(identity): 403 | identity = os.path.expanduser( 404 | '~/.ssh/{}'.format(self.conf.identity)) 405 | with open(identity + '.pub') as id_file: 406 | ssh_key = id_file.read().strip() 407 | 408 | commands = [ 409 | 'DEBIAN_FRONTEND=noninteractive apt-get -qq update', 410 | 'DEBIAN_FRONTEND=noninteractive apt-get -qq install dropbear sudo', 411 | '(kill $(pidof dropbear) || true)', 412 | ] + [ 413 | # TODO: this only passes the environment to shells. 414 | # Commands run directly (ssh ... command) get no environment. 415 | 'echo \'export {0}={1}\' >> /etc/profile'.format(*env) 416 | for env in self.environment().items() 417 | ] + [ 418 | '(useradd -m {user} || true)', 419 | 'mkdir -p ~{user}/.ssh', 420 | 'echo \'{ssh_key}\' >> ~{user}/.ssh/authorized_keys', 421 | 'chown -R {user} ~{user}/.ssh', 422 | 'chmod -R go-rwx ~{user}/.ssh', 423 | 'chsh -s /bin/bash {user}', 424 | 'usermod -p zzz {user}', 425 | 'chown -R --from={user} {host_uid} ~{user}', 426 | 'usermod -u {host_uid} {user}', 427 | 'chown {user} ~{user}', 428 | 'chmod go-w ~{user}', 429 | 'echo \'{user} ALL=(ALL) NOPASSWD: ALL\' >> /etc/sudoers', 430 | 'echo Starting SSH...', 431 | '/usr/sbin/dropbear -F', 432 | ] 433 | 434 | args = { 435 | 'user': self.conf.user, 436 | 'host_uid': self.conf.host_uid, 437 | 'ssh_key': ssh_key, 438 | } 439 | 440 | command = self.docker_command( 441 | '-c', 442 | ' && '.join(cmd.format(**args) for cmd in commands), 443 | use_sshd=True 444 | ) 445 | container = subprocess.check_output(command).decode().strip() 446 | self.mount_root(container) 447 | 448 | ssh_command, ssh_available = self.ssh_command(container, identity) 449 | if not ssh_available: 450 | print("Timed out waiting for SSH setup. You can still try " 451 | "the command below but it might also indicate a problem " 452 | "with SSH setup.") 453 | print(ssh_command) 454 | 455 | return 0 456 | 457 | def ssh_command(self, container, identity=None): 458 | """ 459 | Wait for SSH service to start and print the command to SSH to 460 | the container. 
461 | 462 | Returns a tuple of (command, available), where command is the command 463 | to run and available is an indication of whether the self-test 464 | succeeded. 465 | """ 466 | 467 | with docker.Client(version='auto') as docker_client: 468 | ssh_port = docker_client.port(container, 22)[0]['HostPort'] 469 | 470 | ssh_command = [ 471 | 'ssh', 472 | '{0}@localhost'.format(self.conf.user), 473 | '-p', 474 | ssh_port, 475 | '-A', 476 | ] 477 | 478 | if identity: 479 | ssh_command += ('-i', identity) 480 | 481 | available = False 482 | try: 483 | available = wait_for( 484 | lambda: subprocess.check_call( 485 | ssh_command + [ 486 | '-o', 'StrictHostKeyChecking=no', 487 | '-o', 'PasswordAuthentication=no', 488 | '-o', 'NoHostAuthenticationForLocalhost=yes', 489 | '/bin/true', 490 | ], 491 | stdin=DEVNULL, 492 | stdout=DEVNULL, 493 | stderr=DEVNULL, 494 | ) or True, 495 | expected_exceptions=(subprocess.CalledProcessError,) 496 | ) 497 | except (subprocess.CalledProcessError, OSError) as ex: 498 | print(ex) 499 | 500 | return (' '.join(ssh_command), available) 501 | 502 | 503 | @register('container_recycler') 504 | class ContainerRecycler(Driver): 505 | """ 506 | Cleans up Docker's mess 507 | """ 508 | 509 | @classmethod 510 | def add_arguments(cls, add_argument): 511 | """ 512 | Add recycler-specific options. 513 | """ 514 | 515 | super().add_arguments(add_argument) 516 | 517 | add_argument('--include-running', default=False, action='store_true', 518 | help="Remove running containers as well") 519 | add_argument('--include-tagged', default=False, action='store_true', 520 | help="Remove tagged images as well") 521 | 522 | def run(self, *command): 523 | """ 524 | Recycle old containers and images 525 | """ 526 | 527 | self.recycle_containers(include_running=self.conf.include_running) 528 | self.recycle_images(include_tagged=self.conf.include_tagged) 529 | 530 | def recycle_containers(self, include_running=False): 531 | """ 532 | Clean up old stopped (and optionally running) containers 533 | """ 534 | all_containers = set( 535 | subprocess.check_output(('docker', 'ps', '-aq')) 536 | .split() 537 | ) 538 | 539 | running_containers = set( 540 | subprocess.check_output(('docker', 'ps', '-q')) 541 | .split() 542 | ) 543 | 544 | if include_running: 545 | containers = all_containers 546 | 547 | else: 548 | if running_containers: 549 | print("You have running containers, pass " 550 | "--include-running to remove") 551 | 552 | containers = all_containers - running_containers 553 | 554 | if containers: 555 | print("Removing old containers...") 556 | subprocess.check_call(('docker', 'rm', '-f') + tuple(containers)) 557 | 558 | def recycle_images(self, include_tagged=False): 559 | """ 560 | Clean up untagged (and optionally tagged) images 561 | """ 562 | images = set() 563 | tagged_images = set() 564 | 565 | output = subprocess.check_output(('docker', 'images'), 566 | universal_newlines=True) 567 | output = output.strip().split('\n') 568 | 569 | # the first line contains the offsets 570 | header = output[0] 571 | remainder = output[1:] 572 | 573 | # calculate the column widths from the header by calculating the 574 | # offsets of the columns 575 | columns = [header.index(l) 576 | for l in re.split(r'\s\s+', header)] + [len(header)] 577 | columns = [(a, b) for a, b in zip(columns, columns[1:])] 578 | 579 | for line in remainder: 580 | repo, tag, image, _, _ = (line[a:b].strip() for a, b in columns) 581 | images.add(image) 582 | 583 | if repo != '' and tag != '': 584 | tagged_images.add(image) 585 | 586 | 
if include_tagged: 587 | pass 588 | 589 | else: 590 | if tagged_images: 591 | print("You have tagged images, pass " 592 | "--include-tagged to remove") 593 | 594 | images -= tagged_images 595 | 596 | if images: 597 | print("Removing old images...") 598 | subprocess.check_call(('docker', 'rmi') + tuple(images)) 599 | 600 | @staticmethod 601 | def valid_target(target): 602 | 603 | return target == 'recycle' 604 | 605 | 606 | @register('direct') 607 | class Direct(Driver): 608 | """ 609 | Execute the application directly. 610 | """ 611 | 612 | def run(self, *command): 613 | """ 614 | Run the application directly. 615 | """ 616 | 617 | for key, value in self.environment().items(): 618 | os.environ[key] = value 619 | 620 | return self._run([self.target] + list(command)) 621 | 622 | @staticmethod 623 | def valid_target(target): 624 | """ 625 | Check if the target is directly executable. 626 | """ 627 | 628 | return find_executable(target) 629 | -------------------------------------------------------------------------------- /forklift/services/base.py: -------------------------------------------------------------------------------- 1 | # 2 | # Copyright 2014 Infoxchange Australia 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); 5 | # you may not use this file except in compliance with the License. 6 | # You may obtain a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, 12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | """ 17 | Services that can be provided to running applications - base definitions. 18 | """ 19 | 20 | import logging 21 | import os 22 | import socket 23 | import subprocess 24 | import sys 25 | import urllib.parse 26 | 27 | from collections import namedtuple 28 | from operator import attrgetter 29 | 30 | import docker 31 | 32 | import requests.exceptions 33 | 34 | from xdg.BaseDirectory import save_cache_path 35 | 36 | from forklift.base import ImproperlyConfigured, wait_for, rm_tree_root_owned 37 | from forklift.registry import Registry 38 | 39 | LOGGER = logging.getLogger(__name__) 40 | register = Registry() # pylint:disable=invalid-name 41 | 42 | 43 | def try_port(host, port): 44 | """ 45 | Try to connect to a given TCP port. 46 | """ 47 | 48 | with socket.socket() as sock: 49 | sock.connect((host, int(port))) 50 | return True 51 | 52 | 53 | def port_open(host, port): 54 | """ 55 | Check whether the specified TCP port is open. 56 | """ 57 | 58 | try: 59 | return try_port(host, port) 60 | except socket.error: 61 | return False 62 | 63 | 64 | def split_host_port(host_port, default_port): 65 | """ 66 | Split host:port into host and port, using the default port in case 67 | it's not given. 68 | """ 69 | 70 | host_port = host_port.split(':') 71 | if len(host_port) == 2: 72 | host, port = host_port 73 | return host, port 74 | else: 75 | return host_port[0], default_port 76 | 77 | 78 | def pipe_split(value): 79 | """ 80 | Split a pipe-separated string if it's the only value in an array. 
81 | """ 82 | 83 | if isinstance(value, str): 84 | value = (value,) 85 | 86 | try: 87 | value = tuple(value) 88 | except TypeError: 89 | value = (value,) 90 | 91 | if len(value) == 1 and isinstance(value[0], str) and '|' in value[0]: 92 | return value[0].split('|') 93 | else: 94 | return value 95 | 96 | 97 | def transient_provider(func): 98 | """ 99 | Decorator to mark a provider as transient 100 | """ 101 | func.transient = True 102 | return func 103 | 104 | 105 | class ProviderNotAvailable(Exception): 106 | """ 107 | A service provider is not available. 108 | """ 109 | 110 | pass 111 | 112 | 113 | class DependencyRequired(ProviderNotAvailable): 114 | """ 115 | A dependency is required to make a provider available. 116 | """ 117 | 118 | def __init__(self, message, command=None): 119 | super().__init__(message) 120 | self.command = command 121 | 122 | 123 | class DockerImageRequired(DependencyRequired): 124 | """ 125 | A Docker image is required to make a provider available. 126 | """ 127 | 128 | def __init__(self, image): 129 | super().__init__( 130 | message="Docker image {0} is required.".format(image), 131 | command='docker pull {0}'.format(image), 132 | ) 133 | 134 | 135 | class ContainerRefusingConnection(ProviderNotAvailable): 136 | """ 137 | A Docker container that was started is not connectable after a period of 138 | time. 139 | """ 140 | 141 | def __init__(self, image, port): 142 | super().__init__( 143 | "Docker container {0} was started but couldn't connect on" 144 | "port {1}".format(image, port) 145 | ) 146 | 147 | 148 | class Service(object): 149 | """ 150 | Base class for services required by the application. 151 | """ 152 | 153 | # A list of class methods to try to find an available service provider. 154 | providers = () 155 | 156 | # A list of attributes which can be overridden from a configuration file 157 | # or the command line. 158 | allow_override = () 159 | 160 | # A list of attributes which can be overridden as a list of arguments 161 | # (i.e. hosts, urls) 162 | allow_override_list = () 163 | 164 | TEMPORARY_AVAILABILITY_ERRORS = ( 165 | ProviderNotAvailable, 166 | socket.error, 167 | ) 168 | PERMANENT_AVAILABILITY_ERRORS = () 169 | 170 | CONTAINER_IMAGE = None 171 | DEFAULT_PORT = None 172 | 173 | @classmethod 174 | def add_arguments(cls, add_argument): 175 | """ 176 | Add service configuration arguments to the parser. 177 | """ 178 | 179 | # TODO: refactor for types other than string (port numbers) and 180 | # list (Elasticsearch host). 181 | 182 | for param in cls.allow_override: 183 | add_argument('--{0}'.format(param)) 184 | 185 | for param in cls.allow_override_list: 186 | add_argument('--{0}'.format(param), nargs='+') 187 | 188 | @classmethod 189 | def provide(cls, application_id, overrides=None, transient=False): 190 | """ 191 | Choose the first available service from the list of providers. 
192 | """ 193 | for provider in cls.providers: 194 | provider_func = getattr(cls, provider) 195 | if transient and not getattr(provider_func, 'transient', False): 196 | LOGGER.debug("Skipping %s provider for %s service because " 197 | "it's not transient", provider, cls.__name__) 198 | continue 199 | 200 | LOGGER.debug("Trying %s provider for %s service", 201 | provider, cls.__name__) 202 | try: 203 | service = provider_func(application_id) 204 | setattr(service, 'provided_by', provider) 205 | except ProviderNotAvailable as exc: 206 | print(( 207 | "While trying '{provider}' provider for {service}: {exc}" 208 | ).format( 209 | provider=provider, 210 | service=cls.__name__, 211 | exc=exc, 212 | ), file=sys.stderr) 213 | continue 214 | 215 | cls._set_overrides(service, overrides) 216 | 217 | try: 218 | if service.available(): 219 | return service 220 | except: 221 | service.cleanup() 222 | raise 223 | 224 | raise ImproperlyConfigured( 225 | "No available providers for service {0}.".format(cls.__name__)) 226 | 227 | @classmethod 228 | def _set_overrides(cls, service, overrides=None): 229 | """ 230 | Setup override values on a service 231 | """ 232 | overrides = vars(overrides) if overrides else {} 233 | allowed_overrides = cls.allow_override + cls.allow_override_list 234 | 235 | for key, value in overrides.items(): 236 | if value is not None: 237 | if key in allowed_overrides: 238 | setattr(service, key, value) 239 | LOGGER.debug("Config for %s: %s = %s", 240 | cls.__name__, key, value) 241 | else: 242 | raise ImproperlyConfigured( 243 | "Invalid parameter {0} for service {1}.".format( 244 | key, cls.__name__)) 245 | 246 | def available(self): 247 | """ 248 | Wrap check_available so that "expected" exceptions are not raised 249 | """ 250 | try: 251 | return self.check_available() 252 | except self.TEMPORARY_AVAILABILITY_ERRORS: 253 | return False 254 | except self.PERMANENT_AVAILABILITY_ERRORS: 255 | return False 256 | 257 | def check_available(self): 258 | """ 259 | Check whether the service is available. Override to implement 260 | availability checks to warn the user instead of let the application 261 | fail. 262 | """ 263 | return True 264 | 265 | def wait_until_available(self, retries=60): 266 | """ 267 | Wait for the container to be available before returning. If the retry 268 | limit is exceeded, ProviderNotAvailable is raised 269 | 270 | Parameters: 271 | retries - number of times to retry before giving up 272 | """ 273 | try: 274 | LOGGER.info("Waiting for %s to become available", 275 | self.__class__.__name__) 276 | available = wait_for( 277 | self.check_available, 278 | expected_exceptions=self.TEMPORARY_AVAILABILITY_ERRORS, 279 | retries=retries, 280 | ) 281 | return available 282 | 283 | except self.PERMANENT_AVAILABILITY_ERRORS as ex: 284 | print("Error checking for {}: {}".format( 285 | self.__class__.__name__, ex 286 | )) 287 | return False 288 | 289 | def environment(self): 290 | """ 291 | The environment, as a dictionary, to let the application know 292 | the service configuration. 
293 | """ 294 | 295 | raise NotImplementedError("Please override environment().") 296 | 297 | def cleanup(self): 298 | """ 299 | Do any clean up required to undo anything that was done in the provide 300 | method 301 | """ 302 | 303 | # pylint:disable=no-member 304 | if self.provided_by != 'container': 305 | LOGGER.debug("Don't know how to clean up %s service provided " 306 | "by %s", 307 | self.__class__.__name__, 308 | self.provided_by) 309 | return False 310 | 311 | if self.container_info.new: 312 | LOGGER.debug("Cleaning up container '%s' for %s service", 313 | self.container_info.name, 314 | self.__class__.__name__) 315 | destroy_container(self.container_info.name) 316 | else: 317 | LOGGER.debug("Not cleaning up container '%s' for service %s " 318 | "because it was not created by this invocation", 319 | self.container_info.name, 320 | self.__class__.__name__) 321 | 322 | return True 323 | 324 | @classmethod 325 | def ensure_container(cls, application_id, **kwargs): 326 | """ 327 | Ensure a container for this service is running. 328 | """ 329 | 330 | return _ensure_container( 331 | image=cls.CONTAINER_IMAGE, 332 | port=cls.DEFAULT_PORT, 333 | application_id=application_id, 334 | **kwargs 335 | ) 336 | 337 | @classmethod 338 | def from_container(cls, application_id, container): 339 | """ 340 | The service instance connecting to the specified Docker container. 341 | """ 342 | 343 | raise NotImplementedError("Please override from_container.") 344 | 345 | @classmethod 346 | @transient_provider 347 | def container(cls, application_id): 348 | """ 349 | A generic container provider for a service. Needs to be specified in 350 | 'providers' to activate. 351 | """ 352 | 353 | container = cls.ensure_container(application_id) 354 | 355 | instance = cls.from_container(application_id, container) 356 | instance.wait_until_available() 357 | 358 | # pylint:disable=attribute-defined-outside-init 359 | instance.container_info = container 360 | return instance 361 | 362 | 363 | def replace_part(url, **kwargs): 364 | """ 365 | Replace a part of the URL with a new value. 366 | 367 | Keyword arguments can be any properties of urllib.parse.ParseResult. 368 | """ 369 | 370 | netloc_parts = ('username', 'password', 'hostname', 'port') 371 | 372 | for part, value in kwargs.items(): 373 | if part in netloc_parts: 374 | # Reassemble netloc 375 | netloc = { 376 | p: getattr(url, p) 377 | for p in netloc_parts 378 | } 379 | 380 | netloc[part] = value 381 | 382 | netloc_str = netloc['hostname'] 383 | 384 | if netloc['port']: 385 | netloc_str += ':' + str(netloc['port']) 386 | if netloc['username'] or netloc['password']: 387 | userinfo = netloc['username'] or '' 388 | if netloc['password']: 389 | userinfo += ':' + netloc['password'] 390 | netloc_str = userinfo + '@' + netloc_str 391 | 392 | kwargs = {'netloc': netloc_str} 393 | else: 394 | kwargs = {part: value} 395 | 396 | # pylint:disable=protected-access 397 | url = url._replace(**kwargs) 398 | 399 | return url 400 | 401 | 402 | class URLDescriptor(object): 403 | """ 404 | A descriptor to get or set an URL part for all the URLs of the class. 405 | """ 406 | 407 | def __init__(self, part, default='', joiner='|'.join): 408 | """ 409 | Initialise a descriptor to get or set an URL part. 
410 | 411 | Parameters: 412 | part - the part to get, can be a string or a tuple of (get, set) 413 | default - filler for missing parts of the URL, defaults to '' 414 | joiner - how to join the parts from the URL array together; 415 | defaults to concatenating with '|' in between 416 | """ 417 | 418 | if isinstance(part, str): 419 | self.getter = attrgetter(part) 420 | self.setter = lambda url, value: replace_part(url, **{part: value}) 421 | else: 422 | (self.getter, self.setter) = part 423 | 424 | self.default = default 425 | self.joiner = joiner 426 | 427 | def __get__(self, instance, owner): 428 | """ 429 | Get the URL part for all URLs in the instance. 430 | """ 431 | 432 | if instance is None: 433 | return self 434 | 435 | return self.joiner( 436 | self.getter(url) or self.default 437 | for url in instance.urls 438 | ) 439 | 440 | def __set__(self, instance, value): 441 | """ 442 | Set the URL part for all URLs in the instance. 443 | """ 444 | 445 | instance.urls = tuple( 446 | self.setter(url, value) 447 | for url in instance.urls 448 | ) 449 | 450 | 451 | class URLNameDescriptor(URLDescriptor): 452 | """ 453 | A descriptor to get or set the URL path without a leading slash, 454 | commonly used when mapping an alphanumeric namespace (e.g. database names) 455 | onto URLs. 456 | """ 457 | 458 | def __init__(self): 459 | super().__init__(( 460 | lambda url: url.path.lstrip('/'), 461 | lambda url, value: replace_part(url, path='/' + value), 462 | )) 463 | 464 | 465 | class URLMultiValueDescriptor(URLDescriptor): 466 | """ 467 | A descriptor to get or set part of the URLs to an array of values. 468 | """ 469 | 470 | def __init__(self, part, default='', joiner=lambda xs: next(iter(xs))): 471 | super().__init__(part, default, joiner) 472 | 473 | def __set__(self, instance, value): 474 | """ 475 | Set the URL part to an array of values. 476 | 477 | After setting this, all the URLs will be identical except for hostinfo 478 | taken from the (iterable) value assigned. The rest of the URL 479 | parameters will be copied from the first one. 480 | """ 481 | 482 | url = instance.urls[0] 483 | instance.urls = tuple( 484 | self.setter(url, v) 485 | for v in pipe_split(value) 486 | ) 487 | 488 | 489 | class URLHostInfoDescriptor(URLMultiValueDescriptor): 490 | """ 491 | A descriptor to get or set hostinfo pairs (hostname:port), accounting for 492 | default port. 493 | """ 494 | 495 | def get_hostinfo(self, url): 496 | """ 497 | Get the hostname:port pair of the URL. 498 | """ 499 | 500 | port = url.port 501 | if port == self.default_port or port is None: 502 | return url.hostname 503 | else: 504 | return ':'.join((url.hostname, str(port))) 505 | 506 | def set_hostinfo(self, url, value): 507 | """ 508 | Set the hostname:port pair of the URL. 509 | """ 510 | 511 | hostname, port = split_host_port(value, self.default_port) 512 | return replace_part(url, hostname=hostname, port=port) 513 | 514 | def __init__(self, default_port, joiner=lambda xs: next(iter(xs))): 515 | self.default_port = default_port 516 | super().__init__( 517 | ( 518 | self.get_hostinfo, 519 | self.set_hostinfo, 520 | ), 521 | joiner=joiner, 522 | ) 523 | 524 | 525 | class URLService(Service): 526 | """ 527 | A service specified by a set of URLs. 528 | 529 | This is a common 12 factor pattern and should be used instead of inheriting 530 | Service directly as much as possible. 531 | """ 532 | 533 | # These set respective attributes on all the URLs. 
534 | allow_override = ('user', 'password', 'host', 'port', 'path') 535 | 536 | allow_override_list = ('urls',) 537 | 538 | def __init__(self, urls): 539 | self._urls = () 540 | self.urls = urls 541 | 542 | log_service_settings(LOGGER, self, 'urls') 543 | 544 | def url_string(self): 545 | """ 546 | All URLs joined as a string. 547 | """ 548 | return '|'.join(url.geturl() for url in self.urls) 549 | 550 | @property 551 | def urls(self): 552 | """ 553 | The array of the URLs the service can be accessed at. 554 | """ 555 | 556 | return self._urls 557 | 558 | @urls.setter 559 | def urls(self, urls): 560 | """ 561 | Set the URLs to access the service at. 562 | """ 563 | 564 | self._urls = tuple( 565 | urllib.parse.urlparse(url) if isinstance(url, str) else url 566 | for url in pipe_split(urls) 567 | ) 568 | 569 | user = URLDescriptor('username') 570 | password = URLDescriptor('password') 571 | host = hostname = URLMultiValueDescriptor('hostname') 572 | port = URLMultiValueDescriptor('port', default=None) 573 | path = URLDescriptor('path') 574 | 575 | 576 | ContainerInfo = namedtuple('ContainerInfo', ['host', 577 | 'port', 578 | 'data_dir', 579 | 'name', 580 | 'new']) 581 | 582 | 583 | def cache_directory(container_name): 584 | """ 585 | A directory to cache the container data in. 586 | """ 587 | 588 | return os.path.join(save_cache_path('forklift'), container_name) 589 | 590 | 591 | def container_name_for(image, application_id): 592 | """ 593 | Get a name for a service container based on image and application ID 594 | 595 | Parameters: 596 | image - image that the container is for 597 | application_id - application id that the container is for 598 | 599 | Return value: 600 | A string 601 | """ 602 | return image.replace('/', '_') + '__' + application_id 603 | 604 | 605 | def _ensure_container(image, 606 | port, 607 | application_id, 608 | data_dir=None, 609 | **kwargs): 610 | """ 611 | Ensure that a container for an application is running and wait for the port 612 | to be connectable. 
613 | 614 | Parameters: 615 | image - the image to run a container from 616 | port - the port to forward from the container 617 | application_id - the application ID, for naming the container 618 | data_dir - the directory to persistently mount inside the container 619 | 620 | Return value: 621 | An object with the following attributes: 622 | port - the forwarded port number 623 | data_dir - if asked for, path for the persistently mounted 624 | directory inside the container 625 | name - the container name 626 | new - True/False to show if the container was created or not 627 | """ 628 | 629 | with docker.Client(version='auto') as docker_client: 630 | 631 | # TODO: better container name 632 | container_name = container_name_for(image, application_id) 633 | LOGGER.info("Ensuring container for '%s' is started with name '%s'", 634 | image, container_name) 635 | 636 | if data_dir is not None: 637 | cached_dir = cache_directory(container_name) 638 | else: 639 | cached_dir = None 640 | 641 | try: 642 | created, container_status = get_or_create_container( 643 | docker_client, 644 | container_name, 645 | image, 646 | port, 647 | data_dir, 648 | cached_dir, 649 | **kwargs 650 | ) 651 | 652 | if not container_status['State']['Running']: 653 | _start_container(docker_client, 654 | container_name, 655 | port, 656 | data_dir, 657 | cached_dir) 658 | 659 | host = docker_client.inspect_container( 660 | container_name)['NetworkSettings']['IPAddress'] 661 | host_port = docker_client.port(container_name, port)[0]['HostPort'] 662 | 663 | try: 664 | _wait_for_port(image, host_port) 665 | except: 666 | if created: 667 | LOGGER.debug("Could not connect to '%s' container, so " 668 | "destroying it", image) 669 | destroy_container(container_name) 670 | raise 671 | 672 | return ContainerInfo(host=host, 673 | port=port, 674 | data_dir=cached_dir, 675 | name=container_name, 676 | new=created) 677 | except requests.exceptions.ConnectionError: 678 | raise ProviderNotAvailable("Cannot connect to Docker daemon.") 679 | 680 | 681 | # pylint:disable=too-many-arguments 682 | def get_or_create_container(docker_client, 683 | container_name, 684 | image, 685 | port, 686 | data_dir=None, 687 | cached_dir=None, 688 | **kwargs): 689 | """ 690 | Get info for an existing container by name, or create a new one 691 | 692 | Parameters: 693 | docker_client - a docker.Client object for the Docker daemon 694 | container_name - name to check/start 695 | image - the image to run a container from 696 | port - the port to forward from the container 697 | data_dir - the directory to persistently mount inside the container 698 | cached_dir - the directory to mount from the host to data_dir 699 | 700 | Return value: 701 | A tuple of: 702 | - True if the container started as a result of this call 703 | - Output from Docker inspect 704 | """ 705 | try: 706 | return False, docker_client.inspect_container(container_name) 707 | except docker.errors.APIError: 708 | try: 709 | docker_client.inspect_image(image) 710 | except docker.errors.APIError: 711 | raise DockerImageRequired(image) 712 | 713 | if data_dir is not None: 714 | # Ensure the data volume is mounted 715 | kwargs.setdefault('volumes', {})[data_dir] = {} 716 | 717 | docker_client.create_container( 718 | image, 719 | name=container_name, 720 | ports={port: {}}, 721 | **kwargs 722 | ) 723 | container_status = docker_client.inspect_container(container_name) 724 | 725 | return True, container_status 726 | 727 | 728 | def destroy_container(container_name): 729 | """ 730 | Stop and remove a 
container by name
731 |     """
732 |     cache_dir = cache_directory(container_name)
733 |     with docker.Client(version='auto') as docker_client:
734 |         docker_client.stop(container_name)
735 |         docker_client.remove_container(container_name)
736 | 
737 |     try:
738 |         rm_tree_root_owned(cache_dir)
739 |     except subprocess.CalledProcessError:
740 |         pass
741 | 
742 | 
743 | def _wait_for_port(image, port, retries=30):
744 |     """
745 |     Wait for a port to become available, or raise ContainerRefusingConnection
746 |     error
747 | 
748 |     Parameters:
749 |         image - the image that the container is run from
750 |         port - the port to wait for
751 |         retries - number of times to retry before giving up
752 |     """
753 |     LOGGER.debug("Waiting for '%s' port %s to be reachable", image, port)
754 |     if not wait_for(lambda: port_open('127.0.0.1', port), retries=retries):
755 |         raise ContainerRefusingConnection(image, port)
756 | 
757 | 
758 | def _start_container(docker_client, container, port, data_dir, cached_dir):
759 |     """
760 |     Start a container, binding ports and data dirs
761 | 
762 |     Parameters:
763 |         docker_client - client for the Docker API
764 |         container - the name of the container to start
765 |         port - the port to forward from the container
766 |         data_dir - the directory to persistently mount inside the container
767 |         cached_dir - the directory to mount from the host to data_dir
768 |     """
769 |     LOGGER.info("Starting container '%s'", container)
770 |     LOGGER.debug("Container port: %s", port)
771 |     LOGGER.debug("Container data dir (in container): %s", data_dir)
772 |     LOGGER.debug("Container cached dir (on host): %s", cached_dir)
773 |     start_args = {
774 |         'port_bindings': {port: None},
775 |     }
776 |     if data_dir is not None:
777 |         start_args['binds'] = {
778 |             cached_dir: data_dir,
779 |         }
780 |     docker_client.start(container, **start_args)
781 | 
782 | 
783 | def log_service_settings(logger, service, *attrs):
784 |     """
785 |     Format and log a service's settings.
786 | 
787 |     Parameters:
788 |         logger - a logger object to log to
789 |         service - the service object that the settings are for
790 |         attrs - a list of attrs to get from the service. If the attr is
791 |             callable, it will be called with no arguments. It may return
792 |             just a value, or a tuple of a new attr name and a value
793 |     """
794 |     if logger.isEnabledFor(logging.DEBUG):
795 |         for attr in attrs:
796 |             val = getattr(service, attr)
797 |             if callable(val):
798 |                 val = val()
799 | 
800 |             logger.debug("%s %s: %s", service.__class__.__name__, attr, val)
801 | 
--------------------------------------------------------------------------------
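As a brief, illustrative aside on how the URL plumbing in base.py fits together: the sketch below is not part of the module. It assumes the package is importable as forklift.services.base and that the pipe_split helper used by the urls setter accepts a plain pipe-separated string as well as an iterable, which is how the multi-value descriptors appear to use it.

from forklift.services.base import URLService

# Start from a single URL; the descriptors read and write its parts.
svc = URLService('postgres://app@db1:5432/appdb')
print(svc.host)   # 'db1'    (URLMultiValueDescriptor returns the first hostname)
print(svc.path)   # '/appdb' (URLDescriptor joins every URL's path with '|')

# Assigning a pipe-separated value through the multi-value 'host' descriptor
# fans the first URL out into one URL per hostname, keeping the user, port
# and path; replace_part() rebuilds the netloc for each new hostname.
svc.host = 'db1|db2'
print(svc.url_string())
# postgres://app@db1:5432/appdb|postgres://app@db2:5432/appdb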