├── changes_lxc_wrapper ├── __init__.py ├── cli │ ├── __init__.py │ ├── helper.py │ ├── manager.py │ └── wrapper.py ├── heartbeat.py ├── api.py ├── log_reporter.py ├── snapshot_cache.py └── container.py ├── .gitignore ├── support ├── bootstrap-vagrant.sh └── bootstrap-ubuntu.sh ├── setup.cfg ├── Vagrantfile ├── Makefile ├── tests ├── test_log_reporter.py ├── test_heartbeat.py ├── test_snapshot_cache.py └── cli │ └── test_wrapper.py ├── examples └── changes ├── setup.py ├── README.rst └── LICENSE /changes_lxc_wrapper/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /changes_lxc_wrapper/cli/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .arcconfig 2 | .coverage 3 | build 4 | /dist/ 5 | /.vagrant/ 6 | /env/ 7 | *.deb 8 | *.egg-info/ 9 | *.pyc 10 | -------------------------------------------------------------------------------- /support/bootstrap-vagrant.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -eux 2 | 3 | cd /vagrant/ 4 | 5 | support/bootstrap-ubuntu.sh 6 | 7 | sudo pip3 install -e . 8 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [pytest] 2 | addopts=--tb=short 3 | norecursedirs=env htmlcov docs node_modules .* *.egg-info{args} 4 | 5 | [flake8] 6 | ignore = F999,E501,E128,E124,E126,F841,E123 7 | max-line-length = 100 8 | exclude = env,.svn,CVS,.bzr,.hg,.git,__pycache 9 | -------------------------------------------------------------------------------- /Vagrantfile: -------------------------------------------------------------------------------- 1 | # -*- mode: ruby -*- 2 | # vi: set ft=ruby : 3 | 4 | # Vagrantfile API/syntax version. Don't touch unless you know what you're doing! 5 | VAGRANTFILE_API_VERSION = "2" 6 | 7 | Vagrant.configure(VAGRANTFILE_API_VERSION) do |config| 8 | config.vm.box = "ubuntu/trusty64" 9 | 10 | config.ssh.forward_agent = true 11 | 12 | config.vm.provider "virtualbox" do |v| 13 | v.memory = 1024 14 | v.cpus = 2 15 | end 16 | 17 | config.vm.provision :shell, :path => "support/bootstrap-vagrant.sh" 18 | end 19 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | PKG_NAME = changes-lxc-wrapper 2 | VERSION = 0.0.6 3 | REV=`git rev-list HEAD --count` 4 | 5 | deb: 6 | fpm -s python -t deb \ 7 | -n $(PKG_NAME) \ 8 | -v "$(VERSION)-$(REV)" \ 9 | -a all \ 10 | --python-bin python3 \ 11 | --python-package-name-prefix python3 \ 12 | -d "python3-setuptools" \ 13 | -d "python3" \ 14 | -d "python3-lxc" \ 15 | setup.py 16 | 17 | setup-test-env: 18 | virtualenv --python=`which python3` ./env --system-site-packages 19 | env/bin/pip3 install -e . 
20 | env/bin/pip3 install "file://`pwd`#egg=changes-lxc-wrapper[tests]" 21 | 22 | test: 23 | env/bin/py.test 24 | 25 | .PHONY: deb 26 | -------------------------------------------------------------------------------- /support/bootstrap-ubuntu.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -eux 2 | 3 | export DEBIAN_FRONTEND=noninteractive 4 | 5 | sudo apt-get install -y python-software-properties software-properties-common 6 | sudo add-apt-repository -y ppa:awstools-dev/awstools 7 | 8 | sudo apt-get update -y 9 | 10 | # Install basic Python support 11 | sudo apt-get install -y python3 python3-setuptools python3-pip python-virtualenv 12 | 13 | # Install aws cli tools 14 | sudo apt-get install -y awscli 15 | 16 | # Install git 17 | sudo apt-get install -y git 18 | 19 | # Install lxc 20 | sudo apt-get install -y libcgmanager0 lxc 21 | 22 | # Install fpm 23 | sudo apt-get install -y ruby-dev gcc 24 | sudo gem install fpm --no-ri --no-rdoc 25 | -------------------------------------------------------------------------------- /tests/test_log_reporter.py: -------------------------------------------------------------------------------- 1 | from mock import call, Mock 2 | from threading import Thread 3 | from uuid import uuid4 4 | 5 | from changes_lxc_wrapper.log_reporter import LogReporter 6 | 7 | 8 | def test_line_buffering(): 9 | mock_api = Mock() 10 | jobstep_id = uuid4() 11 | 12 | reporter = LogReporter(mock_api, jobstep_id) 13 | reporter_thread = Thread(target=reporter.process) 14 | reporter_thread.start() 15 | 16 | reporter.write('hello ') 17 | reporter.write('world\n') 18 | reporter.write('foo bar') 19 | 20 | reporter.close() 21 | reporter_thread.join() 22 | 23 | assert mock_api.mock_calls == [ 24 | call.append_log(jobstep_id, { 25 | 'text': 'hello world\n', 26 | 'source': 'console', 27 | }), 28 | call.append_log(jobstep_id, { 29 | 'text': 'foo bar', 30 | 'source': 'console', 31 | }), 32 | ] 33 | -------------------------------------------------------------------------------- /changes_lxc_wrapper/heartbeat.py: -------------------------------------------------------------------------------- 1 | from threading import Condition, Event 2 | 3 | 4 | class Heartbeater(object): 5 | interval = 5 6 | 7 | def __init__(self, api, jobstep_id, interval=None): 8 | self.api = api 9 | self.jobstep_id = jobstep_id 10 | self.cv = Condition() 11 | self.finished = Event() 12 | 13 | if interval is not None: 14 | self.interval = interval 15 | 16 | def wait(self): 17 | with self.cv: 18 | self.finished.clear() 19 | while not self.finished.is_set(): 20 | data = self.api.get_jobstep(self.jobstep_id) 21 | if data['status']['id'] == 'finished': 22 | self.finished.set() 23 | break 24 | 25 | self.cv.wait(self.interval) 26 | 27 | def close(self): 28 | with self.cv: 29 | self.finished.set() 30 | self.cv.notifyAll() 31 | -------------------------------------------------------------------------------- /tests/test_heartbeat.py: -------------------------------------------------------------------------------- 1 | from mock import call, Mock 2 | from time import sleep 3 | from threading import Thread 4 | from uuid import uuid4 5 | 6 | from changes_lxc_wrapper.heartbeat import Heartbeater 7 | 8 | 9 | def test_simple(): 10 | mock_api = Mock() 11 | jobstep_id = uuid4() 12 | 13 | mock_api.get_jobstep.return_value = { 14 | 'status': {'id': 'in_progress'} 15 | } 16 | 17 | heartbeater = Heartbeater(mock_api, jobstep_id) 18 | heartbeat_thread = Thread(target=heartbeater.wait) 19 | 
heartbeat_thread.start() 20 | 21 | sleep(0.001) 22 | 23 | assert heartbeat_thread.is_alive() 24 | mock_api.get_jobstep.assert_called_once_with(jobstep_id) 25 | 26 | mock_api.get_jobstep.return_value = { 27 | 'status': {'id': 'finished'} 28 | } 29 | 30 | sleep(0.001) 31 | 32 | # XXX(dcramer): we really shouldnt call this internal API 33 | with heartbeater.cv: 34 | heartbeater.cv.notifyAll() 35 | 36 | sleep(0.001) 37 | 38 | assert mock_api.mock_calls == [ 39 | call.get_jobstep(jobstep_id), 40 | call.get_jobstep(jobstep_id), 41 | ] 42 | 43 | assert not heartbeat_thread.is_alive() 44 | 45 | heartbeater.close() 46 | heartbeat_thread.join() 47 | -------------------------------------------------------------------------------- /examples/changes: -------------------------------------------------------------------------------- 1 | #!/bin/bash -eux 2 | 3 | # This example makes some assumptions about the system (such as NPM being installed), which 4 | # internally at Dropbox are handled by build system puppet configurations (and a few helper) 5 | # scripts. 6 | 7 | # Your system should be configured with a base image as such, and then run with something like: 8 | # sudo ./changes-lxc-wrapper \ 9 | # --project=changes \ 10 | # --script=examples/changes 11 | 12 | echo `whoami` 13 | echo $PATH 14 | 15 | sudo apt-get install -y python-software-properties software-properties-common 16 | 17 | sudo add-apt-repository -y ppa:chris-lea/node.js 18 | sudo add-apt-repository -y ppa:git-core/ppa 19 | sudo apt-get update -y 20 | 21 | sudo apt-get install -y git nodejs build-essential python-setuptools redis-server postgresql python-dev libpq-dev libevent-dev libxml2-dev libxslt-dev 22 | 23 | if [ ! -e ./source/ ]; then 24 | git clone https://github.com/dropbox/changes.git ./source/ 25 | pushd source 26 | else 27 | pushd source 28 | git pull https://github.com/dropbox/changes.git master 29 | fi 30 | 31 | npm --version 32 | 33 | sudo npm install -g bower 34 | sudo easy_install -U pip 35 | sudo pip install virtualenv 36 | 37 | virtualenv --no-site-packages `pwd`/env 38 | 39 | export PATH=`pwd`/env/bin:$PATH 40 | 41 | sudo -u postgres createuser -s `whoami` --no-password || true 42 | sudo -u postgres createdb changes || true 43 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | """ 3 | changes-lxc-wrapper 4 | =================== 5 | 6 | :copyright: (c) 2014 Dropbox, Inc. 
7 | """ 8 | 9 | from setuptools import setup, find_packages 10 | 11 | tests_require = [ 12 | 'coverage', 13 | 'mock>=1.0.1,<1.1.0', 14 | 'pytest>=2.6.1,<2.7.0', 15 | ] 16 | 17 | install_requires = [ 18 | 'raven>=5.0.0,<5.1.0', 19 | ] 20 | 21 | setup( 22 | name='changes-lxc-wrapper', 23 | version='0.1.0', 24 | author='Dropbox, Inc', 25 | description='', 26 | long_description=__doc__, 27 | packages=find_packages(), 28 | zip_safe=False, 29 | install_requires=install_requires, 30 | extras_require={'tests': tests_require}, 31 | tests_require=tests_require, 32 | entry_points={ 33 | 'console_scripts': [ 34 | 'changes-lxc = changes_lxc_wrapper.cli.helper:main', 35 | 'changes-lxc-wrapper = changes_lxc_wrapper.cli.wrapper:main', 36 | 'changes-snapshot-manager = changes_lxc_wrapper.cli.manager:main', 37 | ], 38 | }, 39 | include_package_data=True, 40 | classifiers=[ 41 | '__DO NOT UPLOAD__', 42 | 'Programming Language :: Python', 43 | 'Programming Language :: Python :: 3', 44 | 'Intended Audience :: Developers', 45 | 'Intended Audience :: System Administrators', 46 | 'Operating System :: OS Independent', 47 | 'Topic :: Software Development' 48 | ], 49 | ) 50 | -------------------------------------------------------------------------------- /tests/test_snapshot_cache.py: -------------------------------------------------------------------------------- 1 | import os.path 2 | 3 | from mock import Mock 4 | from subprocess import check_call 5 | from uuid import UUID 6 | 7 | from changes_lxc_wrapper.snapshot_cache import SnapshotCache 8 | 9 | 10 | CACHE_PATH = '/tmp/changes-lxc-wrapper-snapshot-cache-test' 11 | 12 | 13 | def setup_dummy_cache(path): 14 | snapshot_1_id = '311a862b-dd15-4c44-90f1-fa95a7621860' 15 | snapshot_2_id = 'af986ceb-6640-4b69-b722-42df633ed0b7' 16 | 17 | check_call(['rm', '-rf', path]) 18 | check_call(['mkdir', '-p', '{}/ubuntu/precise/i386'.format(path)]) 19 | check_call(['mkdir', '-p', '{}/ubuntu/precise/i386/{}'.format(path, snapshot_1_id)]) 20 | check_call(['mkdir', '-p', '{}/ubuntu/precise/i386/{}'.format(path, snapshot_2_id)]) 21 | with open('{}/ubuntu/precise/i386/{}/foo'.format(path, snapshot_2_id), 'w') as fp: 22 | fp.write('12345') 23 | 24 | 25 | def test_simple(): 26 | mock_api = Mock() 27 | mock_api.list_snapshots.return_value = [] 28 | 29 | setup_dummy_cache(CACHE_PATH) 30 | 31 | cache = SnapshotCache(CACHE_PATH, mock_api) 32 | cache.initialize() 33 | 34 | cache.snapshots.sort(key=lambda x: x.id) 35 | assert len(cache.snapshots) == 2 36 | assert cache.snapshots[0].id == UUID('311a862b-dd15-4c44-90f1-fa95a7621860') 37 | assert cache.snapshots[0].path == '{}/ubuntu/precise/i386/311a862b-dd15-4c44-90f1-fa95a7621860'.format(CACHE_PATH) 38 | assert cache.snapshots[0].size == 0 39 | assert cache.snapshots[1].id == UUID('af986ceb-6640-4b69-b722-42df633ed0b7') 40 | assert cache.snapshots[1].path == '{}/ubuntu/precise/i386/af986ceb-6640-4b69-b722-42df633ed0b7'.format(CACHE_PATH) 41 | assert cache.snapshots[1].size == 5 42 | assert cache.total_size == 5 43 | 44 | cache.remove(cache.snapshots[1]) 45 | 46 | assert len(cache.snapshots) == 1 47 | assert cache.snapshots[0].id == UUID('311a862b-dd15-4c44-90f1-fa95a7621860') 48 | assert cache.total_size == 0 49 | 50 | assert not os.path.exists('{}/ubuntu/precise/i386/af986ceb-6640-4b69-b722-42df633ed0b7'.format(CACHE_PATH)) 51 | -------------------------------------------------------------------------------- /README.rst: -------------------------------------------------------------------------------- 1 | Changes LXC Wrapper 2 | 
------------------- 3 | 4 | Automates launching LXC containers for running Changes builds. 5 | 6 | Requirements 7 | ============ 8 | 9 | - LXC 1.0 10 | - AWS CLI Tools (for snapshot integration) 11 | 12 | Development 13 | =========== 14 | 15 | Provision the Vagrant VM:: 16 | 17 | $ vagrant up --provision 18 | 19 | This will install various system dependencies as well as set up a symlink 20 | for the ``changes-lxc-wrapper`` package. 21 | 22 | Run a Build 23 | =========== 24 | 25 | Provision and use an Ubuntu minimal install:: 26 | 27 | $ changes-lxc-wrapper 28 | 29 | .. note:: You will likely need to run these commands as root; if you're 30 | passing AWS credentials via environment variables, you'll want to run 31 | everything with ``sudo -E``. 32 | 33 | To use a snapshot rather than bootstrapping a fresh container, add ``--snapshot``:: 34 | 35 | $ changes-lxc-wrapper \ 36 | --snapshot 65072990854348a1a80c94bb0b6089e5 37 | 38 | When running in production, you'll pass two values which automatically 39 | specify the project and snapshot for you:: 40 | 41 | $ changes-lxc-wrapper \ 42 | --api-url https://changes.example.com/api/0/ \ 43 | --jobstep-id 65072990854348a1a80c94bb0b6089e5 44 | 45 | 46 | Creating a snapshot 47 | =================== 48 | 49 | This will create a ``meta.tar.xz`` and a ``rootfs.tar.xz``:: 50 | 51 | $ changes-lxc-wrapper \ 52 | --snapshot 65072990854348a1a80c94bb0b6089e5 \ 53 | --save-snapshot \ 54 | --clean 55 | 56 | To rebuild the cached Ubuntu minimal install base rootfs, pass ``--flush-cache``. 57 | 58 | .. note:: You **must** pass ``--clean`` if you're passing an explicit ``--snapshot``. 59 | 60 | Run Command 61 | =========== 62 | 63 | Simply launch a container and run a command:: 64 | 65 | $ changes-lxc-wrapper \ 66 | -- echo "hello world" 67 | 68 | 69 | Running the Sample Build 70 | ======================== 71 | 72 | Assuming you're using the VM, log in and jump into ``/vagrant/``.
Once there, you can run the following: 73 | 74 | $ sudo changes-lxc-wrapper --script examples/changes 75 | -------------------------------------------------------------------------------- /changes_lxc_wrapper/api.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import json 3 | import time 4 | 5 | from urllib.error import URLError 6 | from urllib.parse import urlencode 7 | from urllib.request import urlopen 8 | 9 | 10 | class BuildCancelled(Exception): 11 | pass 12 | 13 | 14 | class ChangesApi(object): 15 | def __init__(self, base_url): 16 | self.base_url = base_url.rstrip('/') 17 | 18 | def request(self, path, data=None, max_retries=5): 19 | if isinstance(data, dict): 20 | data = urlencode(data).encode('utf-8') 21 | 22 | url = '{}/{}'.format(self.base_url, path.lstrip('/')) 23 | logging.info('Making request to %s', url) 24 | for retry_num in range(max_retries): 25 | try: 26 | fp = urlopen(url, data=data, timeout=5) 27 | 28 | body = fp.read().decode('utf-8') 29 | return json.loads(body) 30 | except URLError as e: 31 | code = getattr(e, 'code', None) 32 | if code == 404: 33 | # this suggests that a primary key is wrong, or the 34 | # base url is incorrect 35 | raise 36 | 37 | if code == 410: 38 | raise BuildCancelled 39 | 40 | if retry_num == max_retries - 1: 41 | print("==> Failed request to {}".format(path)) 42 | raise 43 | 44 | retry_delay = (retry_num + 1) ** 2 45 | print("==> API request failed ({}), retrying in {}s".format( 46 | code or e, retry_delay)) 47 | time.sleep(retry_delay) 48 | 49 | def update_jobstep(self, jobstep_id, data): 50 | return self.request('/jobsteps/{}/'.format(jobstep_id), data) 51 | 52 | def get_jobstep(self, jobstep_id): 53 | return self.request('/jobsteps/{}/'.format(jobstep_id)) 54 | 55 | def update_snapshot_image(self, snapshot_id, data): 56 | return self.request('/snapshotimages/{}/'.format(snapshot_id), data) 57 | 58 | def append_log(self, jobstep_id, data): 59 | return self.request('/jobsteps/{}/logappend/'.format(jobstep_id), data) 60 | 61 | def list_snapshots(self): 62 | return self.request('/snapshots/?state=valid&per_page=0') 63 | -------------------------------------------------------------------------------- /changes_lxc_wrapper/log_reporter.py: -------------------------------------------------------------------------------- 1 | from collections import deque 2 | from functools import wraps 3 | from threading import Condition, Event, Lock 4 | 5 | 6 | def chunked(buffer, chunk_size=4096): 7 | """ 8 | Given a deque, chunk it up into ~chunk_size, but be aware of newline 9 | termination as an intended goal. 
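For example (an illustrative sketch only; exact boundaries depend on chunk_size), chunked(deque(['hello ', 'world\n', 'foo bar'])) yields 'hello world\n' followed by 'foo bar', so complete lines are kept together whenever they fit within a chunk.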
10 | """ 11 | result = '' 12 | while buffer: 13 | result += buffer.popleft() 14 | while '\n' in result: 15 | newline_pos = result.rfind('\n', 0, chunk_size) 16 | if newline_pos == -1: 17 | newline_pos = chunk_size 18 | else: 19 | newline_pos += 1 20 | yield result[:newline_pos] 21 | result = result[newline_pos:] 22 | 23 | if result: 24 | yield result 25 | 26 | 27 | def _locked(func): 28 | @wraps(func) 29 | def wrapped(self, *args, **kwargs): 30 | with self.lock: 31 | return func(self, *args, **kwargs) 32 | return wrapped 33 | 34 | 35 | class ThreadSafeDeque(deque): 36 | def __init__(self, *args, **kwargs): 37 | self.lock = Lock() 38 | super().__init__(*args, **kwargs) 39 | 40 | append = _locked(deque.append) 41 | appendleft = _locked(deque.appendleft) 42 | clear = _locked(deque.clear) 43 | extend = _locked(deque.extend) 44 | extendleft = _locked(deque.extendleft) 45 | pop = _locked(deque.pop) 46 | popleft = _locked(deque.popleft) 47 | remove = _locked(deque.remove) 48 | reverse = _locked(deque.reverse) 49 | rotate = _locked(deque.rotate) 50 | 51 | 52 | class LogReporter(object): 53 | source = 'console' 54 | 55 | def __init__(self, api, jobstep_id, source=None): 56 | self.api = api 57 | self.jobstep_id = jobstep_id 58 | if source is not None: 59 | self.source = source 60 | 61 | self.buffer = ThreadSafeDeque() 62 | self.done = Event() 63 | self.cv = Condition() 64 | 65 | def process(self): 66 | with self.cv: 67 | self.done.clear() 68 | while not self.done.is_set() or self.buffer: 69 | for chunk in chunked(self.buffer): 70 | self.api.append_log(self.jobstep_id, { 71 | 'text': chunk, 72 | 'source': self.source, 73 | }) 74 | if not self.done.is_set(): 75 | self.cv.wait(5) 76 | 77 | def write(self, chunk): 78 | with self.cv: 79 | self.buffer.append(chunk) 80 | self.cv.notifyAll() 81 | 82 | def close(self): 83 | with self.cv: 84 | self.done.set() 85 | self.cv.notifyAll() 86 | 87 | def flush(self): 88 | pass 89 | -------------------------------------------------------------------------------- /changes_lxc_wrapper/snapshot_cache.py: -------------------------------------------------------------------------------- 1 | import os.path 2 | import shutil 3 | 4 | from datetime import datetime 5 | from uuid import UUID 6 | 7 | 8 | DATETIME_FORMAT = "%Y-%m-%dT%H:%M:%S.%f" 9 | 10 | 11 | def get_directory_size(path): 12 | total_size = 0 13 | for dirpath, dirnames, filenames in os.walk(path): 14 | for f in filenames: 15 | fp = os.path.join(dirpath, f) 16 | total_size += os.path.getsize(fp) 17 | return total_size 18 | 19 | 20 | def convert_date(value): 21 | return datetime.strptime(value, DATETIME_FORMAT) 22 | 23 | 24 | class SnapshotImage(object): 25 | def __init__(self, id, path, date_created=None, is_active=None, 26 | is_valid=True, project=None): 27 | self.id = id 28 | self.path = path 29 | self.size = get_directory_size(path) 30 | self.date_created = date_created 31 | self.is_active = is_active 32 | self.is_valid = is_valid 33 | self.project = project 34 | 35 | 36 | class SnapshotCache(object): 37 | def __init__(self, root, api): 38 | self.api = api 39 | self.root = root 40 | self.snapshots = [] 41 | 42 | def initialize(self): 43 | print("==> Initializing snapshot cache") 44 | # find all valid snapshot paths 45 | path_list = self._collect_files(self.root) 46 | 47 | upstream_data = {} 48 | if path_list: 49 | # get upstream metadata 50 | print("==> Fetching upstream metadata") 51 | for snapshot in self.api.list_snapshots(): 52 | for image in snapshot['images']: 53 | upstream_data[UUID(image['id'])] = { 54 | 
'project': UUID(snapshot['project']['id']), 55 | 'date_created': convert_date(snapshot['dateCreated']), 56 | 'is_active': snapshot['isActive'], 57 | } 58 | 59 | # collect size information for each path 60 | snapshot_list = [] 61 | for path in path_list: 62 | id_ = UUID(path.rsplit('/', 1)[-1]) 63 | path_data = upstream_data.get(id_, {}) 64 | snapshot_list.append(SnapshotImage( 65 | id=id_, 66 | path=path, 67 | is_active=path_data.get('is_active', False), 68 | date_created=path_data.get('date_created'), 69 | is_valid=bool(path_data), 70 | project=path_data.get('project'), 71 | )) 72 | 73 | self.snapshots = snapshot_list 74 | 75 | print("==> {} items found in cache ({} bytes)".format(len(self.snapshots), self.total_size)) 76 | 77 | @property 78 | def total_size(self): 79 | return sum(s.size for s in self.snapshots) 80 | 81 | def remove(self, snapshot, on_disk=True): 82 | assert not snapshot.is_active 83 | print("==> Removing snapshot: {}".format(snapshot.id)) 84 | if on_disk: 85 | shutil.rmtree(snapshot.path) 86 | self.snapshots.remove(snapshot) 87 | 88 | def _collect_files(self, root): 89 | # The root will consist of three subdirs, depicting the dist, release, 90 | # and arch. i.e. ubuntu/precise/amd64/ 91 | # We need to collect all children that are three levels deep 92 | if not os.path.exists(root): 93 | return [] 94 | 95 | def _r_collect_files(path, _stack=None, _depth=1): 96 | if _stack is None: 97 | _stack = [] 98 | for name in os.listdir(path): 99 | name_path = os.path.join(path, name) 100 | if not os.path.isdir(name_path): 101 | continue 102 | if _depth <= 3: 103 | _r_collect_files(name_path, _stack, _depth + 1) 104 | else: 105 | _stack.append(name_path) 106 | return _stack 107 | 108 | return _r_collect_files(root) 109 | -------------------------------------------------------------------------------- /tests/cli/test_wrapper.py: -------------------------------------------------------------------------------- 1 | import threading 2 | 3 | from mock import patch 4 | from uuid import uuid4 5 | 6 | from changes_lxc_wrapper.cli.wrapper import WrapperCommand 7 | 8 | 9 | def generate_jobstep_data(): 10 | # this must generic a *valid* dataset that should result in a full 11 | # run 12 | return { 13 | 'status': {'id': 'queued'}, 14 | 'data': {}, 15 | 'expectedSnapshot': None, 16 | 'snapshot': { 17 | 'id': 'a1028849e8cf4ff0a7d7fdfe3c4fe925', 18 | }, 19 | } 20 | 21 | 22 | def setup_function(function): 23 | assert threading.activeCount() == 1 24 | 25 | 26 | def teardown_function(function): 27 | assert threading.activeCount() == 1 28 | 29 | 30 | @patch.object(WrapperCommand, 'run_build_script') 31 | def test_local_run(mock_run): 32 | command = WrapperCommand([ 33 | '--', 'echo 1', 34 | ]) 35 | command.run() 36 | 37 | mock_run.assert_called_once_with( 38 | release='precise', 39 | post_launch=None, 40 | snapshot=None, 41 | save_snapshot=False, 42 | s3_bucket=None, 43 | pre_launch=None, 44 | validate=True, 45 | user='ubuntu', 46 | cmd=['echo 1'], 47 | script=None, 48 | flush_cache=False, 49 | clean=False, 50 | keep=False, 51 | ) 52 | 53 | 54 | @patch('changes_lxc_wrapper.cli.wrapper.ChangesApi') 55 | @patch.object(WrapperCommand, 'run_build_script') 56 | def test_remote_run(mock_run, mock_api_cls): 57 | jobstep_id = uuid4() 58 | 59 | jobstep_data = generate_jobstep_data() 60 | 61 | mock_api = mock_api_cls.return_value 62 | mock_api.get_jobstep.return_value = jobstep_data 63 | 64 | command = WrapperCommand([ 65 | '--jobstep-id', jobstep_id.hex, 66 | '--api-url', 'http://changes.example.com', 67 | ]) 68 
| command.run() 69 | 70 | mock_run.assert_called_once_with( 71 | release='precise', 72 | post_launch=None, 73 | snapshot='a1028849-e8cf-4ff0-a7d7-fdfe3c4fe925', 74 | save_snapshot=False, 75 | s3_bucket=None, 76 | pre_launch=None, 77 | validate=True, 78 | user='ubuntu', 79 | cmd=['changes-client', '--server', 'http://changes.example.com', '--jobstep_id', jobstep_id.hex], 80 | flush_cache=False, 81 | clean=False, 82 | keep=False, 83 | ) 84 | 85 | 86 | @patch('changes_lxc_wrapper.cli.wrapper.ChangesApi') 87 | @patch.object(WrapperCommand, 'run_build_script') 88 | def test_already_finished_job(mock_run, mock_api_cls): 89 | jobstep_id = uuid4() 90 | 91 | jobstep_data = generate_jobstep_data() 92 | jobstep_data['status']['id'] = 'finished' 93 | 94 | mock_api = mock_api_cls.return_value 95 | mock_api.get_jobstep.return_value = jobstep_data 96 | 97 | command = WrapperCommand([ 98 | '--jobstep-id', jobstep_id.hex, 99 | '--api-url', 'http://changes.example.com', 100 | ]) 101 | command.run() 102 | 103 | assert not mock_run.called 104 | 105 | 106 | @patch('changes_lxc_wrapper.cli.wrapper.ChangesApi') 107 | @patch.object(WrapperCommand, 'run_build_script') 108 | def test_non_default_release(mock_run, mock_api_cls): 109 | jobstep_id = uuid4() 110 | 111 | jobstep_data = generate_jobstep_data() 112 | jobstep_data['data']['release'] = 'fakerelease' 113 | 114 | mock_api = mock_api_cls.return_value 115 | mock_api.get_jobstep.return_value = jobstep_data 116 | 117 | command = WrapperCommand([ 118 | '--jobstep-id', jobstep_id.hex, 119 | '--api-url', 'http://changes.example.com', 120 | ]) 121 | command.run() 122 | 123 | mock_run.assert_called_once_with( 124 | release='fakerelease', 125 | post_launch=None, 126 | snapshot='a1028849-e8cf-4ff0-a7d7-fdfe3c4fe925', 127 | save_snapshot=False, 128 | s3_bucket=None, 129 | pre_launch=None, 130 | validate=True, 131 | user='ubuntu', 132 | cmd=['changes-client', '--server', 'http://changes.example.com', '--jobstep_id', jobstep_id.hex], 133 | flush_cache=False, 134 | clean=False, 135 | keep=False, 136 | ) 137 | -------------------------------------------------------------------------------- /changes_lxc_wrapper/cli/helper.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import argparse 4 | import logging 5 | 6 | from raven.handlers.logging import SentryHandler 7 | from uuid import UUID 8 | 9 | from ..container import Container 10 | 11 | 12 | DESCRIPTION = "LXC helper for running Changes jobs" 13 | 14 | DEFAULT_RELEASE = 'precise' 15 | 16 | DEFAULT_USER = 'ubuntu' 17 | 18 | 19 | class CommandError(Exception): 20 | pass 21 | 22 | 23 | class HelperCommand(object): 24 | def __init__(self, argv=None): 25 | self.argv = argv 26 | 27 | def get_arg_parser(self): 28 | parser = argparse.ArgumentParser(description=DESCRIPTION) 29 | parser.add_argument('--log-level', default='WARN') 30 | 31 | subparsers = parser.add_subparsers(dest='command') 32 | launch_parser = subparsers.add_parser('launch', help='Launch a new container') 33 | launch_parser.add_argument( 34 | 'name', nargs='?', type=str, 35 | help="Container name") 36 | launch_parser.add_argument( 37 | '--snapshot', '-s', type=UUID, 38 | help="Snapshot ID of the container") 39 | launch_parser.add_argument( 40 | '--release', '-r', default=DEFAULT_RELEASE, 41 | help="Release") 42 | launch_parser.add_argument( 43 | '--no-validate', action='store_false', default=True, dest='validate', 44 | help="Don't validate downloaded images") 45 | launch_parser.add_argument( 46 | 
'--clean', action='store_true', default=False, 47 | help="Use a fresh container from Ubuntu minimal install") 48 | launch_parser.add_argument( 49 | '--flush-cache', action='store_true', default=False, 50 | help="Rebuild Ubuntu minimal install cache") 51 | launch_parser.add_argument( 52 | '--s3-bucket', 53 | help="S3 Bucket to store/fetch images from") 54 | launch_parser.add_argument( 55 | '--pre-launch', 56 | help="Command to run before container is launched") 57 | launch_parser.add_argument( 58 | '--post-launch', 59 | help="Command to run after container is launched") 60 | 61 | exec_parser = subparsers.add_parser('exec', help='Execute a command within a container') 62 | exec_parser.add_argument( 63 | '--user', '-u', default=DEFAULT_USER, 64 | help="User to run command as") 65 | exec_parser.add_argument( 66 | '--cwd', 67 | help="Working directory for command") 68 | exec_parser.add_argument( 69 | 'name', nargs='?', type=str, 70 | help="Container name") 71 | exec_parser.add_argument( 72 | 'cmd', nargs=argparse.REMAINDER, 73 | help="Command to run inside the container") 74 | 75 | exec_script_parser = subparsers.add_parser('exec-script', help='Execute a command within a container') 76 | exec_script_parser.add_argument( 77 | '--user', '-u', default=DEFAULT_USER, 78 | help="User to run command as") 79 | exec_script_parser.add_argument( 80 | '--cwd', 81 | help="Working directory for command") 82 | exec_script_parser.add_argument( 83 | 'name', nargs='?', type=str, 84 | help="Container name") 85 | exec_script_parser.add_argument( 86 | 'path', nargs=argparse.REMAINDER, 87 | help="Local script to run inside the container") 88 | 89 | destroy_parser = subparsers.add_parser('destroy', help='Destroy a running container') 90 | destroy_parser.add_argument( 91 | 'name', nargs='?', type=str, 92 | help="Container name") 93 | 94 | return parser 95 | 96 | def configure_logging(self, level): 97 | logging.basicConfig(level=level) 98 | 99 | root = logging.getLogger() 100 | root.addHandler(SentryHandler()) 101 | 102 | def run(self): 103 | parser = self.get_arg_parser() 104 | args = parser.parse_args(self.argv) 105 | 106 | try: 107 | args.cmd.remove('--') 108 | except (AttributeError, ValueError): 109 | pass 110 | 111 | self.configure_logging(args.log_level) 112 | 113 | if args.command == 'launch': 114 | self.run_launch(**vars(args)) 115 | elif args.command == 'exec': 116 | self.run_exec(**vars(args)) 117 | elif args.command == 'exec-script': 118 | self.run_exec_script(**vars(args)) 119 | elif args.command == 'destroy': 120 | self.run_destroy(**vars(args)) 121 | 122 | def run_launch(self, name, snapshot=None, release=DEFAULT_RELEASE, 123 | validate=True, s3_bucket=None, clean=False, 124 | flush_cache=False, pre_launch=None, post_launch=None, 125 | **kwargs): 126 | 127 | container = Container( 128 | name=name, 129 | snapshot=snapshot, 130 | release=release, 131 | validate=validate, 132 | s3_bucket=s3_bucket, 133 | ) 134 | 135 | container.launch( 136 | pre=pre_launch, 137 | post=post_launch, 138 | clean=clean, 139 | flush_cache=flush_cache, 140 | ) 141 | print("==> Instance successfully launched as {}".format(name)) 142 | 143 | def run_exec(self, name, cmd, user=DEFAULT_USER, cwd=None, **kwargs): 144 | container = Container( 145 | name=name, 146 | ) 147 | 148 | container.run(cmd, user=user, cwd=cwd) 149 | 150 | def run_exec_script(self, name, path, user=DEFAULT_USER, cwd=None, **kwargs): 151 | container = Container( 152 | name=name, 153 | ) 154 | 155 | container.run_script(' '.join(path), user=user, cwd=cwd) 156 | 157 
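    # A hypothetical end-to-end session with this helper CLI (installed as the
    # ``changes-lxc`` console script), tying the subcommands defined above to the
    # Container methods they dispatch to; the container name and commands are
    # made-up examples, and per the README these generally need to run as root:
    #
    #   sudo changes-lxc launch build-box --release precise
    #   sudo changes-lxc exec build-box -- uname -a
    #   sudo changes-lxc exec-script build-box examples/changes
    #   sudo changes-lxc destroy build-box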
| def run_destroy(self, name, **kwargs): 158 | container = Container( 159 | name=name, 160 | ) 161 | 162 | container.destroy() 163 | 164 | 165 | def main(): 166 | command = HelperCommand() 167 | command.run() 168 | 169 | 170 | if __name__ == '__main__': 171 | main() 172 | -------------------------------------------------------------------------------- /changes_lxc_wrapper/cli/manager.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import argparse 4 | import re 5 | 6 | from collections import defaultdict, namedtuple 7 | from datetime import datetime, timedelta 8 | 9 | from ..api import ChangesApi 10 | from ..container import SNAPSHOT_CACHE 11 | from ..snapshot_cache import SnapshotCache 12 | 13 | DESCRIPTION = "LXC snapshot manager" 14 | 15 | SnapshotInfo = namedtuple('SnapshotInfo', ['id', 'path', 'size']) 16 | 17 | 18 | def parse_size_value(value): 19 | value = value.lower() 20 | match = re.match(r'(\d+)(gb|g|mb|m|kb|k|b)?', value) 21 | if not match: 22 | raise ValueError('Unable to parse size value') 23 | 24 | number = int(match.group(1)) 25 | key = match.group(2) 26 | 27 | if key in ('gb', 'g'): 28 | return number / 1024 / 1024 / 1024 29 | elif key in ('mb', 'm'): 30 | return number / 1024 / 1024 31 | elif key in ('kb', 'k'): 32 | return number / 1024 33 | return number 34 | 35 | 36 | def parse_ttl_date(value): 37 | return datetime.utcnow() - timedelta(seconds=int(value)) 38 | 39 | 40 | def format_size_value(value): 41 | if value > 1024 * 1024 * 1024: 42 | return '{}GB'.format(value // 1024 // 1024 // 1024) 43 | if value > 1024 * 1024: 44 | return '{}MB'.format(value // 1024 // 1024) 45 | if value > 1024: 46 | return '{}KB'.format(value // 1024) 47 | return '{}B'.format(value) 48 | 49 | 50 | class ManagerCommand(object): 51 | """ 52 | Bound image cache to: 53 | 54 | - ttl 55 | - max-disk usage 56 | - max-disk per class 57 | 58 | Treat it as a semi-LRU: 59 | 60 | - always keep 'active' snapshots 61 | - clear out ttl'd snapshots first 62 | - next find projects exceeding max-disk per class and clear out any up 63 | to the active 64 | - finally sort remainder by size and clear out biggest first 65 | """ 66 | def __init__(self, argv=None): 67 | self.argv = argv 68 | 69 | def get_arg_parser(self): 70 | parser = argparse.ArgumentParser(description=DESCRIPTION) 71 | parser.add_argument('--cache-path', default=SNAPSHOT_CACHE) 72 | parser.add_argument('--api-url', required=True, 73 | help="API URL to Changes (i.e. 
https://changes.example.com/api/0/)") 74 | 75 | subparsers = parser.add_subparsers(dest='command') 76 | cleanup_parser = subparsers.add_parser('cleanup', help='Clean up the local snapshot cache') 77 | cleanup_parser.add_argument('--max-disk', required=True, type=parse_size_value) 78 | cleanup_parser.add_argument('--max-disk-per-class', type=parse_size_value) 79 | cleanup_parser.add_argument('--ttl', type=parse_ttl_date) 80 | cleanup_parser.add_argument('--dry-run', action='store_true', default=False) 81 | 82 | subparsers.add_parser('list', help='List the status of local snapshots') 83 | 84 | return parser 85 | 86 | def run(self): 87 | parser = self.get_arg_parser() 88 | args = parser.parse_args(self.argv) 89 | 90 | api = ChangesApi(args.api_url) 91 | cache = SnapshotCache(args.cache_path, api) 92 | cache.initialize() 93 | 94 | if args.command == 'cleanup': 95 | self.run_cleanup(cache, args) 96 | 97 | elif args.command == 'list': 98 | self.run_list(cache, args) 99 | 100 | def run_list(self, cache, args): 101 | print('-' * 80) 102 | template = '{id:41} {size:5} {is_valid:5} {project:10} {date}' 103 | print(template.format( 104 | id='ID', 105 | size='Size', 106 | is_valid='Valid', 107 | project='Project', 108 | date='Date', 109 | )) 110 | print('-' * 80) 111 | for snapshot in cache.snapshots: 112 | print(template.format( 113 | id=str(snapshot.id) if not snapshot.is_active else '* {}'.format(snapshot.id), 114 | size=format_size_value(snapshot.size), 115 | is_valid='T' if snapshot.is_valid else 'F', 116 | project=str(snapshot.project or 'n/a'), 117 | date=snapshot.date_created.date() if snapshot.date_created else 'n/a', 118 | )) 119 | 120 | def run_cleanup(self, cache, args): 121 | 122 | wipe_on_disk = not args.dry_run 123 | 124 | if not wipe_on_disk: 125 | print("==> DRY RUN: Not removing files on disk") 126 | 127 | # find snapshot data within Changes 128 | snapshots_by_class = defaultdict(list) 129 | used_space_by_class = defaultdict(int) 130 | 131 | def get_sort_value(snapshot): 132 | if snapshot.date_created: 133 | return int(snapshot.date_created.strftime('%s')) 134 | else: 135 | return 0 136 | 137 | for snapshot in sorted(cache.snapshots, key=get_sort_value): 138 | # this snapshot is unknown or has been invalidated 139 | if snapshot.is_active: 140 | continue 141 | 142 | if not snapshot.is_valid: 143 | cache.remove(snapshot, wipe_on_disk) 144 | continue 145 | 146 | # check ttl to see if we can safely remove it 147 | elif args.ttl and snapshot.date_created < args.ttl: 148 | cache.remove(snapshot, wipe_on_disk) 149 | continue 150 | 151 | # add size to class pool for later determination 152 | used_space_by_class[snapshot.project] += snapshot.size 153 | snapshots_by_class[snapshot.project].append(snapshot) 154 | 155 | if args.max_disk_per_class: 156 | for project_id, class_size in used_space_by_class.items(): 157 | # keep removing old snapshots until we're under the threshold 158 | while class_size > args.max_disk_per_class: 159 | snapshot = snapshots_by_class.pop(0) 160 | if snapshot.is_active: 161 | continue 162 | cache.remove(snapshot, wipe_on_disk) 163 | class_size -= snapshot.size 164 | 165 | # finally, ensure we're under our disk threshold or remove snapshots 166 | # based on their size 167 | # TODO(dcramer): we could optimize this to more evenly remove snapshots 168 | snapshot_size_iter = iter(sorted( 169 | cache.snapshots, key=lambda x: x.size, reverse=True)) 170 | 171 | while cache.total_size > args.max_disk: 172 | try: 173 | snapshot = next(snapshot_size_iter) 174 | except 
StopIteration: 175 | break 176 | if snapshot.is_active: 177 | continue 178 | cache.remove(snapshot, wipe_on_disk) 179 | 180 | 181 | def main(): 182 | command = ManagerCommand() 183 | command.run() 184 | 185 | 186 | if __name__ == '__main__': 187 | main() 188 | -------------------------------------------------------------------------------- /changes_lxc_wrapper/container.py: -------------------------------------------------------------------------------- 1 | import lxc 2 | import os 3 | import shutil 4 | import socket 5 | import subprocess 6 | 7 | from time import time 8 | from uuid import uuid4 9 | 10 | SNAPSHOT_CACHE = '/var/cache/lxc/download' 11 | 12 | 13 | class Container(lxc.Container): 14 | def __init__(self, name, release=None, snapshot=None, validate=True, 15 | s3_bucket=None, *args, **kwargs): 16 | self.snapshot = snapshot 17 | self.release = release 18 | self.s3_bucket = s3_bucket 19 | 20 | # This will be the hostname inside the container 21 | self.utsname = snapshot or str(uuid4()) 22 | 23 | self.validate = validate 24 | 25 | # Randomize container name to prevent clobbering 26 | super().__init__(name, *args, **kwargs) 27 | 28 | @property 29 | def rootfs(self): 30 | """ May be real path or overlayfs:base-dir:delta-dir """ 31 | return self.get_config_item('lxc.rootfs').split(':')[-1] 32 | 33 | def get_home_dir(self, user): 34 | return '/root' if user == 'root' else '/home/{}'.format(user) 35 | 36 | def get_image_path(self, snapshot): 37 | return "{dist}/{release}/{arch}/{snapshot}".format( 38 | dist='ubuntu', 39 | arch='amd64', 40 | release=self.release, 41 | snapshot=snapshot, 42 | ) 43 | 44 | def ensure_image_cached(self, snapshot): 45 | """ 46 | To avoid complexity of having a sort-of public host, and to ensure we 47 | can just instead easily store images on S3 (or similar) we attempt to 48 | sync images in a similar fashion to the LXC image downloader. This means 49 | that when we attempt to run the image, the download will look for our 50 | existing cache (that we've correctly populated) and just reference the 51 | image from there. 
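As an illustration (hypothetical snapshot ID), an image for the precise release would be looked up under /var/cache/lxc/download/ubuntu/precise/amd64/<snapshot-id>/ and must contain rootfs.tar.xz, config and snapshot_id; if all three files are already present locally, no S3 download is attempted.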
52 | """ 53 | path = self.get_image_path(snapshot) 54 | 55 | local_path = "/var/cache/lxc/download/{}".format(path) 56 | # list of files required to avoid network hit 57 | file_list = [ 58 | 'rootfs.tar.xz', 59 | 'config', 60 | 'snapshot_id', 61 | ] 62 | if all(os.path.exists(os.path.join(local_path, f)) for f in file_list): 63 | return 64 | 65 | assert self.s3_bucket, 'Missing S3 bucket configuration' 66 | 67 | if not os.path.exists(local_path): 68 | os.makedirs(local_path) 69 | 70 | remote_path = "s3://{}/{}".format(self.s3_bucket, path) 71 | 72 | print("==> Downloading image {}".format(snapshot)) 73 | start = time() 74 | assert not subprocess.call( 75 | ["aws", "s3", "sync", remote_path, local_path], 76 | env=os.environ.copy(), 77 | ), "Failed to download image {}".format(remote_path) 78 | stop = time() 79 | print("==> Image {} downloaded in {}s".format( 80 | snapshot, int((stop - start) * 100) / 100)) 81 | 82 | def upload_image(self, snapshot): 83 | assert self.s3_bucket, 'Missing S3 bucket configuration' 84 | 85 | path = self.get_image_path(snapshot) 86 | local_path = "{}/{}".format(SNAPSHOT_CACHE, path) 87 | remote_path = "s3://{}/{}".format(self.s3_bucket, path) 88 | 89 | start = time() 90 | print("==> Uploading image {}".format(snapshot)) 91 | assert not subprocess.call( 92 | ["aws", "s3", "sync", local_path, remote_path], 93 | env=os.environ.copy(), 94 | ), "Failed to upload image {}".format(remote_path) 95 | stop = time() 96 | print("==> Image {} uploaded in {}s".format( 97 | snapshot, int((stop - start) * 100) / 100)) 98 | 99 | def run_script(self, script_path, **kwargs): 100 | """ 101 | Runs a local script within the container. 102 | """ 103 | assert os.path.isfile(script_path), "Cannot find local script {}".format(script_path) 104 | new_name = os.path.join("tmp", "script-{}".format(uuid4().hex)) 105 | print("==> Writing local script {} as /{}".format(script_path, new_name)) 106 | shutil.copy(script_path, os.path.join(self.rootfs, new_name)) 107 | script_path = '/' + new_name 108 | assert self.run(['chmod', '0755', script_path], quiet=True) == 0 109 | assert self.run([script_path], **kwargs) == 0 110 | 111 | def run(self, cmd, cwd=None, env=None, user='root', quiet=False): 112 | assert self.running, "Cannot run cmd in non-RUNNING container" 113 | 114 | home_dir = self.get_home_dir(user) 115 | if cwd is None: 116 | cwd = home_dir 117 | else: 118 | cwd = '/' 119 | 120 | def run(args): 121 | cmd, cwd, env = args 122 | 123 | new_env = { 124 | # TODO(dcramer): HOME is pretty hacky here 125 | 'USER': user, 126 | 'HOME': home_dir, 127 | 'PWD': cwd, 128 | 'DEBIAN_FRONTEND': 'noninteractive', 129 | 'LXC_NAME': self.name, 130 | 'HOST_HOSTNAME': socket.gethostname(), 131 | 'PATH': '/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin', 132 | } 133 | if env: 134 | new_env.update(env) 135 | 136 | if user != 'root': 137 | cmd = ['sudo', '-EHu', user] + cmd 138 | 139 | return subprocess.call(cmd, cwd=cwd, env=new_env) 140 | 141 | if not quiet: 142 | print("==> Running: {}".format(cmd)) 143 | 144 | ret_code = self.attach_wait(run, (cmd, cwd, env), env_policy=lxc.LXC_ATTACH_CLEAR_ENV) 145 | 146 | if not quiet: 147 | print("==> Command exited: {}".format(ret_code)) 148 | 149 | return ret_code 150 | 151 | def install(self, pkgs): 152 | assert self.run(["apt-get", "update", "-y", "--fix-missing"]) == 0, \ 153 | "Failed updating apt resources" 154 | return self.run(["apt-get", "install", "-y", "--force-yes"] + pkgs) 155 | 156 | def setup_sudoers(self, user='ubuntu'): 157 | sudoers_path = 
os.path.join(self.rootfs, 'etc', 'sudoers') 158 | 159 | with open(sudoers_path, 'w') as fp: 160 | fp.write('Defaults env_reset\n') 161 | fp.write('Defaults !requiretty\n\n') 162 | fp.write('# Allow all sudoers.\n') 163 | fp.write('ALL ALL=(ALL) NOPASSWD:ALL'.format(user)) 164 | 165 | subprocess.call(['chmod', '0440', sudoers_path]) 166 | 167 | return True 168 | 169 | def launch(self, pre=None, post=None, clean=False, flush_cache=False): 170 | """ Launch a container 171 | 172 | If we have a snapshot, attempt to download and extract the image to clone. 173 | Without a snapshot, generate a container from ubuntu minimal install. 174 | """ 175 | 176 | if self.snapshot and not clean: 177 | if self.snapshot not in lxc.list_containers(): 178 | self.ensure_image_cached(snapshot=self.snapshot) 179 | 180 | create_args = [ 181 | '--dist', 'ubuntu', 182 | '--release', self.release, 183 | '--arch', 'amd64', 184 | '--variant', self.snapshot, 185 | ] 186 | if not self.validate: 187 | create_args.extend(['--no-validate']) 188 | 189 | base = lxc.Container(self.snapshot) 190 | assert base.create('download', args=create_args), ( 191 | "Failed to load cached image: {}".format(self.snapshot)) 192 | else: 193 | base = lxc.Container(self.snapshot) 194 | 195 | print("==> Overlaying container: {}".format(self.snapshot)) 196 | assert base.clone(self.name, flags=lxc.LXC_CLONE_KEEPNAME | lxc.LXC_CLONE_SNAPSHOT), ( 197 | "Failed to clone: {}".format(self.snapshot)) 198 | assert self.load_config(), "Unable to reload container config" 199 | else: 200 | create_args = [ 201 | '--release', self.release, 202 | '--arch', 'amd64', 203 | ] 204 | if flush_cache: 205 | create_args.extend(['--flush-cache']) 206 | 207 | print("==> Creating container") 208 | assert self.create('ubuntu', args=create_args), \ 209 | "Failed to create container. Try running this command as root." 
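        # A minimal sketch of a --pre-launch hook (hypothetical script, not part of
        # this repo): it runs on the host, from the container rootfs directory, with
        # LXC_ROOTFS and LXC_NAME exported as shown just below, e.g. to seed an apt
        # proxy into the rootfs before the container is started:
        #
        #   #!/bin/bash -eux
        #   echo 'Acquire::http::Proxy "http://10.0.3.1:3142";' \
        #       > "$LXC_ROOTFS/etc/apt/apt.conf.d/02proxy"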
210 | 211 | if pre: 212 | pre_env = dict(os.environ, LXC_ROOTFS=self.rootfs, LXC_NAME=self.name) 213 | subprocess.check_call(pre, cwd=self.rootfs, env=pre_env) 214 | 215 | # XXX: More or less disable apparmor 216 | assert self.set_config_item("lxc.aa_profile", "unconfined") 217 | # Allow loop/squashfs in container 218 | assert self.append_config_item('lxc.cgroup.devices.allow', 'c 10:137 rwm') 219 | assert self.append_config_item('lxc.cgroup.devices.allow', 'b 6:* rwm') 220 | 221 | print("==> Starting container") 222 | assert self.start(), "Failed to start base container" 223 | 224 | print("==> Waiting for container to startup networking") 225 | assert self.get_ips(family='inet', timeout=30), "Failed to connect to container" 226 | 227 | print("==> Install ca-certificates") 228 | assert self.install(["ca-certificates"]) == 0 229 | 230 | print("==> Setting up sudoers") 231 | assert self.setup_sudoers(), "Failed to setup sudoers" 232 | 233 | if post: 234 | # Naively check if trying to run a file that exists outside the container 235 | self.run_script(post) 236 | 237 | def create_image(self): 238 | snapshot = self.snapshot or str(uuid4()) 239 | dest = "/var/cache/lxc/download/{}".format( 240 | self.get_image_path(snapshot)) 241 | 242 | print("==> Stopping container") 243 | self.stop() 244 | 245 | assert self.wait('STOPPED', timeout=30) 246 | 247 | print("==> Saving snapshot to {}".format(dest)) 248 | if not os.path.exists(dest): 249 | os.makedirs(dest) 250 | 251 | print("==> Creating metadata") 252 | with open(os.path.join(dest, "config"), "w") as fp: 253 | fp.write("lxc.include = LXC_TEMPLATE_CONFIG/ubuntu.common.conf\n") 254 | fp.write("lxc.arch = x86_64\n") 255 | 256 | rootfs_txz = os.path.join(dest, "rootfs.tar.xz") 257 | 258 | print("==> Creating rootfs.tar.xz") 259 | subprocess.check_call(["tar", "-Jcf", rootfs_txz, 260 | "-C", self.get_config_item('lxc.rootfs'), 261 | "."]) 262 | 263 | with open(os.path.join(dest, "snapshot_id"), 'w') as fp: 264 | fp.write(self.utsname) 265 | 266 | return snapshot 267 | 268 | def destroy(self, timeout=-1): 269 | if not self.defined: 270 | print("==> No container to destroy") 271 | return 272 | 273 | if self.running: 274 | print("==> Container is running, stop it first") 275 | self.stop() 276 | print("==> Wait for container to stop") 277 | self.wait('STOPPED', timeout=timeout) 278 | 279 | print("==> Destroying container") 280 | super().destroy() 281 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. 
For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 
134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright 2014 Dropbox, Inc. 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 
193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /changes_lxc_wrapper/cli/wrapper.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import argparse 4 | import logging 5 | import sys 6 | import traceback 7 | 8 | from raven.handlers.logging import SentryHandler 9 | from threading import Thread 10 | from time import sleep 11 | from uuid import UUID, uuid4 12 | 13 | from ..api import ChangesApi 14 | from ..container import Container 15 | from ..log_reporter import LogReporter 16 | 17 | 18 | DESCRIPTION = "LXC Wrapper for running Changes jobs" 19 | 20 | DEFAULT_RELEASE = 'precise' 21 | 22 | 23 | class CommandError(Exception): 24 | pass 25 | 26 | 27 | class WrappedOutput(object): 28 | def __init__(self, stream, reporter): 29 | self.stream = stream 30 | self.reporter = reporter 31 | 32 | def write(self, chunk): 33 | self.stream.write(chunk) 34 | self.reporter.write(chunk) 35 | 36 | def flush(self): 37 | self.stream.flush() 38 | self.reporter.flush() 39 | 40 | 41 | class WrapperCommand(object): 42 | def __init__(self, argv=None): 43 | self.argv = argv 44 | self.stdout = sys.stdout 45 | self.stderr = sys.stderr 46 | 47 | def get_arg_parser(self): 48 | parser = argparse.ArgumentParser(description=DESCRIPTION) 49 | parser.add_argument('--snapshot', '-s', type=UUID, 50 | help="Snapshot ID of the container") 51 | parser.add_argument('--release', '-r', 52 | help="Ubuntu release (default: {})".format(DEFAULT_RELEASE)) 53 | parser.add_argument('--keep', action='store_true', default=False, 54 | help="Don't destroy the container after running cmd/build") 55 | parser.add_argument('--no-validate', action='store_false', default=True, dest='validate', 56 | help="Don't validate downloaded images") 57 | parser.add_argument('--save-snapshot', action='store_true', default=False, 58 | help="Create an image from this container") 59 | parser.add_argument('--clean', action='store_true', default=False, 60 | help="Use a fresh container from Ubuntu minimal install") 61 | parser.add_argument('--flush-cache', action='store_true', default=False, 62 | help="Rebuild Ubuntu minimal install cache") 63 | parser.add_argument('--api-url', 64 | help="API URL to Changes (i.e. 
https://changes.example.com/api/0/)") 65 | parser.add_argument('--jobstep-id', 66 | help="Jobstep ID for Changes") 67 | parser.add_argument('--pre-launch', 68 | help="Command to run before container is launched") 69 | parser.add_argument('--post-launch', 70 | help="Command to run after container is launched") 71 | parser.add_argument('--user', '-u', default='ubuntu', 72 | help="User to run command (or script) as") 73 | parser.add_argument('--script', 74 | help="Script to execute as command") 75 | parser.add_argument('--s3-bucket', 76 | help="S3 Bucket to store/fetch images from") 77 | parser.add_argument('--log-level', default='WARN') 78 | parser.add_argument('cmd', nargs=argparse.REMAINDER, 79 | help="Command to run inside the container") 80 | return parser 81 | 82 | def configure_logging(self, level): 83 | logging.basicConfig(level=level) 84 | 85 | root = logging.getLogger() 86 | root.addHandler(SentryHandler()) 87 | 88 | def patch_system_logging(self, reporter): 89 | sys.stdout = WrappedOutput(sys.stdout, reporter) 90 | sys.stderr = WrappedOutput(sys.stderr, reporter) 91 | 92 | def run(self): 93 | parser = self.get_arg_parser() 94 | args = parser.parse_args(self.argv) 95 | 96 | try: 97 | args.cmd.remove('--') 98 | except ValueError: 99 | pass 100 | 101 | self.configure_logging(args.log_level) 102 | 103 | if args.jobstep_id: 104 | return self.run_remote(args) 105 | return self.run_local(args) 106 | 107 | def run_local(self, args): 108 | """ 109 | Run a local-only build (i.e. for testing). 110 | """ 111 | snapshot = str(args.snapshot) if args.snapshot else None 112 | release = args.release or DEFAULT_RELEASE 113 | 114 | self.run_build_script( 115 | snapshot=snapshot, 116 | release=release, 117 | validate=args.validate, 118 | s3_bucket=args.s3_bucket, 119 | pre_launch=args.pre_launch, 120 | post_launch=args.post_launch, 121 | clean=args.clean, 122 | flush_cache=args.flush_cache, 123 | save_snapshot=args.save_snapshot, 124 | user=args.user, 125 | cmd=args.cmd, 126 | script=args.script, 127 | keep=args.keep, 128 | ) 129 | 130 | def run_remote(self, args): 131 | """ 132 | Run a build script from upstream (Changes), pulling any required 133 | information from the remote server, as well as pushing up status 134 | changes and log information. 
135 | """ 136 | if not args.api_url: 137 | raise CommandError('jobstep_id passed, but missing api_url') 138 | 139 | # we wrap the actual run routine to make it easier to catch 140 | # top level exceptions and report them via the log 141 | def inner_run(api, jobstep_id): 142 | try: 143 | # fetch build information to set defaults for things like snapshot 144 | # TODO(dcramer): make this support a small amount of downtime 145 | # TODO(dcramer): make this verify the snapshot 146 | resp = api.get_jobstep(jobstep_id) 147 | if resp['status']['id'] == 'finished': 148 | raise Exception('JobStep already marked as finished, aborting.') 149 | 150 | release = resp['data'].get('release') or DEFAULT_RELEASE 151 | 152 | # If we're expected a snapshot output we need to override 153 | # any snapshot parameters, and also ensure we're creating a clean 154 | # image 155 | if resp['expectedSnapshot']: 156 | snapshot = str(UUID(resp['expectedSnapshot']['id'])) 157 | save_snapshot = True 158 | clean = True 159 | 160 | else: 161 | if resp['snapshot']: 162 | snapshot = str(UUID(resp['snapshot']['id'])) 163 | else: 164 | snapshot = None 165 | save_snapshot = False 166 | clean = False 167 | 168 | api.update_jobstep(jobstep_id, {"status": "in_progress"}) 169 | 170 | cmd = [ 171 | 'changes-client', 172 | '--server', args.api_url, 173 | '--jobstep_id', jobstep_id, 174 | ] 175 | 176 | self.run_build_script( 177 | snapshot=snapshot, 178 | release=release, 179 | validate=args.validate, 180 | s3_bucket=args.s3_bucket, 181 | pre_launch=args.pre_launch, 182 | post_launch=args.post_launch, 183 | clean=clean, 184 | flush_cache=args.flush_cache, 185 | save_snapshot=save_snapshot, 186 | user=args.user, 187 | cmd=cmd, 188 | keep=args.keep, 189 | ) 190 | 191 | except Exception: 192 | reporter.write(traceback.format_exc()) 193 | 194 | api.update_jobstep(jobstep_id, {"status": "finished", "result": "failed"}) 195 | if args.save_snapshot: 196 | api.update_snapshot_image(snapshot, {"status": "failed"}) 197 | 198 | raise 199 | 200 | else: 201 | api.update_jobstep(jobstep_id, {"status": "finished"}) 202 | if args.save_snapshot: 203 | api.update_snapshot_image(snapshot, {"status": "active"}) 204 | 205 | api = ChangesApi(args.api_url) 206 | jobstep_id = args.jobstep_id 207 | 208 | reporter = LogReporter(api, jobstep_id) 209 | reporter_thread = Thread(target=reporter.process) 210 | reporter_thread.start() 211 | self.patch_system_logging(reporter) 212 | 213 | run_thread = Thread(target=inner_run, args=[api, jobstep_id]) 214 | run_thread.daemon = True 215 | run_thread.start() 216 | while run_thread.is_alive(): 217 | try: 218 | run_thread.join(10) 219 | except Exception: 220 | reporter.write(traceback.format_exc()) 221 | break 222 | sleep(1) 223 | 224 | if run_thread.is_alive(): 225 | reporter.write('==> Signal received from upstream, terminating.\n') 226 | # give it a second chance in case there was a race between the heartbeat 227 | # and the builder 228 | run_thread.join(5) 229 | 230 | reporter.close() 231 | 232 | reporter_thread.join(60) 233 | 234 | def run_build_script(self, snapshot, release, validate, s3_bucket, pre_launch, 235 | post_launch, clean, flush_cache, save_snapshot, 236 | user, cmd=None, script=None, keep=False): 237 | """ 238 | Run the given build script inside of the LXC container. 
239 | """ 240 | assert clean or not (save_snapshot and snapshot), \ 241 | "You cannot create a snapshot from an existing snapshot" 242 | 243 | assert not (cmd and script), \ 244 | 'Only one of cmd or script can be specified' 245 | 246 | assert cmd or script, \ 247 | 'Missing build command' 248 | 249 | container = Container( 250 | name=str(uuid4()), 251 | snapshot=snapshot, 252 | release=release, 253 | validate=validate, 254 | s3_bucket=s3_bucket, 255 | ) 256 | 257 | try: 258 | container.launch(pre_launch, post_launch, clean, flush_cache) 259 | 260 | # TODO(dcramer): we should assert only one type of command arg is set 261 | if cmd: 262 | container.run(cmd, user=user) 263 | elif script: 264 | container.run_script(script, user=user) 265 | 266 | if save_snapshot or not keep: 267 | container.stop() 268 | 269 | if save_snapshot: 270 | snapshot = container.create_image() 271 | print("==> Snapshot saved: {}".format(snapshot)) 272 | if s3_bucket: 273 | container.upload_image(snapshot=snapshot) 274 | except Exception as e: 275 | logging.exception(e) 276 | raise e 277 | finally: 278 | if not keep: 279 | container.destroy() 280 | else: 281 | print("==> Container kept at {}".format(container.rootfs)) 282 | print("==> SSH available via:") 283 | print("==> $ sudo lxc-attach --name={}".format(container.name)) 284 | 285 | 286 | def main(): 287 | command = WrapperCommand() 288 | command.run() 289 | 290 | 291 | if __name__ == '__main__': 292 | main() 293 | --------------------------------------------------------------------------------