├── riker
│   ├── __init__.py
│   ├── version.py
│   ├── utils.py
│   ├── git_helpers.py
│   ├── retry.py
│   ├── config.py
│   ├── main.py
│   ├── boto_helpers.py
│   └── api.py
├── MANIFEST.in
├── .gitignore
├── requirements.txt
├── Makefile
├── setup.py
├── README.rst
└── LICENSE.txt
/riker/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /riker/version.py: -------------------------------------------------------------------------------- 1 | VERSION = '0.2.4' 2 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include *.txt *.rst 2 | 3 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | *.pyc 2 | *.swp 3 | /riker.egg-info 4 | /dist 5 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | boto==2.31.1 2 | docopt==0.6.2 3 | Fabric==1.10.1 4 | giturlparse.py==0.0.5 5 | pybars==0.0.4 6 | tld==0.6.4 7 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | clean: 2 | -rm -Rf dist 3 | -rm -Rf build 4 | -rm -Rf riker.egg-info 5 | 6 | .PHONY: clean 7 | 8 | dist: 9 | python setup.py sdist 10 | 11 | .PHONY: dist 12 | 13 | upload: 14 | python setup.py sdist upload 15 | 16 | .PHONY: upload 17 | -------------------------------------------------------------------------------- /riker/utils.py: -------------------------------------------------------------------------------- 1 | from time import sleep 2 | 3 | class TimeoutError(Exception): 4 | def __init__(self, x): 5 | super(TimeoutError, self).__init__("Operation took longer than %s minutes" % x) 6 | 7 | def poll_for_condition(fetch_callable, condition_callable, timeout=600, poll_delay=10): 8 | done = False 9 | max_iters = int(timeout / float(poll_delay)) 10 | for i in xrange(max_iters): 11 | fetched = fetch_callable() 12 | done = condition_callable(fetched) 13 | if done: 14 | break 15 | sleep(poll_delay) 16 | if not done: 17 | raise TimeoutError(timeout / 60.0) 18 | 19 | header = '-----> ' 20 | normal = ' ' 21 | 22 | def log(level, message, show_header=False): 23 | print((header if show_header else normal) + message) 24 | 25 | def first(iterable): 26 | for x in iterable: 27 | return x 28 | return None 29 | -------------------------------------------------------------------------------- /riker/git_helpers.py: -------------------------------------------------------------------------------- 1 | from os.path import isdir, join 2 | 3 | from fabric.api import local 4 | from fabric.context_managers import settings 5 | 6 | from retry import synchronize 7 | 8 | def push_repo(remote_name='origin', branch_name=None, local_branch_name=None, auto_confirm=False, force=False): 9 | if branch_name is None: 10 | branch_name = 'master' 11 | with settings(warn_only=True): 12 | if local('git branch | grep {}'.format(local_branch_name if local_branch_name is not None else branch_name)).return_code != 0: 13 | local_branch_name = 'HEAD' 14 | full_branch_name = branch_name if local_branch_name is None else '%s:%s' % (local_branch_name, branch_name) 15 | options = ' ' 16 | if 
force: 17 | options += '-f' 18 | local('git push%s %s %s' % (options, remote_name, full_branch_name)) 19 | 20 | def clone_repo(remote_url, repo_dir, local_branch=None): 21 | branch_arg = "-b {}".format(local_branch) if local_branch is not None else '' 22 | local('git clone %s %s %s' % (branch_arg, remote_url, repo_dir)) 23 | 24 | @synchronize('ensure_remote.lock') 25 | def ensure_remote(remote_name, remote_url): 26 | remotes = [l.strip() for l in local('git remote', capture=True).split("\n")] 27 | if remote_name in remotes: 28 | local('git remote remove %s' % (remote_name,)) 29 | local('git remote add %s %s' % (remote_name, remote_url)) 30 | 31 | def get_head_commit_sha1(): 32 | return local('git rev-parse HEAD', capture=True).strip() 33 | 34 | def is_repo(repo_path): 35 | return isdir(join(repo_path, '.git')) 36 | 37 | class NotGitRepoError(Exception): 38 | pass 39 | 40 | def ensure_is_repo(repo_path): 41 | if not is_repo(repo_path): 42 | raise NotGitRepoError() 43 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | import os 2 | from setuptools import setup 3 | 4 | from riker.version import VERSION 5 | 6 | with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as readme: 7 | README = readme.read() 8 | 9 | # allow setup.py to be run from any path 10 | os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir))) 11 | 12 | setup( 13 | name='riker', 14 | version=VERSION, 15 | packages=['riker'], 16 | install_requires=[ 17 | "boto == 2.31.1", 18 | "docopt == 0.6.2", 19 | "Fabric == 1.10.1", 20 | "giturlparse.py == 0.0.5", 21 | "pybars==0.0.4", 22 | "tld==0.6.4" 23 | ], 24 | entry_points={ 25 | 'console_scripts': [ 26 | 'riker = riker.main:main', 27 | ] 28 | }, 29 | author='Jimmy Schementi', 30 | author_email='jimmy@schementi.com', 31 | url='https://github.com/jschementi/riker', 32 | description='Deploy any application to AWS', 33 | long_description=README, 34 | keywords='aws deploy paas scale', 35 | classifiers=[ 36 | 'Development Status :: 4 - Beta', 37 | 'Environment :: Console', 38 | 'Intended Audience :: Developers', 39 | 'Intended Audience :: Information Technology', 40 | 'Intended Audience :: System Administrators', 41 | 'License :: OSI Approved :: GNU General Public License v2 (GPLv2)', 42 | 'Natural Language :: English', 43 | 'Operating System :: POSIX :: Linux', 44 | 'Operating System :: MacOS :: MacOS X', 45 | 'Programming Language :: Python :: 2.7', 46 | 'Topic :: System :: Installation/Setup', 47 | 'Topic :: System :: Software Distribution', 48 | 'Topic :: System :: Systems Administration', 49 | 'Topic :: Utilities' 50 | ] 51 | ) 52 | -------------------------------------------------------------------------------- /riker/retry.py: -------------------------------------------------------------------------------- 1 | import time 2 | from functools import wraps 3 | from os.path import exists as local_exists, join 4 | 5 | from fabric.api import run, local 6 | from fabric.contrib.files import exists 7 | 8 | import config 9 | 10 | def retry(tries=10, wait=0.5, on_none=False, on_empty=False, retry_message="."): 11 | def deco_retry(f): 12 | @wraps(f) 13 | def f_retry(*args, **kwargs): 14 | i = 0 15 | while i < tries - 1: 16 | try: 17 | result = f(*args, **kwargs) 18 | if on_none and result is None: 19 | raise Exception() 20 | if on_empty and hasattr(result, '__len__') and len(result) == 0: 21 | raise Exception() 22 | return result 23 | 
except Exception: 24 | i += 1 25 | print retry_message 26 | time.sleep(wait) 27 | return f(*args, **kwargs) 28 | return f_retry 29 | return deco_retry 30 | 31 | def synchronize(lock_file_path, is_remote=False): 32 | lock_file_path = join(config.directory, lock_file_path) 33 | run_fn = run if is_remote else local 34 | exists_fn = exists if is_remote else local_exists 35 | def deco_sync(f): 36 | @wraps(f) 37 | def f_sync(*args, **kwargs): 38 | while True: 39 | if not exists_fn(lock_file_path): 40 | try: 41 | run_fn('mkdir -p $(dirname {0}) && touch {0}'.format(lock_file_path)) 42 | return f(*args, **kwargs) 43 | finally: 44 | run_fn('rm -f {}'.format(lock_file_path)) 45 | time.sleep(5) 46 | return f_sync 47 | return deco_sync 48 | 49 | -------------------------------------------------------------------------------- /README.rst: -------------------------------------------------------------------------------- 1 | ===== 2 | Riker 3 | ===== 4 | 5 | *You're the captain, and Riker is your "Number One"* 6 | 7 | 8 | Heroku-like application deployments to Amazon Web Services. 9 | 10 | 11 | Install 12 | ------- 13 | 14 | :: 15 | 16 | pip install riker 17 | 18 | 19 | Configure 20 | --------- 21 | 22 | :: 23 | 24 | riker config 25 | 26 | 27 | Usage 28 | ----- 29 | 30 | Deploy a sample app to AWS with a single command: 31 | 32 | :: 33 | 34 | # Get Python sample app 35 | git clone git@github.com:heroku/python-sample.git 36 | cd python-sample 37 | 38 | riker deploy 39 | riker open 40 | 41 | This will launch an EC2 instance running the python-sample app, and open it in 42 | your default web browser. 43 | 44 | The first time this is run in your AWS account, it will take some time, as it 45 | needs to provision a base AMI from which all EC2 instances are launched. 46 | Subsequent deploys of the same app will be very quick, and new application 47 | deployments will only need to wait for a new EC2 instance to boot. 48 | 49 | Since Riker uses Heroku Buildpacks, the app can be written in any language. 50 | 51 | 52 | You can also deploy a static website to S3 with the same command: 53 | 54 | :: 55 | 56 | # Generate simple website 57 | mkdir static-website && cd static-website 58 | echo "Hello, World" > index.html 59 | touch .s3 # indicates deployment to Amazon S3 60 | git init && git add -A && git commit -m "Initial commit" 61 | 62 | riker deploy 63 | riker open 64 | 65 | 66 | The ``.s3`` file indicates that this app should be deployed to S3. 67 | 68 | Riker also supports a production deploy mode, which ensures zero downtime for 69 | the application being deployed, and a configuration which supports auto-scaling. 70 | Usually, Riker deploys changes directly to existing instances. For a 71 | production deployment, however, Riker deploys changes to new instances, and 72 | swaps old instances out for new ones only once the new instances are healthy 73 | and the old instances no longer have active connections. 74 | 75 | :: 76 | 77 | riker deploy --scale 78 | 79 | 80 | This will deploy the app behind a load balancer and auto-scaling group. 81 | 82 | 83 | Contributing 84 | ------------ 85 | 86 | Please report bugs, suggest features, and ask questions on GitHub: 87 | https://github.com/jschementi/riker/issues 88 | 89 | Pull requests welcome! 
90 | https://github.com/jschementi/riker/pulls 91 | 92 | 93 | Additional Resources 94 | -------------------- 95 | 96 | - `Riker - Heroku-like app deployments for AWS `_ 97 | - `BrooklynJS talk: Get Your Infrastructure Right `_ 98 | -------------------------------------------------------------------------------- /riker/config.py: -------------------------------------------------------------------------------- 1 | import boto 2 | import getpass 3 | import json 4 | import uuid 5 | import os 6 | from os import path 7 | 8 | import boto_helpers 9 | 10 | directory = path.expanduser('~/.riker') 11 | 12 | def default_config(): 13 | return { 14 | "instance_user": "ubuntu", 15 | "deploy_user": "dokku", 16 | "deploy_remote_name": "riker-deploy", 17 | "os_image_id": "ami-864d84ee", 18 | "instance_type": "t2.micro", 19 | "instance_key_pair_name": "riker", 20 | "base_instance_name": "riker-base-instance-v1", 21 | "security_groups": { 22 | "riker-instance": [ 23 | ["tcp", "22", "22", "0.0.0.0/0", None], 24 | ["tcp", "80", "80", "0.0.0.0/0", None] 25 | ], 26 | "riker-load-balancer": [ 27 | ["tcp", "80", "80", "0.0.0.0/0", None], 28 | ["tcp", "443", "443", "0.0.0.0/0", None] 29 | ] 30 | }, 31 | "vpc_id": None, 32 | "subnet_id": None, 33 | "availability_zone": None, 34 | "system_name": None, 35 | } 36 | 37 | def load_config(show_output=False): 38 | config_filename = path.join(directory, 'config') 39 | aws_dirty = False 40 | if not boto.config.has_option('Credentials', 'aws_access_key_id'): 41 | aws_access_key_id = getpass.getpass('AWS Access Key ID: ') 42 | boto.config.save_user_option('Credentials', 'aws_access_key_id', aws_access_key_id) 43 | aws_dirty = True 44 | if not boto.config.has_option('Credentials', 'aws_secret_access_key'): 45 | aws_secret_access_key = getpass.getpass('AWS Secret Access Key: ') 46 | boto.config.save_user_option('Credentials', 'aws_secret_access_key', aws_secret_access_key) 47 | aws_dirty = True 48 | if aws_dirty: 49 | print "-----> AWS configuration written to {}".format(boto.pyami.config.UserConfigPath) 50 | else: 51 | if show_output: print "-----> AWS configuration unchanged, see `{}`".format(boto.pyami.config.UserConfigPath) 52 | vpc = boto.connect_vpc() 53 | try: 54 | with open(config_filename, 'r') as config_file: 55 | config = json.loads(config_file.read()) 56 | except IOError: 57 | config = default_config() 58 | dirty = False 59 | vpc_id = config.get('vpc_id') 60 | if not vpc_id: 61 | vpc_id = raw_input("AWS VPC ID (choose: {}): ".format(', '.join([v.id for v in vpc.get_all_vpcs()]))) 62 | config['vpc_id'] = vpc_id 63 | dirty = True 64 | subnet_id = config.get('subnet_id') 65 | if not subnet_id: 66 | def format_subnet(s): 67 | return '{}({})'.format(s.id, s.availability_zone) 68 | possible_subnets = vpc.get_all_subnets(filters=[('vpcId', vpc_id)]) 69 | subnet_id = raw_input("AWS VPC Subnet ID (choose: {}): ".format(', '.join(map(format_subnet, possible_subnets)))) 70 | config['subnet_id'] = subnet_id 71 | dirty = True 72 | subnet = vpc.get_all_subnets(subnet_ids=[subnet_id])[0] 73 | if config['availability_zone'] != subnet.availability_zone: 74 | config['availability_zone'] = subnet.availability_zone 75 | dirty = True 76 | system_name = config.get('system_name') 77 | if not system_name: 78 | config['system_name'] = raw_input("Name of your \"system\" (press return for a uuid): ") or uuid.uuid1().hex 79 | dirty = True 80 | if dirty: 81 | config_filename = path.join(directory, 'config') 82 | if not path.exists(directory): 83 | os.makedirs(directory) 84 | with 
open(config_filename, 'w') as config_file: 85 | config_file.write(json.dumps(config, indent=2, separators=(',', ': '))) 86 | print "-----> Riker configuration written to {}".format(config_filename) 87 | else: 88 | if show_output: print "-----> Riker configuration unchanged, see `{}`".format(config_filename) 89 | def create_sgrs(memo, kvp): 90 | name = kvp[0] 91 | rules = kvp[1] 92 | sgrs = [boto_helpers.SecurityGroupRule(*rule) for rule in rules] 93 | memo[name] = sgrs 94 | return memo 95 | config['security_groups'] = reduce(create_sgrs, config['security_groups'].iteritems(), {}) 96 | return config 97 | -------------------------------------------------------------------------------- /riker/main.py: -------------------------------------------------------------------------------- 1 | """Heroku-like deployments with AWS 2 | 3 | Usage: 4 | riker deploy [--app <app>] [--env <env>] [--scale] [--static --domain <domain>] [--force] 5 | riker create-new-ami [--app <app>] [--env <env>] 6 | riker deploy-ami [--app <app>] [--env <env>] 7 | riker update-config [--app <app>] [--env <env>] 8 | riker info [--app <app>] [--env <env>] 9 | riker ssh [--instance-id <instance-id>] 10 | riker dokku --instance-id <instance-id> <cmd>... 11 | riker open [--app <app>] [--env <env>] 12 | riker url [--app <app>] [--env <env>] 13 | riker config 14 | riker (-h | --help) 15 | riker --version 16 | 17 | Options: 18 | -a <app>, --app <app>  Name of app. 19 | -e <env>, --env <env>  Environment for app. 20 | -s, --static  Deploy to S3. 21 | -d <domain>, --domain <domain>  Domain for app. 22 | --instance-id <instance-id>  EC2 Instance ID. 23 | --scale  Enable scalable deployments. 24 | -f, --force  Force deployment. 25 | -h --help  Show this screen. 26 | --version  Show version. 27 | """ 28 | 29 | import os 30 | 31 | from fabric.network import disconnect_all 32 | 33 | from docopt import docopt 34 | 35 | import api 36 | 37 | from version import VERSION 38 | 39 | def main(): 40 | arguments = docopt(__doc__, version='riker {}'.format(VERSION)) 41 | try: 42 | if arguments.get('create-new-ami') == True: 43 | create_new_ami(arguments) 44 | elif arguments.get('deploy-ami') == True: 45 | deploy_ami(arguments) 46 | elif arguments.get('deploy') == True: 47 | deploy(arguments) 48 | elif arguments.get('update-config') == True: 49 | update_config(arguments) 50 | elif arguments.get('info') == True: 51 | get_info(arguments) 52 | elif arguments.get('ssh') == True: 53 | ssh(arguments) 54 | elif arguments.get('dokku') == True: 55 | dokku(arguments) 56 | elif arguments.get('open') == True: 57 | open_url(arguments) 58 | elif arguments.get('url') == True: 59 | get_url(arguments) 60 | elif arguments.get('config') == True: 61 | config(arguments) 62 | finally: 63 | disconnect_all() 64 | 65 | def deploy(arguments): 66 | if arguments.get('--scale') == True: 67 | create_new_ami(arguments) 68 | deploy_ami(arguments) 69 | elif api.is_static(arguments): 70 | deploy_static(arguments) 71 | else: 72 | deploy_to_single_instance(arguments) 73 | 74 | def create_new_ami(arguments): 75 | api.initialize_configuration() 76 | api.create_app_ami(arguments['--app'], arguments['--env']) 77 | 78 | def deploy_ami(arguments): 79 | api.initialize_configuration() 80 | api.deploy_latest_app_ami(arguments['--app'], arguments['--env']) 81 | 82 | def update_config(arguments): 83 | api.initialize_configuration() 84 | api.deploy_config_update(arguments['--app'], arguments['--env']) 85 | deploy_ami(arguments) 86 | 87 | def deploy_static(arguments): 88 | api.initialize_configuration() 89 | domain = arguments.get('--domain') 90 | force = arguments.get('--force') 91 | api.deploy_static(arguments['--app'], arguments['--env'], domain, force) 92 | 
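# A hedged illustration (assumed values, not project code): for a command
# line such as `riker deploy --app myapp --env prod --scale`, docopt parses
# the usage string at the top of this file into a dict roughly like
#
#   {'deploy': True, '--app': 'myapp', '--env': 'prod', '--scale': True,
#    '--static': False, '--force': False, '--domain': None, ...}
#
# which is why the handlers above and below read options via
# arguments['--app'] and arguments.get('--force').
93 | 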
def deploy_to_single_instance(arguments): 94 | api.initialize_configuration() 95 | api.deploy_to_single_instance(arguments['--app'], arguments['--env']) 96 | 97 | def get_info(arguments): 98 | api.initialize_configuration() 99 | api.get_info(arguments['--app'], arguments['--env']) 100 | 101 | def ssh(arguments): 102 | api.initialize_configuration() 103 | api.do_ssh(arguments['--instance-id']) 104 | 105 | def dokku(arguments): 106 | api.initialize_configuration() 107 | cmd = ' '.join(arguments.get('<cmd>', [])) 108 | api.dokku(arguments['--instance-id'], cmd) 109 | 110 | def open_url(arguments): 111 | api.initialize_configuration() 112 | api.open_url(arguments['--app'], arguments['--env']) 113 | 114 | def get_url(arguments): 115 | api.initialize_configuration() 116 | url = api.get_url(arguments['--app'], arguments['--env']) 117 | if url: print url 118 | 119 | def config(arguments): 120 | api.initialize_configuration(show_output=True) 121 | 122 | if __name__ == '__main__': 123 | main() 124 | -------------------------------------------------------------------------------- /riker/boto_helpers.py: -------------------------------------------------------------------------------- 1 | import collections 2 | import os 3 | import os.path 4 | import utils 5 | import fabric.api 6 | 7 | def get_or_create_key_pair(conn, name, get_pem_filename): 8 | utils.log('info', 'Ensuring key pair exists', show_header=True) 9 | key_pair = conn.get_key_pair(name) 10 | if key_pair is None: 11 | utils.log('info', 'Not found: creating') 12 | key_pair = create_key_pair(conn, name, get_pem_filename) 13 | return key_pair 14 | 15 | def create_key_pair(conn, name, get_pem_filename): 16 | key_pair = conn.create_key_pair(name) 17 | write_private_key_to_pem_file(key_pair, get_pem_filename) 18 | return key_pair 19 | 20 | def write_private_key_to_pem_file(key_pair, get_pem_filename): 21 | filename = get_pem_filename(key_pair.name) 22 | if os.path.isfile(filename): 23 | raise RuntimeError('%s already exists' % filename) 24 | with open(filename, 'w') as f: 25 | utils.log('info', 'writing private key to %s' % filename) 26 | f.write(key_pair.material) 27 | os.chmod(filename, 0600) 28 | fabric.api.local('ssh-add %s' % (filename,)) 29 | 30 | def get_security_group(c, group_name): 31 | groups = [g for g in c.get_all_security_groups() if g.name == group_name] 32 | return groups[0] if groups else None 33 | 34 | ############################################################################## 35 | # Based off of steder/aws_sg_recipe.py: https://gist.github.com/steder/1498451 36 | ############################################################################## 37 | 38 | SecurityGroupRule = collections.namedtuple("SecurityGroupRule", ["ip_protocol", "from_port", "to_port", "cidr_ip", "src_group_name"]) 39 | 40 | def ensure_security_groups(conn, security_groups, vpc_id): 41 | groups = [] 42 | for group_name, rules in security_groups: 43 | group = get_or_create_security_group(conn, group_name, vpc_id=vpc_id) 44 | update_security_group(conn, group, rules) 45 | groups.append(group) 46 | return groups 47 | 48 | def get_or_create_security_group(c, group_name, description="", vpc_id=None): 49 | """Return the security group named `group_name`, creating it 50 | (in `vpc_id`, if given) when it does not yet exist.""" 51 | groups = [g for g in c.get_all_security_groups() if g.name == group_name] 52 | group = groups[0] if groups else None 53 | if not group: 54 | print "-----> Creating group '%s'..."%(group_name,) 55 | group = c.create_security_group(group_name, "A group for %s"%(group_name,), vpc_id) 56 | return group 57 | 58 | 59 | def modify_sg(c, group, rule, 
authorize=False, revoke=False): 60 | src_group = None 61 | if rule.src_group_name: 62 | src_group = c.get_all_security_groups([rule.src_group_name,])[0] 63 | 64 | if authorize and not revoke: 65 | print " Authorizing missing rule %s..."%(rule,) 66 | group.authorize(ip_protocol=rule.ip_protocol, 67 | from_port=rule.from_port, 68 | to_port=rule.to_port, 69 | cidr_ip=rule.cidr_ip, 70 | src_group=src_group) 71 | elif not authorize and revoke: 72 | print " Revoking unexpected rule %s..."%(rule,) 73 | group.revoke(ip_protocol=rule.ip_protocol, 74 | from_port=rule.from_port, 75 | to_port=rule.to_port, 76 | cidr_ip=rule.cidr_ip, 77 | src_group=src_group) 78 | 79 | 80 | def authorize(c, group, rule): 81 | """Authorize `rule` on `group`.""" 82 | return modify_sg(c, group, rule, authorize=True) 83 | 84 | 85 | def revoke(c, group, rule): 86 | """Revoke `rule` on `group`.""" 87 | return modify_sg(c, group, rule, revoke=True) 88 | 89 | 90 | def update_security_group(c, group, expected_rules): 91 | """ 92 | """ 93 | print '-----> Updating group "%s"...'%(group.name,) 94 | #import pprint 95 | #print "Expected Rules:" 96 | #pprint.pprint(expected_rules) 97 | 98 | current_rules = [] 99 | for rule in group.rules: 100 | if not rule.grants[0].cidr_ip: 101 | current_rule = SecurityGroupRule(rule.ip_protocol, 102 | rule.from_port, 103 | rule.to_port, 104 | "0.0.0.0/0", 105 | rule.grants[0].name) 106 | else: 107 | current_rule = SecurityGroupRule(rule.ip_protocol, 108 | rule.from_port, 109 | rule.to_port, 110 | rule.grants[0].cidr_ip, 111 | None) 112 | 113 | if current_rule not in expected_rules: 114 | revoke(c, group, current_rule) 115 | else: 116 | current_rules.append(current_rule) 117 | 118 | #print "Current Rules:" 119 | #pprint.pprint(current_rules) 120 | 121 | for rule in expected_rules: 122 | if rule not in current_rules: 123 | authorize(c, group, rule) 124 | 125 | ############################################################################## 126 | 127 | -------------------------------------------------------------------------------- /LICENSE.txt: -------------------------------------------------------------------------------- 1 | GNU GENERAL PUBLIC LICENSE 2 | Version 2, June 1991 3 | 4 | Copyright (C) 1989, 1991 Free Software Foundation, Inc., 5 | 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA 6 | Everyone is permitted to copy and distribute verbatim copies 7 | of this license document, but changing it is not allowed. 8 | 9 | Preamble 10 | 11 | The licenses for most software are designed to take away your 12 | freedom to share and change it. By contrast, the GNU General Public 13 | License is intended to guarantee your freedom to share and change free 14 | software--to make sure the software is free for all its users. This 15 | General Public License applies to most of the Free Software 16 | Foundation's software and to any other program whose authors commit to 17 | using it. (Some other Free Software Foundation software is covered by 18 | the GNU Lesser General Public License instead.) You can apply it to 19 | your programs, too. 20 | 21 | When we speak of free software, we are referring to freedom, not 22 | price. Our General Public Licenses are designed to make sure that you 23 | have the freedom to distribute copies of free software (and charge for 24 | this service if you wish), that you receive source code or can get it 25 | if you want it, that you can change the software or use pieces of it 26 | in new free programs; and that you know you can do these things. 
27 | 28 | To protect your rights, we need to make restrictions that forbid 29 | anyone to deny you these rights or to ask you to surrender the rights. 30 | These restrictions translate to certain responsibilities for you if you 31 | distribute copies of the software, or if you modify it. 32 | 33 | For example, if you distribute copies of such a program, whether 34 | gratis or for a fee, you must give the recipients all the rights that 35 | you have. You must make sure that they, too, receive or can get the 36 | source code. And you must show them these terms so they know their 37 | rights. 38 | 39 | We protect your rights with two steps: (1) copyright the software, and 40 | (2) offer you this license which gives you legal permission to copy, 41 | distribute and/or modify the software. 42 | 43 | Also, for each author's protection and ours, we want to make certain 44 | that everyone understands that there is no warranty for this free 45 | software. If the software is modified by someone else and passed on, we 46 | want its recipients to know that what they have is not the original, so 47 | that any problems introduced by others will not reflect on the original 48 | authors' reputations. 49 | 50 | Finally, any free program is threatened constantly by software 51 | patents. We wish to avoid the danger that redistributors of a free 52 | program will individually obtain patent licenses, in effect making the 53 | program proprietary. To prevent this, we have made it clear that any 54 | patent must be licensed for everyone's free use or not licensed at all. 55 | 56 | The precise terms and conditions for copying, distribution and 57 | modification follow. 58 | 59 | GNU GENERAL PUBLIC LICENSE 60 | TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 61 | 62 | 0. This License applies to any program or other work which contains 63 | a notice placed by the copyright holder saying it may be distributed 64 | under the terms of this General Public License. The "Program", below, 65 | refers to any such program or work, and a "work based on the Program" 66 | means either the Program or any derivative work under copyright law: 67 | that is to say, a work containing the Program or a portion of it, 68 | either verbatim or with modifications and/or translated into another 69 | language. (Hereinafter, translation is included without limitation in 70 | the term "modification".) Each licensee is addressed as "you". 71 | 72 | Activities other than copying, distribution and modification are not 73 | covered by this License; they are outside its scope. The act of 74 | running the Program is not restricted, and the output from the Program 75 | is covered only if its contents constitute a work based on the 76 | Program (independent of having been made by running the Program). 77 | Whether that is true depends on what the Program does. 78 | 79 | 1. You may copy and distribute verbatim copies of the Program's 80 | source code as you receive it, in any medium, provided that you 81 | conspicuously and appropriately publish on each copy an appropriate 82 | copyright notice and disclaimer of warranty; keep intact all the 83 | notices that refer to this License and to the absence of any warranty; 84 | and give any other recipients of the Program a copy of this License 85 | along with the Program. 86 | 87 | You may charge a fee for the physical act of transferring a copy, and 88 | you may at your option offer warranty protection in exchange for a fee. 89 | 90 | 2. 
You may modify your copy or copies of the Program or any portion 91 | of it, thus forming a work based on the Program, and copy and 92 | distribute such modifications or work under the terms of Section 1 93 | above, provided that you also meet all of these conditions: 94 | 95 | a) You must cause the modified files to carry prominent notices 96 | stating that you changed the files and the date of any change. 97 | 98 | b) You must cause any work that you distribute or publish, that in 99 | whole or in part contains or is derived from the Program or any 100 | part thereof, to be licensed as a whole at no charge to all third 101 | parties under the terms of this License. 102 | 103 | c) If the modified program normally reads commands interactively 104 | when run, you must cause it, when started running for such 105 | interactive use in the most ordinary way, to print or display an 106 | announcement including an appropriate copyright notice and a 107 | notice that there is no warranty (or else, saying that you provide 108 | a warranty) and that users may redistribute the program under 109 | these conditions, and telling the user how to view a copy of this 110 | License. (Exception: if the Program itself is interactive but 111 | does not normally print such an announcement, your work based on 112 | the Program is not required to print an announcement.) 113 | 114 | These requirements apply to the modified work as a whole. If 115 | identifiable sections of that work are not derived from the Program, 116 | and can be reasonably considered independent and separate works in 117 | themselves, then this License, and its terms, do not apply to those 118 | sections when you distribute them as separate works. But when you 119 | distribute the same sections as part of a whole which is a work based 120 | on the Program, the distribution of the whole must be on the terms of 121 | this License, whose permissions for other licensees extend to the 122 | entire whole, and thus to each and every part regardless of who wrote it. 123 | 124 | Thus, it is not the intent of this section to claim rights or contest 125 | your rights to work written entirely by you; rather, the intent is to 126 | exercise the right to control the distribution of derivative or 127 | collective works based on the Program. 128 | 129 | In addition, mere aggregation of another work not based on the Program 130 | with the Program (or with a work based on the Program) on a volume of 131 | a storage or distribution medium does not bring the other work under 132 | the scope of this License. 133 | 134 | 3. 
You may copy and distribute the Program (or a work based on it, 135 | under Section 2) in object code or executable form under the terms of 136 | Sections 1 and 2 above provided that you also do one of the following: 137 | 138 | a) Accompany it with the complete corresponding machine-readable 139 | source code, which must be distributed under the terms of Sections 140 | 1 and 2 above on a medium customarily used for software interchange; or, 141 | 142 | b) Accompany it with a written offer, valid for at least three 143 | years, to give any third party, for a charge no more than your 144 | cost of physically performing source distribution, a complete 145 | machine-readable copy of the corresponding source code, to be 146 | distributed under the terms of Sections 1 and 2 above on a medium 147 | customarily used for software interchange; or, 148 | 149 | c) Accompany it with the information you received as to the offer 150 | to distribute corresponding source code. (This alternative is 151 | allowed only for noncommercial distribution and only if you 152 | received the program in object code or executable form with such 153 | an offer, in accord with Subsection b above.) 154 | 155 | The source code for a work means the preferred form of the work for 156 | making modifications to it. For an executable work, complete source 157 | code means all the source code for all modules it contains, plus any 158 | associated interface definition files, plus the scripts used to 159 | control compilation and installation of the executable. However, as a 160 | special exception, the source code distributed need not include 161 | anything that is normally distributed (in either source or binary 162 | form) with the major components (compiler, kernel, and so on) of the 163 | operating system on which the executable runs, unless that component 164 | itself accompanies the executable. 165 | 166 | If distribution of executable or object code is made by offering 167 | access to copy from a designated place, then offering equivalent 168 | access to copy the source code from the same place counts as 169 | distribution of the source code, even though third parties are not 170 | compelled to copy the source along with the object code. 171 | 172 | 4. You may not copy, modify, sublicense, or distribute the Program 173 | except as expressly provided under this License. Any attempt 174 | otherwise to copy, modify, sublicense or distribute the Program is 175 | void, and will automatically terminate your rights under this License. 176 | However, parties who have received copies, or rights, from you under 177 | this License will not have their licenses terminated so long as such 178 | parties remain in full compliance. 179 | 180 | 5. You are not required to accept this License, since you have not 181 | signed it. However, nothing else grants you permission to modify or 182 | distribute the Program or its derivative works. These actions are 183 | prohibited by law if you do not accept this License. Therefore, by 184 | modifying or distributing the Program (or any work based on the 185 | Program), you indicate your acceptance of this License to do so, and 186 | all its terms and conditions for copying, distributing or modifying 187 | the Program or works based on it. 188 | 189 | 6. Each time you redistribute the Program (or any work based on the 190 | Program), the recipient automatically receives a license from the 191 | original licensor to copy, distribute or modify the Program subject to 192 | these terms and conditions. 
You may not impose any further 193 | restrictions on the recipients' exercise of the rights granted herein. 194 | You are not responsible for enforcing compliance by third parties to 195 | this License. 196 | 197 | 7. If, as a consequence of a court judgment or allegation of patent 198 | infringement or for any other reason (not limited to patent issues), 199 | conditions are imposed on you (whether by court order, agreement or 200 | otherwise) that contradict the conditions of this License, they do not 201 | excuse you from the conditions of this License. If you cannot 202 | distribute so as to satisfy simultaneously your obligations under this 203 | License and any other pertinent obligations, then as a consequence you 204 | may not distribute the Program at all. For example, if a patent 205 | license would not permit royalty-free redistribution of the Program by 206 | all those who receive copies directly or indirectly through you, then 207 | the only way you could satisfy both it and this License would be to 208 | refrain entirely from distribution of the Program. 209 | 210 | If any portion of this section is held invalid or unenforceable under 211 | any particular circumstance, the balance of the section is intended to 212 | apply and the section as a whole is intended to apply in other 213 | circumstances. 214 | 215 | It is not the purpose of this section to induce you to infringe any 216 | patents or other property right claims or to contest validity of any 217 | such claims; this section has the sole purpose of protecting the 218 | integrity of the free software distribution system, which is 219 | implemented by public license practices. Many people have made 220 | generous contributions to the wide range of software distributed 221 | through that system in reliance on consistent application of that 222 | system; it is up to the author/donor to decide if he or she is willing 223 | to distribute software through any other system and a licensee cannot 224 | impose that choice. 225 | 226 | This section is intended to make thoroughly clear what is believed to 227 | be a consequence of the rest of this License. 228 | 229 | 8. If the distribution and/or use of the Program is restricted in 230 | certain countries either by patents or by copyrighted interfaces, the 231 | original copyright holder who places the Program under this License 232 | may add an explicit geographical distribution limitation excluding 233 | those countries, so that distribution is permitted only in or among 234 | countries not thus excluded. In such case, this License incorporates 235 | the limitation as if written in the body of this License. 236 | 237 | 9. The Free Software Foundation may publish revised and/or new versions 238 | of the General Public License from time to time. Such new versions will 239 | be similar in spirit to the present version, but may differ in detail to 240 | address new problems or concerns. 241 | 242 | Each version is given a distinguishing version number. If the Program 243 | specifies a version number of this License which applies to it and "any 244 | later version", you have the option of following the terms and conditions 245 | either of that version or of any later version published by the Free 246 | Software Foundation. If the Program does not specify a version number of 247 | this License, you may choose any version ever published by the Free Software 248 | Foundation. 249 | 250 | 10. 
If you wish to incorporate parts of the Program into other free 251 | programs whose distribution conditions are different, write to the author 252 | to ask for permission. For software which is copyrighted by the Free 253 | Software Foundation, write to the Free Software Foundation; we sometimes 254 | make exceptions for this. Our decision will be guided by the two goals 255 | of preserving the free status of all derivatives of our free software and 256 | of promoting the sharing and reuse of software generally. 257 | 258 | NO WARRANTY 259 | 260 | 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY 261 | FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN 262 | OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES 263 | PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED 264 | OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF 265 | MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS 266 | TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE 267 | PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, 268 | REPAIR OR CORRECTION. 269 | 270 | 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING 271 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR 272 | REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, 273 | INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING 274 | OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED 275 | TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY 276 | YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER 277 | PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE 278 | POSSIBILITY OF SUCH DAMAGES. 279 | 280 | END OF TERMS AND CONDITIONS 281 | 282 | How to Apply These Terms to Your New Programs 283 | 284 | If you develop a new program, and you want it to be of the greatest 285 | possible use to the public, the best way to achieve this is to make it 286 | free software which everyone can redistribute and change under these terms. 287 | 288 | To do so, attach the following notices to the program. It is safest 289 | to attach them to the start of each source file to most effectively 290 | convey the exclusion of warranty; and each file should have at least 291 | the "copyright" line and a pointer to where the full notice is found. 292 | 293 | {description} 294 | Copyright (C) {year} {fullname} 295 | 296 | This program is free software; you can redistribute it and/or modify 297 | it under the terms of the GNU General Public License as published by 298 | the Free Software Foundation; either version 2 of the License, or 299 | (at your option) any later version. 300 | 301 | This program is distributed in the hope that it will be useful, 302 | but WITHOUT ANY WARRANTY; without even the implied warranty of 303 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 304 | GNU General Public License for more details. 305 | 306 | You should have received a copy of the GNU General Public License along 307 | with this program; if not, write to the Free Software Foundation, Inc., 308 | 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 309 | 310 | Also add information on how to contact you by electronic and paper mail. 
311 | 312 | If the program is interactive, make it output a short notice like this 313 | when it starts in an interactive mode: 314 | 315 | Gnomovision version 69, Copyright (C) year name of author 316 | Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. 317 | This is free software, and you are welcome to redistribute it 318 | under certain conditions; type `show c' for details. 319 | 320 | The hypothetical commands `show w' and `show c' should show the appropriate 321 | parts of the General Public License. Of course, the commands you use may 322 | be called something other than `show w' and `show c'; they could even be 323 | mouse-clicks or menu items--whatever suits your program. 324 | 325 | You should also get your employer (if you work as a programmer) or your 326 | school, if any, to sign a "copyright disclaimer" for the program, if 327 | necessary. Here is a sample; alter the names: 328 | 329 | Yoyodyne, Inc., hereby disclaims all copyright interest in the program 330 | `Gnomovision' (which makes passes at compilers) written by James Hacker. 331 | 332 | {signature of Ty Coon}, 1 April 1989 333 | Ty Coon, President of Vice 334 | 335 | This General Public License does not permit incorporating your program into 336 | proprietary programs. If your program is a subroutine library, you may 337 | consider it more useful to permit linking proprietary applications with the 338 | library. If this is what you want to do, use the GNU Lesser General 339 | Public License instead of this License. 340 | -------------------------------------------------------------------------------- /riker/api.py: -------------------------------------------------------------------------------- 1 | import sys 2 | from os import walk, getcwd 3 | from os.path import join, isdir, expanduser, relpath, normpath, basename 4 | import os.path 5 | from operator import itemgetter 6 | import datetime 7 | import time 8 | import re 9 | import json 10 | import uuid 11 | 12 | import boto 13 | import boto.ec2 14 | from boto.route53.record import ResourceRecordSets 15 | from boto.ec2.elb import HealthCheck 16 | from boto.ec2.autoscale import LaunchConfiguration 17 | from boto.ec2.autoscale import AutoScalingGroup 18 | from boto.ec2.elb.attributes import ConnectionDrainingAttribute 19 | from fabric.api import task, run, local, env, sudo, lcd, execute, put 20 | from fabric.contrib.files import exists, append, sed 21 | from fabric.operations import reboot 22 | import giturlparse 23 | from tld import get_tld 24 | import pybars 25 | 26 | import git_helpers as git 27 | import boto_helpers 28 | import config as riker_config 29 | from utils import poll_for_condition, log, first 30 | from retry import synchronize 31 | 32 | import fabric 33 | fabric.state.output.everything = True 34 | 35 | # http://docs.aws.amazon.com/general/latest/gr/rande.html#s3_website_region_endpoints 36 | s3_website_regions = { 37 | 'us-east-1': ('s3-website-us-east-1.amazonaws.com.', 'Z3AQBSTGFYJSTF'), 38 | 'us-west-2': ('s3-website-us-west-2.amazonaws.com.', 'Z3BJ6K6RIION7M'), 39 | 'us-west-1': ('s3-website-us-west-1.amazonaws.com.', 'Z2F56UZL2M1ACD'), 40 | 'eu-west-1': ('s3-website-eu-west-1.amazonaws.com.', 'Z1BKCTXD74EZPE'), 41 | 'ap-southeast-1': ('s3-website-ap-southeast-1.amazonaws.com.', 'Z3O0J2DXBE1FTB'), 42 | 'ap-southeast-2': ('s3-website-ap-southeast-2.amazonaws.com.', 'Z1WCIGYICN2BYD'), 43 | 'ap-northeast-1': ('s3-website-ap-northeast-1.amazonaws.com.', 'Z2M4EHUR26P7ZW'), 44 | 'sa-east-1': ('s3-website-sa-east-1.amazonaws.com.', 
'Z7KQH4QJS55SO'), 45 | 'us-gov-west-1': ('s3-website-us-gov-west-1.amazonaws.com.', 'Z31GFT0UA1I2HV') 46 | } 47 | 48 | aws = None 49 | initialized = False 50 | config = riker_config.default_config() 51 | 52 | def get_public_dns(instances): 53 | return [inst.public_dns_name for inst in instances] 54 | 55 | def ensure_running(instances, timeout=600, poll_delay=10): 56 | if len(instances) == 0: 57 | return 58 | log('info', 'Waiting for instances {} to be running'.format(instances), show_header=True) 59 | def get_status(): 60 | try: 61 | return aws.conn.get_all_instance_status([inst.id for inst in instances]) 62 | except boto.exception.EC2ResponseError: 63 | log('info', 'No status yet') 64 | def is_status_ok(statuses): 65 | #for s in statuses: 66 | # log('info', 'state={}, system_status={}'.format(s.state_name, s.system_status.status)) 67 | return len(statuses) > 0 and \ 68 | all([s.state_name == 'running' and s.system_status.status == 'ok' for s in statuses]) 69 | poll_for_condition(get_status, is_status_ok, timeout, poll_delay) 70 | [instance.update() for instance in instances] 71 | 72 | def ensure_complete(image_ids, timeout=1200, poll_delay=10): 73 | if len(image_ids) == 0: 74 | return 75 | log('info', 'Waiting for image {} to be available'.format(image_ids), show_header=True) 76 | def get_image(image_id): 77 | def _get_image(): 78 | try: 79 | return aws.conn.get_image(image_id) 80 | except boto.exception.EC2ResponseError: 81 | log('info', 'No state yet') 82 | return _get_image 83 | def is_image_available(image): 84 | #log('info', 'state={}'.format(image.state if image is not None else 'noimage',)) 85 | return image is not None and image.state == 'available' 86 | for image_id in image_ids: 87 | poll_for_condition(get_image(image_id), is_image_available, timeout, poll_delay) 88 | 89 | class AWS(object): 90 | 91 | @classmethod 92 | def from_config(cls, config): 93 | return cls(key_pair_name=config['instance_key_pair_name'], 94 | vpc_id=config['vpc_id'], 95 | availability_zone=config['availability_zone'], 96 | subnet_id=config['subnet_id'], 97 | security_groups=config['security_groups'], 98 | base_image=config['os_image_id'], 99 | instance_type=config['instance_type']) 100 | 101 | def __init__(self, key_pair_name, security_groups, base_image, instance_type, vpc_id, availability_zone, subnet_id): 102 | self.key_pair_name = key_pair_name 103 | self.security_groups = security_groups 104 | self.base_image = base_image 105 | self.instance_type = instance_type 106 | self.vpc_id = vpc_id 107 | self.availability_zone = availability_zone 108 | self.subnet_id = subnet_id 109 | 110 | def connect(self): 111 | log('info', 'Connecting to AWS', show_header=True) 112 | self.conn = boto.connect_ec2() 113 | 114 | def setup(self): 115 | log('info', 'Setting up AWS', show_header=True) 116 | boto_helpers.get_or_create_key_pair(self.conn, self.key_pair_name, get_pem_filename) 117 | boto_helpers.ensure_security_groups(self.conn, 118 | self.security_groups.items(), 119 | self.vpc_id) 120 | 121 | def run_instance(self, group_ids, ami=None): 122 | if ami is None: 123 | ami = self.base_image 124 | log('info', 'running instance') 125 | return self.conn.run_instances(ami, 126 | instance_type=self.instance_type, 127 | security_group_ids=group_ids, 128 | key_name=self.key_pair_name, 129 | subnet_id=self.subnet_id) 130 | 131 | def create_tags(self, ids, tags): 132 | log('info', 'tagging resource {}: {}'.format(ids, tags)) 133 | self.conn.create_tags(ids, tags) 134 | 135 | def create_image(self, instance_ids, name): 136 | 
log('info', 'creating image from {}'.format(instance_ids)) 137 | image_ids = [self.conn.create_image(instance_id=instance_id, 138 | name=name) 139 | for instance_id in instance_ids] 140 | log('info', 'images: {}'.format(image_ids)) 141 | return image_ids 142 | 143 | def get_security_group(self, name): 144 | return boto_helpers.get_security_group(self.conn, name) 145 | 146 | def get_security_group_id(self, name): 147 | return self.get_security_group(name).id 148 | 149 | 150 | class Repo(object): 151 | 152 | def __init__(self, name, path, remote_url=None, remote_branch=None, local_branch=None): 153 | self.remote_url = remote_url 154 | self.remote_branch = remote_branch 155 | self.local_branch = local_branch 156 | self.name = name 157 | self._path = path 158 | self.app_config = config.get('apps', {}).get(name, {}) 159 | 160 | self.set_prop_from_app_config('remote_url') 161 | self.set_prop_from_app_config('remote_branch') 162 | self.set_prop_from_app_config('local_branch') 163 | 164 | def set_prop_from_app_config(self, prop): 165 | if getattr(self, prop) is None: 166 | setattr(self, prop, self.app_config.get(prop)) 167 | 168 | @property 169 | def path(self): 170 | return self._path or expanduser(join(riker_config.directory, 'apps', self.name)) 171 | 172 | @synchronize('repo_fetch.lock') 173 | def fetch(self): 174 | if self._path: return 175 | log('info', 'Fetching app {} from {} to {}'.format(self.name, self.remote_url, self.path), show_header=True) 176 | git_remote_host = giturlparse.parse(self.remote_url).host 177 | ssh.remove_from_known_hosts(git_remote_host) 178 | ssh.add_to_known_hosts(git_remote_host) 179 | if not isdir(self.path): 180 | local('mkdir -p {}'.format(self.path)) 181 | git.clone_repo(self.remote_url, self.path, self.local_branch) 182 | else: 183 | git.ensure_is_repo(self.path) 184 | with lcd(self.path): 185 | local_branch = self.local_branch if self.local_branch is not None else 'master' 186 | local('git reset --hard HEAD') 187 | local('git pull origin {}'.format(local_branch)) 188 | local('git fetch') 189 | local('git checkout {}'.format(local_branch)) 190 | 191 | def head_commit_id(self): 192 | with lcd(self.path): 193 | return git.get_head_commit_sha1() 194 | 195 | def get_deploy_remote_url(self, host): 196 | return '{}@{}:{}'.format(config['deploy_user'], host, self.name) 197 | 198 | class NoAppFoundError(Exception): 199 | pass 200 | 201 | class App(object): 202 | 203 | def __init__(self, env_name, app_name): 204 | if git.is_repo(getcwd()): 205 | app_path = getcwd() 206 | else: 207 | app_path = None 208 | 209 | if app_name is None: 210 | if app_path is not None: 211 | app_name = basename(app_path) 212 | else: 213 | raise Exception("Riker App: either provide an app name, or " + 214 | "current directory must be a git repo") 215 | self.repo = Repo(app_name, app_path) 216 | 217 | if env_name is None: 218 | env_name = 'dev' 219 | self.env_name = env_name 220 | 221 | @property 222 | def name(self): 223 | return '{}/{}'.format(self.env_name, self.repo.name) 224 | 225 | @property 226 | def config(self): 227 | return self.repo.app_config 228 | 229 | class CachedObject(object): 230 | 231 | def create(self, *args, **kwargs): 232 | raise NotImplementedError() 233 | 234 | def get(self, *args, **kwargs): 235 | raise NotImplementedError() 236 | 237 | def get_or_create(self, *args, **kwargs): 238 | obj = self.get(*args, **kwargs) 239 | if obj is None or (hasattr(obj, '__len__') and len(obj) == 0): 240 | obj = self.create(*args, **kwargs) 241 | return obj 242 | 243 | class 
AppImage(CachedObject): 244 | 245 | def __init__(self, app, instances): 246 | self.app = app 247 | self.instances = instances 248 | 249 | def tags(self): 250 | return { 251 | 'app': self.app.name, 252 | 'version': self.app.repo.head_commit_id(), 253 | 'timestamp': datetime.datetime.now().isoformat(), 254 | 'deploy-id': self.get_deploy_id() 255 | } 256 | 257 | def image_name(self): 258 | tags = self.tags() 259 | return "{}/{}/{}".format(tags['app'], tags['deploy-id'], tags['version']) 260 | 261 | def get_deploy_id(self): 262 | images = [aws.conn.get_image(instance.image_id) for instance in self.instances] 263 | deploy_id_values = [image.tags.get('deploy-id') for image in images] 264 | deploy_ids = [int(deploy_id) for deploy_id in deploy_id_values if deploy_id is not None] 265 | latest_deploy_id = max(deploy_ids) if len(deploy_ids) > 0 else 0 266 | return latest_deploy_id + 1 267 | 268 | def create(self): 269 | log('info', 'Creating app image {} (deploy-id: {})'.format(self.app.name, self.get_deploy_id()), show_header=True) 270 | image_ids = aws.create_image([inst.id for inst in self.instances], self.image_name()) 271 | aws.create_tags(image_ids, self.tags()) 272 | ensure_complete(image_ids) 273 | return image_ids 274 | 275 | def get(self): 276 | return None 277 | 278 | class LatestAppImage(CachedObject): 279 | def __init__(self, app): 280 | self.app = app 281 | def get(self): 282 | print '-----> Looking for latest app image: {}'.format(self.app.name) 283 | images = aws.conn.get_all_images(owners=['self'], filters={'tag:app': self.app.name, 284 | 'tag-key': ['deploy-id', 285 | 'version']}) 286 | images_ordered_by_deploy_id = sorted(images, 287 | key=lambda image: int(image.tags['deploy-id']), reverse=True) 288 | for image in images_ordered_by_deploy_id: 289 | print '-----> Found {} (deploy-id: {}) (image: {})'.format(self.app.name, image.tags['deploy-id'], image.id) 290 | return image 291 | print '-----> First deploy of {}!'.format(self.app.name) 292 | return None 293 | def create(self): 294 | return None 295 | 296 | class AppInstance(CachedObject): 297 | 298 | def __init__(self, app, image, group_ids): 299 | self.app = app 300 | self.image = image 301 | self.group_ids = group_ids 302 | 303 | def create(self): 304 | print '-----> App instance for {} not found, running from image {}'.format(self.app.name, self.image.id) 305 | reservation = aws.run_instance(self.group_ids, self.image.id) 306 | instance_ids = [inst.id for inst in reservation.instances] 307 | aws.create_tags(instance_ids, { 308 | 'app_instance': 'true', 309 | 'deployed': 'false', 310 | 'app': self.app.name, 311 | 'timestamp': datetime.datetime.now().isoformat() 312 | }) 313 | return reservation.instances 314 | 315 | def get(self): 316 | print '-----> Looking for app instance to deploy to: {} (image: {})'.format(self.app.name, self.image.id) 317 | return aws.conn.get_only_instances(filters={'tag:app': self.app.name, 318 | 'tag:app_instance': 'true', 319 | 'tag:deployed': 'true', 320 | 'image-id': self.image.id, 321 | 'instance-state-name': 'running'}) 322 | 323 | def deploy_instances(self, instances): 324 | print '-----> Deploying app {} to instances {}'.format(self.app.name, instances) 325 | ensure_running(instances) 326 | hosts = get_public_dns(instances) 327 | execute(self.deploy(), hosts=hosts) 328 | 329 | def update_instances_config(self, instances): 330 | print '-----> Updating config of {} on instances {}'.format(self.app.name, instances) 331 | ensure_running(instances) 332 | hosts = 
get_public_dns(instances) 333 | execute(update_config, self.app.repo.name, self.app.env_name, hosts=hosts) 334 | 335 | def deploy(self): 336 | @task 337 | def _deploy(): 338 | app_name = self.app.name 339 | repo_name = self.app.repo.name 340 | env_name = self.app.env_name 341 | repo_path = self.app.repo.path 342 | remote_branch = self.app.repo.remote_branch 343 | local_branch = self.app.repo.local_branch 344 | remote_name = config['deploy_remote_name'] 345 | log('info', 'Deploying app: {}'.format(app_name), show_header=True) 346 | git.ensure_is_repo(repo_path) 347 | with lcd(repo_path): 348 | git.ensure_remote(remote_name, self.app.repo.get_deploy_remote_url(env.host)) 349 | ssh.add_to_known_hosts(env.host) 350 | put('~/.ssh/id_rsa.pub', '~', mirror_local_mode=True) 351 | run('sudo sshcommand acl-remove {} ubuntu'.format(config['deploy_user']), warn_only=True) 352 | run('cat ~/id_rsa.pub | sudo sshcommand acl-add {} ubuntu'.format(config['deploy_user'])) 353 | run('rm ~/id_rsa.pub') 354 | git.push_repo(remote_name, branch_name=remote_branch, local_branch_name=local_branch, auto_confirm=True) 355 | # make dokku (nginx) serve this app for any server name 356 | # this is OK since we're only deploying one app per server 357 | run('dokku domains:set {} "{}"'.format(repo_name, '~^(www\.)?(?<domain>.+)$')) 358 | update_config(repo_name, env_name) 359 | ssh.remove_from_known_hosts(env.host) 360 | instance_id = get_instance_id_from_server() 361 | aws.create_tags([instance_id], { 362 | 'deployed': 'true', 363 | 'version': self.app.repo.head_commit_id(), 364 | 'timestamp': datetime.datetime.now().isoformat() 365 | }) 366 | return _deploy 367 | 368 | def configure_nginx_xforwarded_passthru(name): 369 | sed("/home/dokku/{}/nginx.conf".format(name), 'X-Forwarded-Proto \$scheme', 'X-Forwarded-Proto \$real_scheme', use_sudo=True) 370 | sed("/home/dokku/{}/nginx.conf".format(name), 'X-Forwarded-For \$remote_addr', 'X-Forwarded-For \$real_remote_addr', use_sudo=True) 371 | sed("/home/dokku/{}/nginx.conf".format(name), 'X-Forwarded-Port \$server_port', 'X-Forwarded-Port \$real_server_port', use_sudo=True) 372 | 373 | class BaseImage(CachedObject): 374 | 375 | def __init__(self, name, base_instance): 376 | self.name = name 377 | self.base_instance = base_instance 378 | 379 | def get_or_create_base_instance(self): 380 | instances = self.base_instance.get_or_create() 381 | if len(instances) != 1: 382 | raise Exception("Exactly 1 base instance must be running") 383 | self.base_instance.provision_instances(instances) 384 | return instances 385 | 386 | def create(self): 387 | print '-----> Creating base image {}'.format(self.name) 388 | instances = self.get_or_create_base_instance() 389 | instance_ids = [inst.id for inst in instances] 390 | image_ids = aws.create_image(instance_ids, self.name) 391 | aws.create_tags(image_ids, { 392 | 'base_image': 'true', 393 | 'timestamp': datetime.datetime.now().isoformat() 394 | }) 395 | ensure_complete(image_ids) 396 | terminate_instances(instance_ids) 397 | for image_id in image_ids: 398 | return aws.conn.get_image(image_id) 399 | return None 400 | 401 | def get(self): 402 | print '-----> Getting base image {}'.format(self.name) 403 | images = aws.conn.get_all_images(owners=['self'], filters={'name': self.name, 'tag:base_image': 'true'}) 404 | image_ids = [image.id for image in images] 405 | ensure_complete(image_ids) 406 | for image_id in image_ids: 407 | print '-----> Found {}'.format(image_id) 408 | return aws.conn.get_image(image_id) 409 | print '-----> Not found' 410 | return None 411 | 
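# A hedged sketch (assumed output, not project code): the helper below asks
# EC2's link-local instance metadata service for the current instance's ID,
# equivalent to running this on the instance itself:
#
#   $ curl http://169.254.169.254/latest/meta-data/instance-id
#   i-0123456789abcdef0    (hypothetical ID)
#
# This is how provisioning and deploy tasks discover which instance they are
# running on, so that the instance can then be tagged through the AWS API.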
412 | def get_instance_id_from_server(): 413 | return run('curl http://169.254.169.254/latest/meta-data/instance-id') 414 | 415 | class BaseInstance(CachedObject): 416 | 417 | def __init__(self, name, image, group_ids): 418 | self.image = image 419 | self.group_ids = group_ids 420 | self.name = name 421 | 422 | def create(self): 423 | print '-----> Base instance {} not found, running from image {}'.format(self.name, self.image.id) 424 | reservation = aws.run_instance(self.group_ids, self.image.id) 425 | instances = reservation.instances 426 | instance_ids = [inst.id for inst in instances] 427 | aws.create_tags(instance_ids, { 428 | 'Name': self.name, 429 | 'base_instance': 'true', 430 | 'provisioned': 'false' 431 | }) 432 | return instances 433 | 434 | def get(self): 435 | print '-----> Getting base instance {} (image {})'.format(self.name, self.image.id) 436 | return aws.conn.get_only_instances(filters={'tag:Name': self.name, 437 | 'tag:base_instance': 'true', 438 | 'image-id': self.image.id, 439 | 'instance-state-name': 'running'}) 440 | 441 | def provision_instances(self, instances): 442 | ensure_running(instances) 443 | hosts = get_public_dns(instances) 444 | execute(self.provision(), hosts=hosts) 445 | 446 | def provision(self): 447 | @task 448 | def _provision(): 449 | self.add_swap_space() 450 | self.install_mosh() 451 | self.install_docker() 452 | self.install_dokku() 453 | self.configure_nginx() 454 | aws.create_tags([get_instance_id_from_server()], { 455 | 'provisioned': 'true' 456 | }) 457 | return _provision 458 | 459 | # @synchronize('add_swap_space.lock', is_remote=True) 460 | def add_swap_space(self): 461 | # t2.micro instances only have 512MB RAM, so compensate with swap space. 462 | log('info', 'Creating swap', show_header=True) 463 | if exists('/extraswap', use_sudo=True): 464 | log('info', 'Swap already exists') 465 | return 466 | sudo('dd if=/dev/zero of=/extraswap bs=1M count=512') 467 | sudo('chown root:root /extraswap') 468 | sudo('chmod 600 /extraswap') 469 | sudo('mkswap /extraswap') 470 | sudo('swapon /extraswap') 471 | append('/etc/fstab', '/extraswap swap swap defaults 0 0', use_sudo=True) 472 | sudo('swapon -a') 473 | 474 | # @synchronize('install_docker.lock', is_remote=True) 475 | def install_docker(self): 476 | """ 477 | http://docs.docker.com/installation/ubuntulinux/#ubuntu-trusty-1404-lts-64-bit 478 | """ 479 | log('info', 'Installing docker', show_header=True) 480 | run('curl -s https://get.docker.io/ubuntu/ > ~/docker_install.sh') 481 | sudo('sh ~/docker_install.sh; rm -f ~/docker_install.sh') 482 | 483 | # @synchronize('install_dokku.lock', is_remote=True) 484 | def install_dokku(self): 485 | """ 486 | https://github.com/progrium/dokku 487 | """ 488 | log('info', 'Installing dokku', show_header=True) 489 | run('curl -sL https://raw.github.com/progrium/dokku/v0.3.15/bootstrap.sh > ~/dokku-install.sh') 490 | sudo('DOKKU_TAG=v0.3.15 bash ~/dokku-install.sh; rm -f ~/dokku-install.sh') 491 | reboot(wait=5*60) 492 | sudo('git clone https://github.com/statianzo/dokku-supervisord.git /var/lib/dokku/plugins/dokku-supervisord') 493 | sudo('git clone https://github.com/neam/dokku-custom-domains.git /var/lib/dokku/plugins/custom-domains') 494 | sudo('git clone https://github.com/musicglue/dokku-user-env-compile.git /var/lib/dokku/plugins/user-env-compile') 495 | sudo('dokku plugins-install') 496 | 497 | # @synchronize('install_mosh.lock', is_remote=True) 498 | def install_mosh(self): 499 | log('info', 'Installing mosh', show_header=True) 500 | 
sudo('apt-get install -y python-software-properties')  # provides add-apt-repository, so install it first
501 |         sudo('add-apt-repository -y ppa:keithw/mosh')
502 |         sudo('apt-get update -y')
503 |         sudo('apt-get install -y mosh')
504 | 
505 |     # @synchronize('configure_nginx.lock', is_remote=True)
506 |     def configure_nginx(self):
507 |         log('info', 'Configuring nginx', show_header=True)
508 |         # nginx default domain name cache size is 64 bytes per domain. This may be
509 |         # too small for EC2 public domain names, so increase to 256 bytes.
510 |         sed('/etc/nginx/nginx.conf', 'server_names_hash_bucket_size 64;', 'server_names_hash_bucket_size 256;', use_sudo=True)
511 | 
512 | class ssh(object):
513 | 
514 |     @staticmethod
515 |     @synchronize('add_to_known_hosts.lock')
516 |     def add_to_known_hosts(host):
517 |         ip = local('dig +short {}'.format(host), capture=True).strip()
518 |         local('ssh-keyscan -H {} >> ~/.ssh/known_hosts'.format(host))
519 |         local('ssh-keyscan -H {} >> ~/.ssh/known_hosts'.format(ip))
520 |         local('ssh-keyscan -H {},{} >> ~/.ssh/known_hosts'.format(host, ip))
521 | 
522 |     @staticmethod
523 |     @synchronize('remove_from_known_hosts.lock')
524 |     def remove_from_known_hosts(host):
525 |         ip = local('dig +short {}'.format(host), capture=True).strip()
526 |         # http://serverfault.com/questions/132970/can-i-automatically-add-a-new-host-to-known-hosts
527 |         local('ssh-keygen -R {}'.format(host))
528 |         local('ssh-keygen -R {}'.format(ip))
529 |         local('ssh-keygen -R {},{}'.format(host, ip))
530 | 
531 | def get_pem_filename(name):
532 |     return expanduser(join(riker_config.directory, '{}.pem'.format(name)))
533 | 
534 | def get_config_path(env_name):
535 |     return expanduser(join(riker_config.directory, 'envs', env_name))
536 | 
537 | def terminate_instances(instance_ids):
538 |     log('info', 'Terminating instances: {}'.format(instance_ids), show_header=True)
539 |     return aws.conn.terminate_instances(instance_ids)
540 | 
541 | def get_config(app, env):
542 |     config_path = get_config_path(env)
543 |     cfg_path = join(config_path, '{}.env'.format(app))
544 |     log('info', "looking for config at cfg_path: {}".format(cfg_path))
545 |     try:
546 |         with open(cfg_path) as f:
547 |             cfg = f.read().replace("\n", ' ').strip()
548 |             return cfg
549 |     except IOError:
550 |         return None
551 | 
552 | def test_docker_installation():
553 |     sudo('docker run -i -t ubuntu /bin/bash -c "uname -a"')
554 | 
555 | @task
556 | def update_config(app, env, clear='yes'):
557 |     log('info', 'Updating config for {}'.format(app), show_header=True)
558 |     cfg = get_config(app, env)
559 |     if cfg is None:
560 |         log('info', "No configuration found for {}".format(app))
561 |         return
562 |     if clear == 'yes':
563 |         sudo('truncate -s0 /home/{}/{}/ENV'.format(config['deploy_user'], app), user=config['deploy_user'])
564 |     sudo('dokku config:set {} {}'.format(app, cfg), user=config['deploy_user'])
565 | 
566 |     # make nginx pass-through x-forwarded-* headers
567 |     nginx_transparent_forward = """map $http_x_forwarded_proto $real_scheme {
568 |     default $http_x_forwarded_proto;
569 |     '' $scheme;
570 | }
571 | map $http_x_forwarded_for $real_remote_addr {
572 |     default $http_x_forwarded_for;
573 |     '' $remote_addr;
574 | }
575 | map $http_x_forwarded_port $real_server_port {
576 |     default $http_x_forwarded_port;
577 |     '' $server_port;
578 | }
579 | """
580 |     sudo('rm -f /etc/nginx/conf.d/x-forwarded-passthru.conf')
581 |     append('/etc/nginx/conf.d/x-forwarded-passthru.conf', nginx_transparent_forward, use_sudo=True)
582 |     configure_nginx_xforwarded_passthru(app)
583 |     sudo('/etc/init.d/nginx restart')
584 | 
585 | def 
logs(app, tail='no'): 586 | run('dokku logs {}{}'.format(app, ' -t' if tail == 'yes' else '')) 587 | 588 | def ps(): 589 | sudo('docker ps') 590 | 591 | def deploy_to_single_instance(app_name, env_name): 592 | global aws 593 | 594 | aws = AWS.from_config(config) 595 | 596 | aws.connect() 597 | 598 | aws.setup() 599 | 600 | env.key_filename = get_pem_filename(aws.key_pair_name) 601 | 602 | os_image = aws.conn.get_image(aws.base_image) 603 | 604 | group_ids=[aws.get_security_group_id('riker-instance')] 605 | 606 | app = App(env_name, app_name) 607 | 608 | app.repo.fetch() 609 | 610 | base_image = BaseImage(name=config['base_instance_name'], 611 | base_instance=BaseInstance(name=config['base_instance_name'], 612 | image=os_image, 613 | group_ids=group_ids) 614 | ).get_or_create() 615 | 616 | app_inst = AppInstance(app=app, image=base_image, group_ids=group_ids) 617 | app_instances = app_inst.get_or_create() 618 | app_inst.deploy_instances(app_instances) 619 | 620 | print '=====> DONE!' 621 | 622 | def create_app_ami(app_name, env_name): 623 | global aws 624 | 625 | aws = AWS.from_config(config) 626 | 627 | aws.connect() 628 | 629 | aws.setup() 630 | 631 | env.key_filename = get_pem_filename(aws.key_pair_name) 632 | 633 | os_image = aws.conn.get_image(aws.base_image) 634 | 635 | group_ids=[aws.get_security_group_id('riker-instance')] 636 | 637 | app = App(env_name, app_name) 638 | 639 | base_image = BaseImage(name=config['base_instance_name'], 640 | base_instance=BaseInstance(name=config['base_instance_name'], 641 | image=os_image, 642 | group_ids=group_ids) 643 | ).get_or_create() 644 | 645 | existing_app_image = LatestAppImage(app).get() 646 | 647 | app_inst = AppInstance(app=app, 648 | image=existing_app_image or base_image, 649 | group_ids=group_ids 650 | ) 651 | app_instances = app_inst.get_or_create() 652 | 653 | 654 | app.repo.fetch() 655 | 656 | app_inst.deploy_instances(app_instances) 657 | 658 | app_img = AppImage(app=app, 659 | instances=app_instances 660 | ) 661 | app_images = app_img.get_or_create() 662 | 663 | terminate_instances([inst.id for inst in app_instances]) 664 | 665 | print '-----> DONE: {} images ready'.format(app_images) 666 | 667 | def deploy_config_update(app_name, env_name): 668 | global aws 669 | aws = AWS.from_config(config) 670 | aws.connect() 671 | aws.setup() 672 | env.key_filename = get_pem_filename(aws.key_pair_name) 673 | os_image = aws.conn.get_image(aws.base_image) 674 | group_ids=[aws.get_security_group_id('riker-instance')] 675 | app = App(env_name, app_name) 676 | base_image = BaseImage(name=config['base_instance_name'], base_instance=BaseInstance(name=config['base_instance_name'], image=os_image, group_ids=group_ids)).get_or_create() 677 | existing_app_image = LatestAppImage(app).get() 678 | if not existing_app_image: 679 | raise Exception("Need previous deployment to update config!") 680 | app_inst = AppInstance(app=app, image=existing_app_image or base_image, group_ids=group_ids) 681 | app_instances = app_inst.get_or_create() 682 | app_inst.update_instances_config(app_instances) 683 | app_img = AppImage(app=app, instances=app_instances) 684 | app_images = app_img.get_or_create() 685 | terminate_instances([inst.id for inst in app_instances]) 686 | print '-----> DONE: {} images ready'.format(app_images) 687 | 688 | def deploy_latest_app_ami(app_name, env_name): 689 | 690 | global aws 691 | aws = AWS.from_config(config) 692 | 693 | aws.connect() 694 | 695 | lb_group_ids=[aws.get_security_group_id('riker-load-balancer')] 696 | 
inst_group_ids=[aws.get_security_group_id('riker-instance')] 697 | 698 | app = App(env_name, app_name) 699 | 700 | health_check_target = app.config.get('health_check', 'TCP:80') 701 | 702 | name = re.sub('[^A-Za-z0-9\-]', '-', app.name) 703 | 704 | app_image = LatestAppImage(app).get() 705 | 706 | print '-----> Connecting to ELB' 707 | elb_conn = boto.connect_elb() 708 | 709 | log('info', 'Load balancer', show_header=True) 710 | load_balancer_name = name 711 | try: 712 | elb_result = elb_conn.get_all_load_balancers(load_balancer_names=[load_balancer_name]) 713 | lb = elb_result[0] 714 | log('info', 'Found {}'.format(load_balancer_name)) 715 | except boto.exception.BotoServerError: 716 | log('info', 'Not found, creating load balancer') 717 | listeners = [(80, 80, 'HTTP', 'HTTP')] 718 | lb = elb_conn.create_load_balancer(name=load_balancer_name, 719 | zones=None, 720 | complex_listeners=listeners, 721 | security_groups=lb_group_ids, 722 | subnets=[aws.subnet_id]) 723 | hc = HealthCheck(target=health_check_target) 724 | lb.configure_health_check(hc) 725 | cda = ConnectionDrainingAttribute() 726 | cda.enabled = True 727 | cda.timeout = 300 728 | elb_conn.modify_lb_attribute(load_balancer_name=load_balancer_name, 729 | attribute='connectionDraining', 730 | value=cda) 731 | 732 | print '-----> Connecting to AutoScale' 733 | as_conn = boto.connect_autoscale() 734 | 735 | log('info', 'Launch configuration', show_header=True) 736 | launch_config_name = "{}-{}".format(name, app_image.tags['deploy-id']) 737 | lc_result = as_conn.get_all_launch_configurations(names=[launch_config_name]) 738 | if len(lc_result) == 0: 739 | log('info', 'Not found, creating LaunchConfiguration') 740 | lc = LaunchConfiguration(name=launch_config_name, 741 | image_id=app_image.id, 742 | key_name=aws.key_pair_name, 743 | security_groups=inst_group_ids, 744 | instance_type=aws.instance_type) 745 | as_conn.create_launch_configuration(lc) 746 | else: 747 | log('info', 'Found {}'.format(launch_config_name)) 748 | lc = lc_result[0] 749 | 750 | existing_group = None 751 | deploy_id = int(app_image.tags['deploy-id'] or 0) 752 | log('info', 'Getting previous auto-scaling group', show_header=True) 753 | for did in xrange(deploy_id-1, 0, -1): 754 | existing_group_name = "{}-{}".format(name, did) 755 | log('info', '{} ?'.format(existing_group_name)) 756 | ag_result = as_conn.get_all_groups(names=[existing_group_name]) 757 | if len(ag_result) > 0: 758 | existing_group = ag_result[0] 759 | log('info', 'Found {}'.format(existing_group.name)) 760 | break 761 | else: 762 | log('info', 'No') 763 | 764 | if existing_group is not None: 765 | existing_healthy_instances = [inst for inst in existing_group.instances if inst.lifecycle_state == 'InService' and inst.health_status == 'Healthy'] 766 | existing_healthy_instance_count = len(existing_healthy_instances) 767 | desired_capacity = existing_group.desired_capacity 768 | min_size = existing_group.min_size 769 | max_size = existing_group.max_size 770 | if existing_healthy_instance_count == 0 and desired_capacity == 0: 771 | print '-----> WARNING: existing auto-scaling group {} has no healthy instances and a desired capacity of 0. 
New auto-scaling group will launch 1 instance.'.format(existing_group) 772 | desired_capacity = 1 773 | min_size = 1 774 | max_size = max_size if max_size > 0 else 1 775 | else: 776 | existing_healthy_instance_count = 0 777 | desired_capacity = 1 778 | min_size = 1 779 | max_size = 1 780 | 781 | log('info', '{} existing instance(s) found'.format(existing_healthy_instance_count), show_header=True) 782 | 783 | log('info', 'Existing auto-scale properties: desired_capacity={}, min_size={}, max_size={}'.format(desired_capacity, min_size, max_size)) 784 | 785 | log('info', 'Auto-scaling group', show_header=True) 786 | group_name = "{}-{}".format(name, app_image.tags['deploy-id']) 787 | ag_result = as_conn.get_all_groups(names=[group_name]) 788 | if len(ag_result) == 0: 789 | log('info', 'Not found, creating autoscale group') 790 | ag = AutoScalingGroup(name=group_name, 791 | load_balancers=[load_balancer_name], launch_config=lc, 792 | desired_capacity=desired_capacity, min_size=min_size, max_size=max_size, 793 | health_check_type='ELB', health_check_period='300', 794 | vpc_zone_identifier=aws.subnet_id) 795 | as_conn.create_auto_scaling_group(ag) 796 | else: 797 | log('info', 'Found {}'.format(group_name)) 798 | ag = ag_result[0] 799 | ag.desired_capacity = desired_capacity 800 | ag.max_size = max_size 801 | ag.min_size = min_size 802 | ag.launch_config_name = launch_config_name 803 | ag.update() 804 | 805 | log('info', 'Waiting for new instances to become healthy', show_header=True) 806 | all_healthy = False 807 | for i in xrange(60): 808 | if i > 0: 809 | print ' ---' 810 | time.sleep(10) 811 | elb_result = elb_conn.get_all_load_balancers(load_balancer_names=[load_balancer_name]) 812 | lb = elb_result[0] 813 | lb_insts = lb.get_instance_health() 814 | print ' Load-balancer instances: {}'.format(lb_insts) 815 | # NOTE: re-get auto-scaling group to get updated instance info. 
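# (boto auto-scaling group objects are point-in-time snapshots: instance
# membership and health are only refreshed by fetching the group again, hence
# the re-query on every iteration of this polling loop.)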
816 |         ag = as_conn.get_all_groups(names=[group_name])[0]
817 |         ag_insts = [inst for inst in ag.instances]
818 |         log('info', 'Auto-scaling group instances: {}'.format(ag_insts))
819 |         if len(ag_insts) < desired_capacity:
820 |             not_yet_launched_count = desired_capacity - len(ag_insts)
821 |             log('info', '{} new instance(s) not yet launched'.format(not_yet_launched_count))
822 |             continue
823 |         ag_inst_ids = set(inst.instance_id for inst in ag_insts)
824 |         lb_inst_ids = set(inst.instance_id for inst in lb_insts)
825 |         asg_insts_not_in_lb = ag_inst_ids.difference(lb_inst_ids)
826 |         if len(asg_insts_not_in_lb) > 0:
827 |             log('info', '{} new instance(s) not yet in load balancer'.format(len(asg_insts_not_in_lb)))
828 |             continue
829 |         new_lb_insts = [inst for inst in lb_insts if inst.instance_id in ag_inst_ids]
830 |         healthy_new_lb_insts = [inst for inst in new_lb_insts if inst.state == 'InService']
831 |         all_healthy = len(healthy_new_lb_insts) == len(ag_insts)
832 |         log('info', '{} new instance(s) are healthy'.format(len(healthy_new_lb_insts)))
833 |         diff = existing_healthy_instance_count - len(healthy_new_lb_insts)  # scale the old group down as new instances come up
834 |         if existing_group is not None and diff >= 0:
835 |             change = False
836 |             if existing_group.desired_capacity != diff:
837 |                 existing_group.desired_capacity = diff
838 |                 change = True
839 |             if existing_group.max_size != diff:
840 |                 existing_group.max_size = diff
841 |                 change = True
842 |             if diff < existing_group.min_size:
843 |                 existing_group.min_size = diff
844 |                 change = True
845 |             if change:
846 |                 existing_group.update()
847 |                 log('info', 'Changed previous auto-scale group {} properties: desired_capacity={}, min_size={}, max_size={}'.format(existing_group, existing_group.desired_capacity, existing_group.min_size, existing_group.max_size))
848 |         if all_healthy:
849 |             log('info', 'All new instances healthy!', show_header=True)
850 |             healthy_lb_inst_ids = [inst.instance_id for inst in lb_insts if inst.state == 'InService']
851 |             previous_healthy_inst_ids = [inst.instance_id for inst in existing_healthy_instances] if existing_group else []
852 |             not_yet_out_of_service = set(previous_healthy_inst_ids).intersection(healthy_lb_inst_ids)
853 |             if len(not_yet_out_of_service) > 0:
854 |                 log('info', 'Waiting to remove previous instances ({}) from load balancer'.format(not_yet_out_of_service))
855 |             else:
856 |                 log('info', 'All previous instances ({}) have been removed from load balancer'.format(previous_healthy_inst_ids), show_header=True)
857 |         if all_healthy and len(not_yet_out_of_service) == 0:
858 |             break
859 |     else:
860 |         raise Exception("Timed out waiting for new instances to become healthy and old instances to drain")
861 | 
862 |     elb_result = elb_conn.get_all_load_balancers(load_balancer_names=[load_balancer_name])
863 |     lb = elb_result[0]
864 |     lb_insts = [inst for inst in lb.get_instance_health() if inst.state == 'InService']
865 |     print '-----> Deployed {} instance(s) of {} to {}'.format(len(lb_insts), app.name, lb.dns_name)
866 | 
867 |     print '-----> DONE!'
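# deploy_static (below) publishes a built site to an S3 website bucket: it
# skips the upload when the bucket's __VERSION__ marker already matches the
# repo's HEAD commit (unless force is given), syncs changed files, re-tags the
# version marker, writes any configured redirect keys, and can point a
# Route53 ALIAS record at the bucket's website endpoint.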
868 | 869 | def deploy_static(app_name, env_name, domain, force): 870 | app = App(env_name, app_name) 871 | bucket_name = domain or '{}-{}'.format(config.get('system_name', uuid.uuid1().hex), app.repo.name) 872 | 873 | app.repo.fetch() 874 | 875 | version = app.repo.head_commit_id() 876 | 877 | s3 = boto.connect_s3() 878 | b = s3.lookup(bucket_name) 879 | 880 | if b is not None: 881 | version_key = b.get_key('__VERSION__') 882 | if version_key is not None: 883 | current_version = version_key.get_metadata('git-version') 884 | if version == current_version: 885 | if force: 886 | print '-----> Version {} already deployed, but re-deploying anyway'.format(version) 887 | else: 888 | print '-----> Version {} already deployed!'.format(version) 889 | return 890 | 891 | with lcd(app.repo.path): 892 | build_cmd = app.config.get('build_script') 893 | if build_cmd: 894 | print '-----> Building' 895 | local(build_cmd) 896 | 897 | if b is None: 898 | print '-----> Creating bucket {}'.format(bucket_name) 899 | b = s3.create_bucket(bucket_name) 900 | 901 | # TODO: this policy allows all users read access to all objects. 902 | # Need to find a way to limit access to __VERSION__ to only authenticated 903 | # users. 904 | public_access_policy = json.dumps({"Version":"2012-10-17", 905 | "Statement":[{"Sid":"PublicReadForGetBucketObjects", 906 | "Effect":"Allow", 907 | "Principal": "*", 908 | "Action":["s3:GetObject"], 909 | "Resource":["arn:aws:s3:::{}/*".format(bucket_name)]}]}) 910 | b.set_policy(public_access_policy) 911 | #b.configure_versioning(versioning=False) 912 | b.configure_website(suffix="index.html", error_key="error.html") 913 | 914 | def map_key_to_obj(m, obj): 915 | if obj.key != '__VERSION__': 916 | m[obj.key] = obj 917 | return m 918 | existing_keys = reduce(map_key_to_obj, b.get_all_keys(), {}) 919 | 920 | root = normpath(join(app.repo.path, app.config.get('root_dir', ''))) 921 | 922 | app_redirects = app.config.get('redirects', {}) 923 | for key_name in app_redirects.keys(): 924 | existing_keys.pop(key_name, None) 925 | 926 | print '-----> Uploading {} to {} bucket'.format(root, bucket_name) 927 | new_keys = [] 928 | updated_keys = [] 929 | for dirname, dirnames, filenames in walk(root): 930 | reldirname = relpath(dirname, root) 931 | reldirname = '' if reldirname == '.' 
else reldirname
932 |         if os.path.commonprefix(['.git', reldirname]) == '.git':
933 |             continue
934 |         for filename in filenames:
935 |             full_filename = join(reldirname, filename)
936 |             if full_filename == '.s3':
937 |                 continue
938 |             new_or_update = '        '
939 |             if full_filename in existing_keys:
940 |                 new_or_update = '[UPDATE]'
941 |                 updated_keys.append(full_filename)
942 |                 key = existing_keys.pop(full_filename)
943 |             else:
944 |                 new_or_update = '[NEW]   '
945 |                 new_keys.append(full_filename)
946 |                 key = b.new_key(full_filename)
947 |             print '       {} Uploading {}'.format(new_or_update, full_filename)
948 |             key.set_contents_from_filename(join(dirname, filename))
949 |     if len(existing_keys) > 0:
950 |         print '-----> WARNING: the following files are still present but no'
951 |         print '       longer part of the website:'
952 |         for k in existing_keys:
953 |             print '       {}'.format(k)
954 | 
955 |     print '-----> Tagging bucket with git version {}'.format(version)
956 |     version_key = b.get_key('__VERSION__')
957 |     if version_key:
958 |         version_key.delete()
959 |     version_key = b.new_key('__VERSION__')
960 |     version_key.set_metadata('git-version', version)
961 |     version_key.set_contents_from_string('')
962 | 
963 |     print '-----> Setting up redirects'
964 |     app_redirects = app.config.get('redirects', {})
965 |     if len(app_redirects) == 0:
966 |         print '       No redirects.'
967 |     else:
968 |         def get_or_new_key(bucket, name):
969 |             key = bucket.get_key(name)
970 |             if key is not None:
971 |                 key.delete()
972 |             return bucket.new_key(name)
973 |         elb = boto.connect_elb()
974 |         pybars_compiler = pybars.Compiler()
975 |         for key_name, redirect_source in app_redirects.iteritems():
976 |             redirect_template = pybars_compiler.compile(redirect_source)
977 |             app_redirects[key_name] = redirect_template
978 |         data = {
979 |             'webui_dns': elb.get_all_load_balancers(load_balancer_names=['{}-web-ui'.format(env_name)])[0].dns_name
980 |         }
981 |         for key_name, redirect_template in app_redirects.iteritems():
982 |             k = get_or_new_key(b, key_name)
983 |             redirect = unicode(redirect_template(data))
984 |             print '       Redirect {} to {}'.format(key_name, redirect)
985 |             k.set_redirect(redirect)
986 | 
987 |     print '=====> Deployed to {}!'.format(b.get_website_endpoint())
988 | 
989 |     if domain is not None:
990 | 
991 |         # TODO: support redirection from www.
992 |         # b_www = 'www.{}'.format(bucket_name)
993 | 
994 |         ec2 = boto.connect_ec2()
995 |         region_name = first([z.region.name for z in ec2.get_all_zones() if z.name == config['availability_zone']])
996 |         s3_website_region = s3_website_regions[region_name]
997 | 
998 |         route53 = boto.connect_route53()
999 |         zone_name = "{}.".format(get_tld("http://{}".format(domain)))
1000 |         zone = route53.get_zone(zone_name)
1001 |         if zone is None:
1002 |             raise Exception("Cannot find zone {}".format(zone_name))
1003 |         full_domain = "{}.".format(domain)
1004 |         a_record = zone.get_a(full_domain)
1005 |         if not a_record:
1006 |             print '-----> Creating ALIAS for {} to S3'.format(full_domain)
1007 |             changes = ResourceRecordSets(route53, zone.id)
1008 |             change_a = changes.add_change('CREATE', full_domain, 'A')
1009 |             change_a.set_alias(alias_hosted_zone_id=s3_website_region[1], alias_dns_name=s3_website_region[0])
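# s3_website_region is a (website_endpoint, hosted_zone_id) pair: a Route53
# ALIAS for an S3-hosted site must target the region-specific S3 website
# endpoint and its fixed hosted zone ID, not the bucket's own DNS name.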
1010 |             #change_cname = changes.add_change('CREATE', 'www.' + full_domain, 'CNAME')
1011 |             #change_cname.add_value(b_www.get_website_endpoint())
1012 |             changes.commit()
1013 |         else:
1014 |             print '-----> ALIAS for {} to S3 already exists'.format(full_domain)
1015 |             print '       {}'.format(a_record)
1016 |             if a_record.alias_dns_name != s3_website_region[0]:
1017 |                 print '       WARNING: Alias DNS name is {}, but should be {}'.format(a_record.alias_dns_name, s3_website_region[0])
1018 |             if a_record.alias_hosted_zone_id != s3_website_region[1]:
1019 |                 print '       WARNING: Alias hosted zone ID is {}, but should be {}'.format(a_record.alias_hosted_zone_id, s3_website_region[1])
1020 |             if a_record.name != full_domain:
1021 |                 print '       WARNING: Domain is {}, but should be {}'.format(a_record.name, full_domain)
1022 |             if a_record.type != 'A':
1023 |                 print '       WARNING: Record type is {}, but should be {}'.format(a_record.type, 'A')
1024 | 
1025 |     print '=====> DONE!'
1026 | 
1027 | def get_ssh_command(inst_id):
1028 |     ec2 = boto.connect_ec2()
1029 |     instance = ec2.get_only_instances(instance_ids=[inst_id])[0]
1030 |     pem_filename = get_pem_filename(config['instance_key_pair_name'])
1031 |     return 'ssh -i {} {}@{}'.format(pem_filename, config['instance_user'], instance.public_dns_name)
1032 | 
1033 | def get_dokku_command(inst_id, cmd):
1034 |     ec2 = boto.connect_ec2()
1035 |     instance = ec2.get_only_instances(instance_ids=[inst_id])[0]
1036 |     return 'ssh {}@{} {}'.format(config['deploy_user'], instance.public_dns_name, cmd)
1037 | 
1038 | def do_ssh(inst_id):
1039 |     if inst_id is None:
1040 |         inst_id = get_info(None, None)[0]['instance_id']
1041 |     local(get_ssh_command(inst_id))
1042 | 
1043 | def dokku(inst_id, cmd):
1044 |     local(get_dokku_command(inst_id, cmd))
1045 | 
1046 | def get_info(app_name, env_name):
1047 |     ec2 = boto.connect_ec2()
1048 |     elb = boto.connect_elb()
1049 |     lb = None
1050 |     app = App(env_name, app_name)
1051 |     try:
1052 |         lbresult = elb.get_all_load_balancers(load_balancer_names=['{}-{}'.format(env_name, app_name)])
1053 |         lb = lbresult[0] if len(lbresult) > 0 else None
1054 |     except boto.exception.BotoServerError:
1055 |         pass
1056 |     if lb is None:
1057 |         instances = ec2.get_only_instances(filters={'tag:app': '{}/{}'.format(app.env_name, app.repo.name),
1058 |                                                     'tag:deployed': 'true',
1059 |                                                     'instance-state-name': 'running'})
1060 |         datas = [{'instance_id': instance.id,
1061 |                   'public_dns_name': instance.public_dns_name,
1062 |                   'ssh_command': get_ssh_command(instance.id)} for instance in instances]
1063 |         for data in datas:
1064 |             print '-----> Instance {}'.format(data['instance_id'])
1065 |             print '       DNS: {}'.format(data['public_dns_name'])
1066 |             print '       SSH: {}'.format(data['ssh_command'])
1067 |         if len(datas) == 0:
1068 |             print 'No deployment found'
1069 |             sys.exit(1)
1070 |         else:
1071 |             return datas
1072 |     print '-----> Load Balancer'
1073 |     print '       Name: {}'.format(lb.name)
1074 |     print '       DNS: {}'.format(lb.dns_name)
1075 |     i = 0
1076 |     for inst in lb.get_instance_health():
1077 |         i += 1
1078 |         print '-----> Instance #{}'.format(i)
1079 |         inst_id = inst.instance_id
1080 |         print '       ID: {}'.format(inst_id)
1081 |         print '       State: {}'.format(inst.state)
1082 |         instance = ec2.get_only_instances(instance_ids=[inst_id])[0]
1083 |         print '       DNS: {}'.format(instance.public_dns_name)
1084 |         print '       SSH: {}'.format(get_ssh_command(inst_id))
1085 |     if i == 0:
1086 |         print '-----> Instances'
1087 |         print '       None'
1088 | 
1089 | def get_url(app_name, env_name):
1090 |     protocol = 'http://'  # TODO detect if load balancer supports HTTPS
1091 |     app = App(env_name, app_name)
1092 | 
bucket_name = '{}-{}'.format(config.get('system_name', uuid.uuid1().hex), app.repo.name) 1093 | 1094 | ec2 = boto.connect_ec2() 1095 | elb = boto.connect_elb() 1096 | s3 = boto.connect_s3() 1097 | 1098 | b = s3.lookup(bucket_name) 1099 | if b is not None: 1100 | return protocol + b.get_website_endpoint() 1101 | 1102 | lb = None 1103 | try: 1104 | lbresult = elb.get_all_load_balancers(load_balancer_names=['{}-{}'.format(app.env_name, app.repo.name)]) 1105 | lb = lbresult[0] if len(lbresult) > 0 else None 1106 | except boto.exception.BotoServerError: 1107 | pass 1108 | if lb is None: 1109 | instances = ec2.get_only_instances(filters={'tag:app': app.name, 1110 | 'tag:deployed': 'true', 1111 | 'instance-state-name': 'running'}) 1112 | if len(instances) != 1: 1113 | return None 1114 | else: 1115 | return protocol + instances[0].public_dns_name 1116 | return protocol + lb.dns_name 1117 | 1118 | def is_static(arguments): 1119 | return arguments['--static'] or os.path.exists(join(getcwd(), '.s3')) 1120 | 1121 | def open_url(app_name, env_name): 1122 | import webbrowser 1123 | url = get_url(app_name, env_name) 1124 | if not url: 1125 | print "=====> Error: No running application found." 1126 | return False 1127 | print '-----> Opening {}'.format(url) 1128 | webbrowser.open_new(url) 1129 | 1130 | def initialize_configuration(show_output=False): 1131 | global config 1132 | global initialized 1133 | if not initialized: 1134 | config = riker_config.load_config(show_output) 1135 | env.user = config['instance_user'] 1136 | env.use_ssh_config = True 1137 | initialized = True 1138 | --------------------------------------------------------------------------------