├── nubo ├── __init__.py ├── clouds │ ├── __init__.py │ ├── rackspace.py │ ├── linode.py │ ├── opennebula.py │ ├── digitalocean.py │ ├── ec2.py │ └── base.py ├── config.py └── remote.py ├── docs ├── index.rst ├── Makefile └── conf.py ├── MANIFEST.in ├── requirements.txt ├── .travis.yml ├── .gitignore ├── Makefile ├── setup.py ├── LICENSE ├── tests.py ├── scripts └── nubo └── README.rst /nubo/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /nubo/clouds/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /docs/index.rst: -------------------------------------------------------------------------------- 1 | ../README.rst -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | include LICENSE 2 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | apache-libcloud==0.12.3 2 | argparse==1.2.1 3 | paramiko==1.10.1 4 | pycrypto==2.6 5 | texttable==0.8.1 6 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: python 2 | python: 3 | - 2.6 4 | - 2.7 5 | - pypy 6 | install: 7 | - pip install -r requirements.txt --use-mirrors 8 | - pip install coveralls 9 | - python setup.py install 10 | script: 11 | - coverage run --source=nubo tests.py 12 | after_success: 13 | - coveralls 14 | notifications: 15 | email: 16 | - ema@linux.it 17 | -------------------------------------------------------------------------------- /.gitignore: 
-------------------------------------------------------------------------------- 1 | *.py[cod] 2 | 3 | # C extensions 4 | *.so 5 | 6 | # Packages 7 | *.egg 8 | *.egg-info 9 | dist 10 | build 11 | eggs 12 | parts 13 | bin 14 | var 15 | sdist 16 | develop-eggs 17 | .installed.cfg 18 | lib 19 | lib64 20 | __pycache__ 21 | 22 | # Installer logs 23 | pip-log.txt 24 | 25 | # Unit test / coverage reports 26 | .coverage 27 | .tox 28 | nosetests.xml 29 | 30 | # Translations 31 | *.mo 32 | 33 | -------------------------------------------------------------------------------- /nubo/config.py: -------------------------------------------------------------------------------- 1 | import os 2 | import json 3 | 4 | CONFFILE = os.path.join(os.getenv('HOME'), '.nuborc') 5 | 6 | def read_config(): 7 | try: 8 | return json.loads(open(CONFFILE).read()) 9 | except IOError: 10 | return {} 11 | 12 | def write_config(values): 13 | old_values = read_config() 14 | 15 | updated = dict(old_values.items() + values.items()) 16 | open(CONFFILE, 'w').write(json.dumps(updated, indent=4)) 17 | 18 | os.chmod(CONFFILE, 0600) 19 | return updated 20 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | # 2 | 3 | PYTHON=`which python` 4 | COVERAGE=`which python-coverage` 5 | 6 | all: 7 | @echo "make source - Create source package" 8 | @echo "make install - Install on local system" 9 | @echo "make clean - Get rid of scratch and byte files" 10 | @echo "make test - Run unit tests and generate coverage report" 11 | @echo "make upload - Build and upload a new version to pypi" 12 | 13 | source: 14 | $(PYTHON) setup.py sdist 15 | 16 | install: 17 | $(PYTHON) setup.py install 18 | 19 | clean: 20 | $(PYTHON) setup.py clean 21 | rm -rf build/ dist/ nubo.egg-info/ MANIFEST 22 | find . 
-name '*.pyc' -delete 23 | 24 | test: 25 | $(COVERAGE) run --source=nubo tests.py 26 | $(COVERAGE) report -m 27 | 28 | upload: 29 | $(PYTHON) setup.py sdist bdist_egg upload 30 | -------------------------------------------------------------------------------- /nubo/remote.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | """ 4 | nubo.remote 5 | =========== 6 | 7 | Execute remote commands via SSH. 8 | 9 | :copyright: (C) 2013 by Emanuele Rocca. 10 | """ 11 | 12 | import paramiko 13 | 14 | ssh = paramiko.SSHClient() 15 | ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) 16 | 17 | class RemoteHost(object): 18 | def __init__(self, host, private_key): 19 | self.host = host 20 | self.private_key = private_key 21 | 22 | def run_command(self, command, user='root'): 23 | """Execute the given command as 'user' on the given host. 24 | 25 | Return stdout, stderr 26 | """ 27 | ssh.connect(self.host, username=user, key_filename=self.private_key) 28 | _, stdout, stderr = ssh.exec_command(command) 29 | return stdout.read(), stderr.read() 30 | 31 | def whoami(self, user='root'): 32 | return self.run_command("whoami", user)[0].rstrip('\n') 33 | 34 | if __name__ == "__main__": 35 | host1 = RemoteHost('192.168.122.6', '/home/ema/.ssh/id_rsa') 36 | print host1.run_command('uptime')[0] 37 | -------------------------------------------------------------------------------- /nubo/clouds/rackspace.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | """ 4 | nubo.clouds.rackspace 5 | ===================== 6 | 7 | Support deployments on Rackspace cloud. 8 | 9 | :copyright: (C) 2013 by Emanuele Rocca. 
10 | """ 11 | 12 | from libcloud.compute.deployment import MultiStepDeployment 13 | from libcloud.compute.deployment import ScriptDeployment 14 | from libcloud.compute.deployment import SSHKeyDeployment 15 | 16 | from nubo.clouds.base import BaseCloud, node2dict 17 | 18 | class Rackspace(BaseCloud): 19 | 20 | PROVIDER_NAME = 'RACKSPACE' 21 | 22 | def deploy(self, image_id, size_idx=0, location_idx=0, name='test'): 23 | """Rackspace supports libcloud's `libcloud.compute.deployment`. 24 | 25 | Pass an `SSHKeyDeployment` to `self.driver.deploy_node`.""" 26 | sd = SSHKeyDeployment(open(self.ssh_public_key).read()) 27 | script = ScriptDeployment("/bin/true") # NOP 28 | msd = MultiStepDeployment([sd, script]) 29 | 30 | class Image: 31 | id = image_id 32 | 33 | size = self.driver.list_sizes()[size_idx] 34 | location = self.driver.list_locations()[location_idx] 35 | 36 | return node2dict(self.driver.deploy_node(name=name, image=Image, 37 | size=size, location=location, deploy=msd)) 38 | -------------------------------------------------------------------------------- /nubo/clouds/linode.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | """ 4 | nubo.clouds.Linode 5 | ===================== 6 | 7 | Support deployments on Linode. 8 | 9 | :copyright: (C) 2013 by Emanuele Rocca. 10 | """ 11 | 12 | from libcloud.compute.deployment import MultiStepDeployment 13 | from libcloud.compute.deployment import ScriptDeployment 14 | from libcloud.compute.deployment import SSHKeyDeployment 15 | 16 | from nubo.clouds.base import BaseCloud, node2dict 17 | 18 | class Linode(BaseCloud): 19 | 20 | PROVIDER_NAME = 'LINODE' 21 | NEEDED_PARAMS = ['key'] 22 | 23 | def deploy(self, image_id, size_idx=0, location_idx=0, name='test'): 24 | """Linode supports libcloud's `libcloud.compute.deployment`. 
25 | 26 | Pass an `SSHKeyDeployment` to `self.driver.deploy_node`.""" 27 | sd = SSHKeyDeployment(open(self.ssh_public_key).read()) 28 | script = ScriptDeployment("/bin/true") # NOP 29 | msd = MultiStepDeployment([sd, script]) 30 | 31 | class Image: 32 | id = image_id 33 | 34 | size = self.driver.list_sizes()[size_idx] 35 | location = self.driver.list_locations()[location_idx] 36 | 37 | return node2dict(self.driver.deploy_node(name=name, image=Image, 38 | size=size, location=location, deploy=msd)) 39 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | """ 2 | nubo 3 | ---- 4 | An easy way to deploy Linux VMs on different cloud providers. 5 | 6 | Links 7 | ````` 8 | * `GitHub Repository `_ 9 | * `Development Version 10 | `_ 11 | """ 12 | 13 | from setuptools import setup 14 | 15 | install_requires = [ 16 | 'setuptools', 17 | 'apache-libcloud', 18 | 'paramiko', 19 | 'texttable' 20 | ] 21 | 22 | try: 23 | import importlib 24 | except ImportError: 25 | install_requires.append('importlib') 26 | 27 | setup( 28 | name='nubo', 29 | version='0.7', 30 | url='http://pythonhosted.org/nubo', 31 | license='BSD', 32 | author='Emanuele Rocca', 33 | author_email='ema@linux.it', 34 | description='Virtual Machine deployments on multiple cloud providers', 35 | long_description=__doc__, 36 | install_requires=install_requires, 37 | packages=['nubo', 'nubo.clouds'], 38 | scripts=['scripts/nubo'], 39 | classifiers=[ 40 | 'Development Status :: 4 - Beta', 41 | 'Environment :: Console', 42 | 'License :: OSI Approved :: BSD License', 43 | 'Programming Language :: Python', 44 | 'Intended Audience :: Developers', 45 | 'Intended Audience :: System Administrators', 46 | 'Topic :: Internet', 47 | 'Topic :: System', 48 | ], 49 | keywords='cloud vm startup devops ec2 rackspace linode', 50 | ) 51 | 
-------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright (C) 2013 by Emanuele Rocca. 2 | 3 | Some rights reserved. 4 | 5 | Redistribution and use in source and binary forms, with or without 6 | modification, are permitted provided that the following conditions 7 | are met: 8 | 9 | * Redistributions of source code must retain the above copyright 10 | notice, this list of conditions and the following disclaimer. 11 | 12 | * Redistributions in binary form must reproduce the above 13 | copyright notice, this list of conditions and the following 14 | disclaimer in the documentation and/or other materials provided 15 | with the distribution. 16 | 17 | * The names of the contributors may not be used to endorse or 18 | promote products derived from this software without specific 19 | prior written permission. 20 | 21 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 24 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 25 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 26 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 27 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 31 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
32 | -------------------------------------------------------------------------------- /nubo/clouds/opennebula.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | """ 4 | nubo.clouds.opennebula 5 | ====================== 6 | 7 | Support deployments on OpenNebula private clouds. 8 | 9 | :copyright: (C) 2013 by Emanuele Rocca. 10 | """ 11 | 12 | from nubo.clouds.base import BaseCloud 13 | from nubo.clouds.base import AVAILABLE_CLOUDS, CLOUDS_MAPPING 14 | 15 | class OpenNebula(BaseCloud): 16 | 17 | PROVIDER_NAME = 'OPENNEBULA' 18 | NEEDED_PARAMS = [ 'key', 'secret', 'host', 'port', 'network_id', 'api_version' ] 19 | 20 | def __init__(self, ssh_private_key=None, login_as='root'): 21 | self.network_id = AVAILABLE_CLOUDS[ 22 | CLOUDS_MAPPING['OPENNEBULA']].pop('network_id') 23 | BaseCloud.__init__(self, ssh_private_key) 24 | 25 | def deploy(self, image_id, size_idx=0, location_idx=0, name='test'): 26 | script = """#!/bin/bash 27 | dhclient eth0 28 | 29 | # assiging IP 30 | . 
/mnt/context.sh 31 | ip addr add dev eth0 $IP_PUBLIC 32 | 33 | # removing IP obtained from DHCP 34 | ip addr del dev eth0 `ip addr show dev eth0 | awk '/inet 192/ { print $2 ; exit }'` 35 | 36 | # adding ssh_key_file 37 | mkdir ~%s/.ssh || true 38 | cat <~%s/.ssh/authorized_keys 39 | %s 40 | EOF 41 | """ % (self.login_as, self.login_as, open(self.ssh_public_key).read()) 42 | 43 | size = self.driver.list_sizes()[size_idx] 44 | 45 | class Image: 46 | id = image_id 47 | 48 | class Network: 49 | id = self.network_id 50 | address = None 51 | 52 | context = { 53 | 'USERDATA': script.encode('hex'), 54 | 'IP_PUBLIC': '$NIC[IP]' 55 | } 56 | return self.startup({ 'size': size, 'image': Image, 57 | 'networks': Network, 'name': name, 58 | 'context': context }) 59 | -------------------------------------------------------------------------------- /nubo/clouds/digitalocean.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | """ 4 | nubo.clouds.digitalocean 5 | ======================== 6 | 7 | Support deployments on DigitalOcean cloud. 8 | 9 | :copyright: (C) 2013 by Emanuele Rocca. 10 | """ 11 | 12 | from nubo.clouds.base import BaseCloud 13 | 14 | class DigitalOcean(BaseCloud): 15 | 16 | PROVIDER_NAME = 'DIGITAL_OCEAN' 17 | 18 | def get_ssh_key_id(self): 19 | """Return uploaded key id if this SSH public key has been already 20 | submitted to Digital Ocean. We use libcloud's 21 | `driver.ex_list_ssh_keys` in order to find it out. 22 | 23 | Return None if the SSH key still has to be uploaded.""" 24 | uploaded_key = [ key.id for key in self.driver.ex_list_ssh_keys() 25 | if key.name == self.ssh_key_name ] 26 | 27 | if uploaded_key: 28 | return str(uploaded_key[0]) 29 | 30 | def deploy(self, image_id, size_idx=0, location_idx=0, name='test'): 31 | """Digital Ocean needs the following information: VM size, image, name, 32 | location and SSH key id. 
33 | 34 | First, we check if our SSH key is already uploaded on Digital Ocean's 35 | cloud. If not, we upload it using libcloud's `driver.ex_create_ssh_key`. 36 | Then, we call `self.startup` with the required arguments.""" 37 | key_id = self.get_ssh_key_id() 38 | 39 | if not key_id: 40 | uploaded_key = self.driver.ex_create_ssh_key(self.ssh_key_name, 41 | open(self.ssh_public_key).read()) 42 | 43 | key_id = str(uploaded_key.id) 44 | 45 | class Image: 46 | id = image_id 47 | 48 | size = self.driver.list_sizes()[size_idx] 49 | location = self.driver.list_locations()[location_idx] 50 | 51 | return self.startup({ 52 | 'size': size, 'image': Image, 'name': name, 53 | 'location': location, 'ex_ssh_key_ids': [ key_id ] 54 | }) 55 | -------------------------------------------------------------------------------- /nubo/clouds/ec2.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | """ 4 | nubo.clouds.ec2 5 | =============== 6 | 7 | Support deployments on Amazon EC2. 8 | 9 | :copyright: (C) 2013 by Emanuele Rocca. 10 | """ 11 | 12 | from nubo.clouds.base import BaseCloud 13 | 14 | class AmazonEC2(BaseCloud): 15 | 16 | PROVIDER_NAME = 'EC2_US_EAST' 17 | 18 | def get_ssh_key_id(self): 19 | """Return uploaded key id if this SSH public key has been already 20 | submitted to Amazon EC2. We use libcloud's `driver.ex_describe_keypairs` 21 | in order to find it out. 22 | 23 | Return None if the SSH key still has to be uploaded.""" 24 | try: 25 | key = self.driver.ex_describe_keypairs(self.ssh_key_name) 26 | return key['keyName'] 27 | except Exception: 28 | # This key has not been uploaded yet 29 | return 30 | 31 | def list_images(self, limit=20, keyword=''): 32 | """Amazon also returns kernel-related info in `driver.list_images`. We 33 | do not care about kernels here, only about bootable VM images (AMIs). 34 | 35 | First, we get the list of available AMIs. 
Then, we search for the 36 | user-specified keyword (if any). 37 | 38 | Only 20 results are returned by default to avoid flooding users with 39 | too much output.""" 40 | ami_images = [ image for image in self.driver.list_images() 41 | if 'ami-' in image.id ] 42 | 43 | if not limit: 44 | limit = 20 45 | 46 | return [ image for image in ami_images 47 | if (keyword.lower() in image.name.lower()) or not keyword ][:limit] 48 | 49 | def deploy(self, image_id, size_idx=0, location_idx=0, name='test'): 50 | """Amazon EC2 needs the following information: VM size, image, name, 51 | location, SSH key name and security group name. 52 | 53 | First, we check if our SSH key is already uploaded on Amazon's cloud. 54 | If not, we upload it using libcloud's `driver.ex_import_keypair`. 55 | 56 | Then, we create a permissive Security Group with 57 | `driver.ex_create_security_group` and 58 | `driver.ex_authorize_security_group_permissive`. 59 | 60 | Finally, we call `self.startup` with the required arguments.""" 61 | # Uploading SSH key if necessary 62 | key_id = self.get_ssh_key_id() 63 | 64 | if not key_id: 65 | key = self.driver.ex_import_keypair(self.ssh_key_name, 66 | self.ssh_public_key) 67 | 68 | key_id = key['keyName'] 69 | 70 | # Creating security group if necessary 71 | if __name__ not in self.driver.ex_list_security_groups(): 72 | self.driver.ex_create_security_group(__name__, "nubolib's SG") 73 | self.driver.ex_authorize_security_group_permissive(__name__) 74 | 75 | class Image: 76 | id = image_id 77 | 78 | size = self.driver.list_sizes()[size_idx] 79 | location = self.driver.list_locations()[location_idx] 80 | 81 | return self.startup({ 82 | 'size': size, 'image': Image, 'name': name, 83 | 'location': location, 'ex_keyname': key_id, 84 | 'ex_securitygroup': __name__ 85 | }) 86 | -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Makefile 
for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line. 5 | SPHINXOPTS = 6 | SPHINXBUILD = sphinx-build 7 | PAPER = 8 | BUILDDIR = _build 9 | 10 | # Internal variables. 11 | PAPEROPT_a4 = -D latex_paper_size=a4 12 | PAPEROPT_letter = -D latex_paper_size=letter 13 | ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 14 | 15 | .PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest 16 | 17 | help: 18 | @echo "Please use \`make ' where is one of" 19 | @echo " html to make standalone HTML files" 20 | @echo " dirhtml to make HTML files named index.html in directories" 21 | @echo " singlehtml to make a single large HTML file" 22 | @echo " pickle to make pickle files" 23 | @echo " json to make JSON files" 24 | @echo " htmlhelp to make HTML files and a HTML help project" 25 | @echo " qthelp to make HTML files and a qthelp project" 26 | @echo " devhelp to make HTML files and a Devhelp project" 27 | @echo " epub to make an epub" 28 | @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" 29 | @echo " latexpdf to make LaTeX files and run them through pdflatex" 30 | @echo " text to make text files" 31 | @echo " man to make manual pages" 32 | @echo " changes to make an overview of all changed/added/deprecated items" 33 | @echo " linkcheck to check all external links for integrity" 34 | @echo " doctest to run all doctests embedded in the documentation (if enabled)" 35 | 36 | clean: 37 | -rm -rf $(BUILDDIR)/* 38 | 39 | html: 40 | $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html 41 | @echo 42 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." 43 | 44 | dirhtml: 45 | $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml 46 | @echo 47 | @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." 
48 | 49 | singlehtml: 50 | $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml 51 | @echo 52 | @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." 53 | 54 | pickle: 55 | $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle 56 | @echo 57 | @echo "Build finished; now you can process the pickle files." 58 | 59 | json: 60 | $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json 61 | @echo 62 | @echo "Build finished; now you can process the JSON files." 63 | 64 | htmlhelp: 65 | $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp 66 | @echo 67 | @echo "Build finished; now you can run HTML Help Workshop with the" \ 68 | ".hhp project file in $(BUILDDIR)/htmlhelp." 69 | 70 | qthelp: 71 | $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp 72 | @echo 73 | @echo "Build finished; now you can run "qcollectiongenerator" with the" \ 74 | ".qhcp project file in $(BUILDDIR)/qthelp, like this:" 75 | @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/Flask-OAuth.qhcp" 76 | @echo "To view the help file:" 77 | @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/Flask-OAuth.qhc" 78 | 79 | devhelp: 80 | $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp 81 | @echo 82 | @echo "Build finished." 83 | @echo "To view the help file:" 84 | @echo "# mkdir -p $$HOME/.local/share/devhelp/Flask-OAuth" 85 | @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/Flask-OAuth" 86 | @echo "# devhelp" 87 | 88 | epub: 89 | $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub 90 | @echo 91 | @echo "Build finished. The epub file is in $(BUILDDIR)/epub." 92 | 93 | latex: 94 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 95 | @echo 96 | @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." 97 | @echo "Run \`make all-pdf' or \`make all-ps' in that directory to" \ 98 | "run these through (pdf)latex." 
99 | 100 | latexpdf: latex 101 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex 102 | @echo "Running LaTeX files through pdflatex..." 103 | make -C $(BUILDDIR)/latex all-pdf 104 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." 105 | 106 | text: 107 | $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text 108 | @echo 109 | @echo "Build finished. The text files are in $(BUILDDIR)/text." 110 | 111 | man: 112 | $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man 113 | @echo 114 | @echo "Build finished. The manual pages are in $(BUILDDIR)/man." 115 | 116 | changes: 117 | $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes 118 | @echo 119 | @echo "The overview file is in $(BUILDDIR)/changes." 120 | 121 | linkcheck: 122 | $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck 123 | @echo 124 | @echo "Link check complete; look for any errors in the above output " \ 125 | "or in $(BUILDDIR)/linkcheck/output.txt." 126 | 127 | doctest: 128 | $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest 129 | @echo "Testing of doctests in the sources finished, look at the " \ 130 | "results in $(BUILDDIR)/doctest/output.txt." 
131 | -------------------------------------------------------------------------------- /tests.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | from nubo import config 4 | from nubo import remote 5 | 6 | from nubo.clouds import base 7 | 8 | from nubo.clouds.ec2 import AmazonEC2 9 | from nubo.clouds.rackspace import Rackspace 10 | from nubo.clouds.opennebula import OpenNebula 11 | from nubo.clouds.digitalocean import DigitalOcean 12 | from nubo.clouds.linode import Linode 13 | 14 | import unittest 15 | import tempfile 16 | 17 | from os import getenv, unlink 18 | from os.path import join 19 | 20 | class DummyCloud(base.BaseCloud): 21 | """Dummy cloud using the DUMMY libcloud provider""" 22 | PROVIDER_NAME = 'DUMMY' 23 | 24 | class BaseTest(unittest.TestCase): 25 | 26 | def test_supported_clouds(self): 27 | self.failUnless(base.supported_clouds()) 28 | 29 | for cloud in base.supported_clouds(): 30 | self.assertEquals(str, type(cloud)) 31 | 32 | def __test_get_cloud(self, provider_name, cloud_class): 33 | cloud = base.get_cloud(provider_name) 34 | self.assertEquals(cloud_class, cloud) 35 | self.assertEquals(provider_name, cloud.PROVIDER_NAME) 36 | 37 | def test_get_cloud(self): 38 | self.__test_get_cloud('EC2_AP_SOUTHEAST2', AmazonEC2) 39 | self.__test_get_cloud('OPENNEBULA', OpenNebula) 40 | self.__test_get_cloud('DIGITAL_OCEAN', DigitalOcean) 41 | self.__test_get_cloud('RACKSPACE', Rackspace) 42 | 43 | def test_read_config(self): 44 | self.assertEquals(dict, type(config.read_config())) 45 | 46 | def test_read_no_conffile(self): 47 | oldfile = config.CONFFILE 48 | 49 | config.CONFFILE = '/does_not_exist' 50 | self.assertEquals({}, config.read_config()) 51 | 52 | config.CONFFILE = oldfile 53 | 54 | def test_write_config_no_new_values(self): 55 | old_config = config.read_config() 56 | new_config = config.write_config({}) 57 | self.assertEquals(old_config, new_config) 58 | 59 | def 
test_remote_object(self): 60 | host = remote.RemoteHost('127.0.0.1', 'dummy') 61 | self.assertEquals(host.host, '127.0.0.1') 62 | self.assertEquals(host.private_key, 'dummy') 63 | 64 | class BaseCloudTest(unittest.TestCase): 65 | 66 | def setUp(self): 67 | base.CLOUDS_MAPPING['DUMMY'] = 'tests.DummyCloud' 68 | base.AVAILABLE_CLOUDS = { 69 | 'tests.DummyCloud': { 'creds': '', } 70 | } 71 | 72 | # Write dummy private key file 73 | self.privkey = tempfile.mkstemp()[1] 74 | 75 | # Write dummy public key file 76 | self.pubkey = self.privkey + '.pub' 77 | open(self.pubkey, 'w').write('') 78 | 79 | self.CloudClass = base.get_cloud('DUMMY') 80 | self.cloud = self.CloudClass(ssh_private_key=self.privkey) 81 | 82 | def tearDown(self): 83 | unlink(self.privkey) 84 | unlink(self.pubkey) 85 | 86 | def test_test_conn(self): 87 | self.failUnless(self.CloudClass.test_conn(creds='')) 88 | 89 | def test_init(self): 90 | self.assertEquals(self.cloud.ssh_public_key, 91 | self.cloud.ssh_private_key + '.pub') 92 | 93 | self.assertEquals('root', self.cloud.login_as) 94 | 95 | def test_init_wrong_provider_name(self): 96 | self.CloudClass.PROVIDER_NAME = 'WRONG_PROVIDER' 97 | self.assertRaises(Exception, self.CloudClass) 98 | 99 | def test_startup(self): 100 | # Patching RemoteHost.run_command: we do not want to actually ssh into 101 | # the fake server 102 | remote.RemoteHost.run_command = lambda x, y, z: ('root', '') 103 | 104 | new_node = self.cloud.startup({}) 105 | 106 | self.assertEquals(dict, type(new_node)) 107 | self.assertEquals('RUNNING', new_node['state']) 108 | 109 | def test_node2dict(self): 110 | node = self.cloud.driver.list_nodes()[0] 111 | expected = { 112 | 'extra': {'foo': 'bar'}, 113 | 'id': '1', 114 | 'name': 'dummy-1', 115 | 'private_ips': [], 116 | 'public_ips': ['127.0.0.1'], 117 | 'state': 'RUNNING' 118 | } 119 | self.assertEquals(expected, base.node2dict(node)) 120 | 121 | def test_is_running(self): 122 | self.failUnless(self.cloud.is_running(node_id='1')) 123 | 
self.failIf(self.cloud.is_running(node_id='42')) 124 | 125 | def test_shutdown(self): 126 | # node 42 is not running. shutdown should return False 127 | self.failIf(self.cloud.shutdown(node_id='42')) 128 | 129 | # this one does not work, libcloud's DUMMY driver is a bit outdated 130 | #self.failUnless(cloud.shutdown(node_id='1')) 131 | 132 | def test_reboot(self): 133 | # node 42 is not running. reboot should return False 134 | self.failIf(self.cloud.reboot(node_id='42')) 135 | 136 | # node 1 is running. reboot should return True 137 | self.failUnless(self.cloud.reboot(node_id='1')) 138 | 139 | def test_list_nodes(self): 140 | nodes = self.cloud.list_nodes() 141 | self.assertEquals(2, len(nodes)) 142 | 143 | for node in nodes: 144 | self.assertEquals('RUNNING', node['state']) 145 | 146 | def test_list_images(self): 147 | images = self.cloud.list_images() 148 | 149 | self.assertEquals(3, len(images)) 150 | 151 | self.assertEquals('Ubuntu 9.10', images[0].name) 152 | self.assertEquals('Ubuntu 9.04', images[1].name) 153 | self.assertEquals('Slackware 4', images[2].name) 154 | 155 | def test_list_images_with_limit(self): 156 | images = self.cloud.list_images(limit=2) 157 | self.assertEquals(2, len(images)) 158 | self.assertEquals('Ubuntu 9.10', images[0].name) 159 | self.assertEquals('Ubuntu 9.04', images[1].name) 160 | 161 | def test_list_images_with_keyword(self): 162 | images = self.cloud.list_images(keyword='slackware') 163 | self.assertEquals(1, len(images)) 164 | self.assertEquals('Slackware 4', images[0].name) 165 | 166 | def test_list_sizes(self): 167 | sizes = self.cloud.list_sizes() 168 | expected = [ 'Small', 'Medium', 'Big', 'XXL Big'] 169 | self.assertEquals(expected, sizes) 170 | 171 | def test_deploy_on_base_class(self): 172 | base.BaseCloud.PROVIDER_NAME = 'DUMMY' 173 | cloud = base.BaseCloud(ssh_private_key=self.privkey) 174 | self.assertRaises(NotImplementedError, cloud.deploy, ['']) 175 | 176 | if __name__ == "__main__": 177 | unittest.main() 178 | 
-------------------------------------------------------------------------------- /scripts/nubo: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # -*- coding: utf-8 -*- 3 | 4 | """ 5 | nubo 6 | ==== 7 | 8 | CLI interface to multiple cloud providers. 9 | 10 | :copyright: (C) 2013 by Emanuele Rocca. 11 | """ 12 | 13 | import os 14 | import sys 15 | import argparse 16 | import readline 17 | 18 | from nubo.config import write_config, read_config 19 | from nubo.clouds.base import supported_clouds, get_cloud, CLOUDS_MAPPING 20 | 21 | from texttable import Texttable 22 | 23 | def rlinput(prompt, prefill='', default=None): 24 | readline.set_startup_hook(lambda: readline.insert_text(prefill)) 25 | try: 26 | prompt = "{} [Default: {}]:".format(prompt, default or "") 27 | 28 | return raw_input(prompt) or default 29 | except (KeyboardInterrupt, EOFError): 30 | print 31 | sys.exit(0) 32 | finally: 33 | readline.set_startup_hook() 34 | 35 | def print_table(rows): 36 | table = Texttable() 37 | table.set_deco(Texttable.HEADER) 38 | table.add_rows(rows) 39 | print table.draw() 40 | 41 | def config(args): 42 | sclouds = supported_clouds() 43 | sclouds.sort() 44 | 45 | for idx, cloud in enumerate(sclouds): 46 | print "% 2s" % (1 + idx), cloud 47 | 48 | try: 49 | cloudidx = int(rlinput( 50 | "Please choose the cloud provider you want to setup [1-%d] " % 51 | len(sclouds))) 52 | 53 | Cloud = get_cloud(sclouds[cloudidx - 1]) 54 | except (IndexError, ValueError): 55 | return config(args) 56 | 57 | values = {} 58 | for what in Cloud.NEEDED_PARAMS: 59 | try: 60 | oldval = read_config()[CLOUDS_MAPPING[Cloud.PROVIDER_NAME]][what] 61 | except (IndexError, KeyError): 62 | oldval = '' 63 | values[what] = rlinput('Please provide your API %s: ' % what, oldval) 64 | 65 | privkey = rlinput( 66 | "Please enter the location of your Private SSH Key", 67 | default="~/.ssh/id_rsa" 68 | ) 69 | 70 | if Cloud.test_conn(**values): 71 | 
write_config({ 72 | CLOUDS_MAPPING[Cloud.PROVIDER_NAME]: values, 73 | "nubo": { 74 | "privkey": privkey 75 | } 76 | }) 77 | print Cloud.PROVIDER_NAME, "cloud configured properly" 78 | return 79 | else: 80 | print "\nE: Invalid Credentials\n" 81 | return config(args) 82 | 83 | def clouds(args): 84 | for cl in supported_clouds(): 85 | print cl 86 | 87 | def images(args): 88 | Cloud = get_cloud() 89 | 90 | images = Cloud().list_images(keyword=args.keyword, limit=args.limit) 91 | 92 | print len(images), "images available on", Cloud.PROVIDER_NAME 93 | 94 | if not images: 95 | return 96 | 97 | rows = [ [ 'id', 'name', ] ] 98 | for img in images: 99 | rows.append([ img.id, img.name ]) 100 | 101 | print_table(rows) 102 | 103 | def sizes(args): 104 | Cloud = get_cloud() 105 | 106 | rows = [ [ 'id', 'name', ] ] 107 | 108 | for idx, name in enumerate(Cloud().list_sizes()): 109 | rows.append([ idx, name ]) 110 | 111 | print_table(rows) 112 | 113 | def list_(args): 114 | Cloud = get_cloud() 115 | 116 | nodes = Cloud().list_nodes() 117 | print len(nodes), "VMs running on", Cloud.PROVIDER_NAME 118 | 119 | if not nodes: 120 | return 121 | 122 | rows = [ [ 'id', 'name', 'state', 'ip' ] ] 123 | for node in nodes: 124 | rows.append([ node['id'], 125 | node['name'], 126 | node['state'], 127 | ', '.join(node['public_ips']) ]) 128 | 129 | print_table(rows) 130 | 131 | def start(args): 132 | CloudClass = get_cloud() 133 | 134 | cloud = CloudClass(ssh_private_key=args.privkey, login_as=args.user) 135 | 136 | vm = cloud.deploy(image_id=args.imageid, size_idx=args.sizeid, 137 | name=args.name) 138 | 139 | print "Instance %s available on %s. 
Login as %s@%s" % ( 140 | vm['id'], os.getenv('NUBO_CLOUD'), args.user, ', '.join(vm['public_ips'])) 141 | 142 | def reboot(args): 143 | if get_cloud()().reboot(args.vmid): 144 | print args.vmid, "rebooted" 145 | 146 | def delete(args): 147 | if get_cloud()().shutdown(args.vmid): 148 | print args.vmid, "deleted" 149 | 150 | def main(): 151 | arger = argparse.ArgumentParser( 152 | #usage='%(prog)s [options]', 153 | description='Start Virtual Machines on multiple clouds') 154 | 155 | subparsers = arger.add_subparsers() 156 | 157 | # config 158 | parser_config = subparsers.add_parser("config", 159 | help="set your cloud credentials") 160 | parser_config.set_defaults(func=config) 161 | 162 | # clouds 163 | parser_clouds = subparsers.add_parser("clouds", 164 | help="list available clouds") 165 | parser_clouds.set_defaults(func=clouds) 166 | 167 | # list 168 | parser_list = subparsers.add_parser("list", help="list running VMs") 169 | parser_list.set_defaults(func=list_) 170 | 171 | # images 172 | parser_images = subparsers.add_parser("images", 173 | help="list available images") 174 | parser_images.add_argument("--keyword", default='') 175 | parser_images.add_argument("--limit", default=0, type=int, 176 | help='the number of images to display') 177 | parser_images.set_defaults(func=images) 178 | 179 | # sizes 180 | parser_sizes = subparsers.add_parser("sizes", 181 | help="list available instance sizes") 182 | parser_sizes.set_defaults(func=sizes) 183 | 184 | # start 185 | parser_start = subparsers.add_parser("start", help="start a new VM") 186 | parser_start.add_argument("imageid") 187 | parser_start.add_argument("--user", default='root') 188 | parser_start.add_argument("--privkey", default=None) 189 | parser_start.add_argument("--sizeid", default=0, type=int) 190 | parser_start.add_argument("--name", default='new-instance') 191 | parser_start.set_defaults(func=start) 192 | 193 | # reboot 194 | parser_reboot = subparsers.add_parser("reboot", help="reboot a given VM") 195 
| parser_reboot.add_argument("vmid") 196 | parser_reboot.set_defaults(func=reboot) 197 | 198 | # delete 199 | parser_delete = subparsers.add_parser("delete", help="delete a given VM") 200 | parser_delete.add_argument("vmid") 201 | parser_delete.set_defaults(func=delete) 202 | 203 | if len(sys.argv) == 1: 204 | # At least one argument is expected 205 | arger.print_help() 206 | return 207 | 208 | # We got (at least) one argument 209 | opts = arger.parse_args() 210 | opts.func(opts) 211 | 212 | if __name__ == "__main__": 213 | main() 214 | -------------------------------------------------------------------------------- /README.rst: -------------------------------------------------------------------------------- 1 | Nubo 2 | ==== 3 | 4 | .. image:: https://secure.travis-ci.org/ema/nubo.png?branch=master 5 | :target: http://travis-ci.org/ema/nubo 6 | 7 | .. module:: nubo 8 | 9 | `Nubo` is a command line program that allows you to start virtual machines on 10 | different cloud providers, also making sure you can SSH into those instances 11 | once they are available. 12 | 13 | As an example, you might want to start a new node on Amazon EC2:: 14 | 15 | $ export NUBO_CLOUD=EC2_EU_WEST 16 | $ nubo start ami-27013f53 17 | Instance i-4ea89004 available on EC2_EU_WEST. Public IP: 54.247.8.150 18 | 19 | And then install puppet on it:: 20 | 21 | $ ssh root@54.247.8.150 "apt-get -y install puppet" 22 | Warning: Permanently added '54.247.8.150' (RSA) to the list of known hosts. 23 | Reading package lists... 24 | Building dependency tree... 25 | Reading state information... 26 | The following extra packages will be installed: 27 | [...] 28 | 29 | One of the biggest challenges when deploying virtual machines on multiple 30 | clouds is ensuring you can actually access those machines after they have 31 | started up. For example, different cloud providers allow you to upload your SSH 32 | public key in different ways. 
Certain providers automatically configure 33 | firewall rules which by default deny traffic to your instances. If your 34 | deployments need to be automated, your infrastructure code has to deal with 35 | that. 36 | 37 | `nubo` abstracts away these differences for you. It uses `Apache Libcloud`_ to 38 | start virtual machines on different cloud providers and `Paramiko`_ to 39 | establish SSH connections to the instances you start. Its functionalities are 40 | also available as a Python library. 41 | 42 | .. _Apache Libcloud: https://libcloud.apache.org/ 43 | .. _Paramiko: http://www.lag.net/paramiko/ 44 | 45 | Installation 46 | ------------ 47 | 48 | Install `nubo` with one of the following commands:: 49 | 50 | $ pip install nubo 51 | 52 | Alternatively, use `easy_install`:: 53 | 54 | $ easy_install nubo 55 | 56 | You need to have `ca-certificates`_ installed on your system. 57 | 58 | .. _ca-certificates: https://wiki.apache.org/incubator/LibcloudSSL#Enabling_SSL_Certificate_Check 59 | 60 | Usage 61 | ----- 62 | 63 | Invoke `nubo` without arguments to see the available functionalities:: 64 | 65 | $ nubo 66 | usage: nubo [-h] {config,clouds,list,images,start,reboot,delete} ... 67 | 68 | Start Virtual Machines on multiple clouds 69 | 70 | positional arguments: 71 | {config,clouds,list,images,start,reboot,delete} 72 | config set your cloud credentials 73 | clouds list available clouds 74 | list list running VMs 75 | images list available images 76 | start start a new VM 77 | reboot reboot a given VM 78 | delete delete a given VM 79 | 80 | optional arguments: 81 | -h, --help show this help message and exit 82 | 83 | Run `nubo config` to set your cloud credentials. 
The following example shows how we can configure one of the available cloud providers::
Public IP: 198.199.72.211 137 | 138 | With `nubo list` we can see the status of our virtual machines on a given cloud 139 | provider:: 140 | 141 | $ nubo list 142 | 1 VMs running on DIGITAL_OCEAN 143 | id name state ip 144 | ======================================== 145 | 150843 test RUNNING 198.199.72.211 146 | 147 | API Reference 148 | ------------- 149 | All `nubo` functionalities can be accessed via its Python API. Here is a brief 150 | example of how to deploy a new virtual machine:: 151 | 152 | from nubo.clouds.base import get_cloud 153 | 154 | Cloud = get_cloud('EC2_EU_WEST') 155 | ec2 = Cloud() 156 | 157 | print ec2.deploy(image_id='ami-27013f53', name='my-new-vm') 158 | 159 | Please refer to the following API documentation for further details. 160 | 161 | .. automodule:: nubo.clouds.base 162 | :members: 163 | 164 | .. autoclass:: nubo.clouds.base.BaseCloud 165 | :show-inheritance: 166 | :members: 167 | 168 | .. automodule:: nubo.clouds.digitalocean 169 | :members: 170 | 171 | .. autoclass:: nubo.clouds.digitalocean.DigitalOcean 172 | :show-inheritance: 173 | :members: 174 | 175 | .. automodule:: nubo.clouds.ec2 176 | :members: 177 | 178 | .. autoclass:: nubo.clouds.ec2.AmazonEC2 179 | :show-inheritance: 180 | :members: 181 | 182 | .. automodule:: nubo.clouds.rackspace 183 | :members: 184 | 185 | .. autoclass:: nubo.clouds.rackspace.Rackspace 186 | :show-inheritance: 187 | :members: 188 | 189 | .. automodule:: nubo.clouds.opennebula 190 | :members: 191 | 192 | .. autoclass:: nubo.clouds.opennebula.OpenNebula 193 | :show-inheritance: 194 | :members: 195 | -------------------------------------------------------------------------------- /docs/conf.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | # 3 | # nubo documentation build configuration file, created by 4 | # sphinx-quickstart on Sat May 8 13:26:52 2010. 
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.

import sys, os, subprocess

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
sys.path.append(os.path.abspath('..'))
sys.path.append(os.path.abspath('_themes'))

# -- General configuration -----------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx']

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'nubo'
copyright = u'2013, Emanuele Rocca'

# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# NOTE: obtained by shelling out to "setup.py --version" in the project root,
# so the docs always report the version of the checked-out source tree.
version = subprocess.Popen([sys.executable, 'setup.py', '--version'],
                           cwd=os.path.abspath('..'),
                           stdout=subprocess.PIPE).communicate()[0].strip()
# The full version, including alpha/beta/rc tags.
release = version

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']

# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
#pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []


# -- Options for HTML output ---------------------------------------------------

# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# NOTE: 'flask_small' is a custom theme expected under html_theme_path below.
html_theme = 'flask_small'

# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
    'index_logo': '',
    'github_fork': 'ema/nubo'
}

# Add any paths that contain custom themes here, relative to this directory.
html_theme_path = ['_themes']

# The name for this set of Sphinx documents. If None, it defaults to
# " v documentation".
html_title = 'A command line interface to multiple cloud providers'

# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_domain_indices = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''

# Output file base name for HTML help builder.
htmlhelp_basename = 'nubodoc'


# -- Options for LaTeX output --------------------------------------------------

# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'

# The font size ('10pt', '11pt' or '12pt').
#latex_font_size = '10pt'

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
    ('index', 'nubo.tex', u'Nubo Documentation',
     u'Emanuele Rocca', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# Additional stuff for the LaTeX preamble.
#latex_preamble = ''

# Documents to append as an appendix to all manuals.
203 | #latex_appendices = [] 204 | 205 | # If false, no module index is generated. 206 | #latex_domain_indices = True 207 | 208 | 209 | # -- Options for manual page output -------------------------------------------- 210 | 211 | # One entry per manual page. List of tuples 212 | # (source start file, name, description, authors, manual section). 213 | man_pages = [ 214 | ('nubo', 'nubo', u'Nubo Documentation', 215 | [u'Emanuele Rocca'], 1) 216 | ] 217 | 218 | 219 | # Example configuration for intersphinx: refer to the Python standard library. 220 | intersphinx_mapping = {'http://docs.python.org/': None} 221 | -------------------------------------------------------------------------------- /nubo/clouds/base.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | """ 4 | nubo.clouds.base 5 | ================ 6 | 7 | Support deployments on multiple cloud providers. 8 | 9 | :copyright: (C) 2013 by Emanuele Rocca. 10 | """ 11 | 12 | import sys 13 | import time 14 | import socket 15 | import logging 16 | import hashlib 17 | 18 | from importlib import import_module 19 | 20 | from os import getenv, path 21 | 22 | from libcloud.compute.types import Provider, InvalidCredsError 23 | from libcloud.compute.providers import get_driver 24 | 25 | import paramiko 26 | 27 | from nubo.config import read_config 28 | from nubo.remote import RemoteHost 29 | 30 | CLOUDS_MAPPING = { 31 | 'EC2_US_EAST': 'nubo.clouds.ec2.AmazonEC2', 32 | 'EC2_US_WEST': 'nubo.clouds.ec2.AmazonEC2', 33 | 'EC2_US_WEST_OREGON': 'nubo.clouds.ec2.AmazonEC2', 34 | 'EC2_AP_SOUTHEAST': 'nubo.clouds.ec2.AmazonEC2', 35 | 'EC2_AP_SOUTHEAST2': 'nubo.clouds.ec2.AmazonEC2', 36 | 'EC2_AP_NORTHEAST': 'nubo.clouds.ec2.AmazonEC2', 37 | 'EC2_EU_WEST': 'nubo.clouds.ec2.AmazonEC2', 38 | 'RACKSPACE': 'nubo.clouds.rackspace.Rackspace', 39 | 'DIGITAL_OCEAN': 'nubo.clouds.digitalocean.DigitalOcean', 40 | 'LINODE': 'nubo.clouds.linode.Linode', 41 | 'OPENNEBULA': 
'nubo.clouds.opennebula.OpenNebula', 42 | } 43 | 44 | NODE_STATES = { 45 | 0: 'RUNNING', 46 | 1: 'REBOOTING', 47 | 2: 'TERMINATED', 48 | 3: 'PENDING', 49 | 4: 'UNKNOWN' 50 | } 51 | 52 | AVAILABLE_CLOUDS = read_config() 53 | 54 | 55 | def resolvepath(s): 56 | return path.abspath(path.expanduser(s)) 57 | 58 | 59 | def node2dict(node): 60 | """Convert a node object into a dict""" 61 | fields = ( 'id', 'name', 'state', 'public_ips', 62 | 'private_ips', 'image', 'size', 'extra' ) 63 | values = {} 64 | for field in fields: 65 | value = getattr(node, field) 66 | if value is not None: 67 | values[field] = value 68 | 69 | values['state'] = NODE_STATES[values['state']] 70 | 71 | for field in 'image', 'size': 72 | if field in values and values[field]: 73 | values[field] = values[field].name 74 | 75 | for field in 'public_ips', 'private_ips': 76 | try: 77 | values[field] = [ ip_addr.address for ip_addr in values[field] ] 78 | except AttributeError: 79 | pass 80 | 81 | return values 82 | 83 | def supported_clouds(): 84 | return CLOUDS_MAPPING.keys() 85 | 86 | def get_cloud(cloud_name=None): 87 | """Return a class representing the given cloud provider. 
88 | 89 | eg: get_cloud(cloud_name='EC2_US_WEST_OREGON') 90 | -> 91 | """ 92 | if cloud_name is None: 93 | cloud_name = getenv('NUBO_CLOUD') 94 | 95 | if cloud_name not in supported_clouds(): 96 | print "E: The NUBO_CLOUD environment variable should be set to one of the following values:" 97 | print ", ".join(supported_clouds()) 98 | sys.exit(1) 99 | 100 | 101 | # fullname = [ 'nubo', 'clouds', 'ec2', 'EC2Cloud' ] 102 | fullname = CLOUDS_MAPPING[cloud_name].split('.') 103 | 104 | # classname = 'EC2Cloud' 105 | classname = fullname.pop() 106 | 107 | # module_name = 'nubo.clouds.ec2' 108 | module_name = '.'.join(fullname) 109 | 110 | module = import_module(module_name) 111 | 112 | cloudclass = getattr(module, classname) 113 | cloudclass.PROVIDER_NAME = cloud_name 114 | 115 | return cloudclass 116 | 117 | class BaseCloud(object): 118 | 119 | # Wait a maximum of 5 minutes 120 | MAX_ATTEMPTS = 5 * 60 121 | 122 | # Has to be set by extending classes 123 | PROVIDER_NAME = None 124 | 125 | # Can be extended 126 | NEEDED_PARAMS = [ 'key', 'secret' ] 127 | 128 | @classmethod 129 | def test_conn(cls, **params): 130 | provider = getattr(Provider, cls.PROVIDER_NAME) 131 | DriverClass = get_driver(provider) 132 | driver = DriverClass(**params) 133 | try: 134 | return type(driver.list_nodes()) == list 135 | except InvalidCredsError: 136 | return False 137 | 138 | def __init__(self, ssh_private_key=None, login_as='root'): 139 | if ssh_private_key is None: 140 | ssh_private_key = resolvepath(AVAILABLE_CLOUDS["nubo"]["privkey"]) 141 | 142 | self.ssh_private_key = ssh_private_key 143 | self.ssh_public_key = ssh_private_key + '.pub' 144 | 145 | # Use public key's MD5 sum as its name 146 | key_hash = hashlib.md5() 147 | key_hash.update(open(self.ssh_public_key).read()) 148 | self.ssh_key_name = key_hash.hexdigest() 149 | 150 | self.login_as = login_as 151 | 152 | try: 153 | provider = getattr(Provider, self.PROVIDER_NAME) 154 | except AttributeError: 155 | raise Exception, "Unknown 
cloud %s" % self.PROVIDER_NAME 156 | 157 | DriverClass = get_driver(provider) 158 | self.driver = DriverClass( 159 | **AVAILABLE_CLOUDS[CLOUDS_MAPPING[self.PROVIDER_NAME]]) 160 | 161 | def __wait_for_node(self, node_id): 162 | attempts = self.MAX_ATTEMPTS 163 | 164 | while attempts: 165 | for node in self.list_nodes(): 166 | if node['id'] != node_id: 167 | continue 168 | 169 | if node['state'] == "RUNNING": 170 | return node 171 | 172 | logging.info("%s attempts left on %s: %s != RUNNING" % ( 173 | attempts, node_id, node['state'])) 174 | 175 | time.sleep(1) 176 | attempts -= 1 177 | 178 | def wait_for_ssh(self, node): 179 | attempts = self.MAX_ATTEMPTS 180 | remotehost = RemoteHost(node['public_ips'][0], self.ssh_private_key) 181 | 182 | while attempts: 183 | try: 184 | return remotehost.whoami(self.login_as) 185 | except socket.error: 186 | logging.info("%s SSH attempts left for user %s on %s" 187 | % (attempts, self.login_as, node['id'])) 188 | 189 | time.sleep(1) 190 | attempts -= 1 191 | except paramiko.PasswordRequiredException: 192 | msg = 'Authentication failed for %s@%s. ' % ( 193 | self.login_as, node['id']) 194 | msg += 'Perhaps you should login as a different user?' 195 | raise Exception(msg) 196 | 197 | def startup(self, params): 198 | """Start a new instance. 199 | 200 | Each cloud provider requires different values here. 201 | 202 | 'name', 'image', and 'size' are the lowest common denominator. 
203 | 204 | eg: startup(params) -> dict 205 | """ 206 | # Start a new VM and keep track of its ID 207 | node_id = node2dict(self.driver.create_node(**params))['id'] 208 | 209 | # Wait for the VM to be RUNNING 210 | node = self.__wait_for_node(node_id) 211 | assert node is not None 212 | 213 | # Wait for SSH connections to be accepted 214 | user = self.wait_for_ssh(node) 215 | assert user == self.login_as 216 | 217 | return node 218 | 219 | def is_running(self, node_id): 220 | """Return True if the given node is running.""" 221 | running_ids = [ 222 | node2dict(node)['id'] for node in self.driver.list_nodes() 223 | ] 224 | 225 | return node_id in running_ids 226 | 227 | def __call_if_running(self, function, node_id): 228 | if not self.is_running(node_id): 229 | return False 230 | 231 | class Node: 232 | id = node_id 233 | 234 | return function(Node) 235 | 236 | def shutdown(self, node_id): 237 | """Shutdown the given instance id. 238 | 239 | eg: shutdown('i-bb6c3b88') -> bool 240 | """ 241 | return self.__call_if_running(self.driver.destroy_node, node_id) 242 | 243 | def reboot(self, node_id): 244 | """Reboot the given instance id. 
245 | 246 | eg: reboot('i-bb6c3b88') -> bool 247 | """ 248 | return self.__call_if_running(self.driver.reboot_node, node_id) 249 | 250 | def list_nodes(self): 251 | """Return a list of dictionaries representing currently running 252 | nodes.""" 253 | return [ node2dict(node) for node in self.driver.list_nodes() ] 254 | 255 | def list_sizes(self): 256 | """Return a list of strings representing the available instance 257 | size names.""" 258 | return [ size.name for size in self.driver.list_sizes() ] 259 | 260 | def list_images(self, limit=None, keyword=''): 261 | """Return a list of VM images available on this cloud.""" 262 | images = [ image for image in self.driver.list_images() 263 | if (keyword.lower() in image.name.lower()) or not keyword ] 264 | 265 | if limit: 266 | return images[:limit] 267 | 268 | return images 269 | 270 | def deploy(self, image_id, size_idx=0, location_idx=0, name='test'): 271 | """Deploy a VM instance on this cloud. This method is not implemented 272 | here, it has to be specialized by the classes implementing specific 273 | cloud providers.""" 274 | raise NotImplementedError() 275 | --------------------------------------------------------------------------------