├── .gitignore ├── .travis.yml ├── AUTHORS ├── COPYING ├── LICENSE ├── MANIFEST.in ├── README.rst ├── ci ├── apache │ ├── hil.cfg.postgres │ ├── hil.cfg.sqlite │ ├── install.sh │ └── run_integration_tests.sh ├── deployment-mock-networks │ ├── run_integration_tests.sh │ ├── site-layout.json │ └── testsuite.cfg ├── hil_install.sh ├── keystone │ ├── .gitignore │ ├── install.sh │ ├── keystone.sh │ ├── keystonerc │ ├── run_integration_tests.sh │ └── uwsgi.ini ├── list_tracked_pyfiles.sh ├── no_trailing_whitespace.sh ├── obmd │ ├── install.sh │ └── run_integration_tests.sh ├── optimize_postgres.sh ├── pycodestyle_tests.sh ├── pylint_tests.sh ├── run_tests.sh ├── run_unit_tests.sh ├── testsuite.cfg.postgres ├── testsuite.cfg.sqlite └── wsgi.conf ├── docs ├── CONTRIBUTING.md ├── INSTALL-devel-flowchart.txt ├── INSTALL-devel-tldr.rst ├── INSTALL-devel.rst ├── INSTALL.rst ├── Install_configure_PostgreSQL_CENTOS7.md ├── Makefile ├── README.rst ├── UPGRADING.rst ├── USING.rst ├── client-library.md ├── conf.py ├── consistency-model.md ├── deployment.md ├── driver-model.rst ├── extensions.rst ├── index.rst ├── keystone-auth.md ├── logging.md ├── maintenance-pool.md ├── migrations.md ├── network-drivers.md ├── network-teardown.md ├── networks.md ├── overview.md ├── rest_api.md ├── testing.md └── vlan-primer.md ├── examples ├── cloud-img-with-passwd │ ├── .gitignore │ ├── Makefile │ ├── README.rst │ ├── SHA256SUMS.centos │ ├── SHA256SUMS.ubuntu │ ├── centos.mk │ └── ubuntu.mk ├── dbinit.py ├── deployment.cfg ├── hil.cfg ├── hil.cfg.dev-no-hardware ├── leasing │ ├── README │ ├── leasing │ ├── leasing.cfg │ └── node_release_script.py ├── puppet_headnode │ ├── README.md │ ├── download_iso.sh │ ├── manifests │ │ ├── site.pp │ │ └── static │ │ │ ├── boot_notify.py │ │ │ ├── default │ │ │ ├── dhcpd.conf │ │ │ ├── inetd.conf │ │ │ ├── interfaces │ │ │ ├── isc-dhcp-server │ │ │ ├── ks.cfg │ │ │ ├── make-links │ │ │ ├── pxelinux_cfg │ │ │ ├── rc.local │ │ │ └── tftpd-hpa │ └── sha256sum.txt ├── site-layout.json └── testsuite.cfg-deployment ├── hil.wsgi ├── hil ├── __init__.py ├── api.py ├── auth.py ├── class_resolver.py ├── cli │ ├── __init__.py │ ├── cli.py │ ├── client_setup.py │ ├── headnode.py │ ├── helper.py │ ├── misc.py │ ├── network.py │ ├── node.py │ ├── port.py │ ├── project.py │ ├── switch.py │ └── user.py ├── client │ ├── __init__.py │ ├── base.py │ ├── client.py │ ├── extensions.py │ ├── network.py │ ├── node.py │ ├── project.py │ ├── switch.py │ └── user.py ├── commands │ ├── __init__.py │ ├── admin.py │ ├── db.py │ ├── migrate_ipmi_info.py │ └── util.py ├── config.py ├── deferred.py ├── dev_support.py ├── errors.py ├── ext │ ├── __init__.py │ ├── auth │ │ ├── __init__.py │ │ ├── database.py │ │ ├── keystone.py │ │ ├── migrations │ │ │ └── database │ │ │ │ └── 96f1e8f87f85_upgrading_user_to_bigint.py │ │ ├── mock.py │ │ └── null.py │ ├── network_allocators │ │ ├── __init__.py │ │ ├── migrations │ │ │ └── vlan_pool │ │ │ │ └── e06576b2ea9e_vlan_pool_pk_to_bigint.py │ │ ├── null.py │ │ └── vlan_pool.py │ └── switches │ │ ├── __init__.py │ │ ├── _console.py │ │ ├── _dell_base.py │ │ ├── _vlan_http.py │ │ ├── brocade.py │ │ ├── common.py │ │ ├── dell.py │ │ ├── dellnos9.py │ │ ├── migrations │ │ ├── brocade │ │ │ ├── 03ae4ec647da_brocade_pk_to_bigint.py │ │ │ └── 5a6db7a7222d_added_brocade_driver.py │ │ ├── dell │ │ │ ├── 099b939261c1_rename_dell_switch_table_for_flask_.py │ │ │ └── b1b0e6d4302e_dell_pk_to_bigint.py │ │ ├── mock │ │ │ ├── b5b31d19257d_rename_mockswitch_table_for_flask_.py │ │ │ └── 
fa9ef2c9b67f_mock_switch_pk_to_bigint.py │ │ ├── n3000 │ │ │ ├── 357bcff65fb3_n3000_pk_to_bigint.py │ │ │ └── b96d46bbfb12_add_dell_n3048_driver.py │ │ └── nexus │ │ │ └── 09d96bf567aa_nexus_pks_to_bigint.py │ │ ├── mock.py │ │ ├── n3000.py │ │ ├── nexus.py │ │ └── ovs.py ├── flaskapp.py ├── migrations.py ├── migrations │ ├── alembic.ini │ ├── env.py │ ├── script.py.mako │ └── versions │ │ ├── 02f7e9607e16_delete_legacy_obm_support.py │ │ ├── 264ddaebdfcc_make_labels_unique.py │ │ ├── 3b2dab2e0d7d_add_type_field_to_networkingaction.py │ │ ├── 57f4c30b0ad4_added_metadata.py │ │ ├── 655e037522d0_mock_obm_pks_to_bigint.py │ │ ├── 6a8c19565060_move_to_flask.py │ │ ├── 7acb050f783c_add_obmd_fields.py │ │ ├── 89630e3872ec_network_acl.py │ │ ├── 89ff8a6d72b2_add_uuid_and_status_to_networkingaction.py │ │ ├── 9089fa811a2b_core_pks_to_bigint.py │ │ ├── aa9106430f1c_testing_only_avoid_manual_intervention.py │ │ ├── c45f6a96dbe7_nic_primary_key_changed_to_bigint.py │ │ ├── d65a9dc873d7_mark_obmd_fields_not_nullable.py │ │ ├── df8d9f423f2b_rename_mockobm_table_for_flask.py │ │ └── fcb23cd2e9b7_ipmi_obm_pks_to_bigint.py ├── model.py ├── network_allocator.py ├── rest.py ├── server.py └── test_common.py ├── scripts ├── create_bridges ├── create_bridges.service ├── hil-complete.sh └── hil_network.service ├── setup.cfg ├── setup.py ├── specs ├── README.md ├── obmd-integration.md ├── openvpn-support.md └── switch-driver-capabilities.md └── tests ├── custom_lint.py ├── deployment ├── headnodes.py ├── multi_networks.py ├── native_networks.py ├── switch_config.py └── vlan_networks.py ├── integration ├── cli.py ├── client_integration.py ├── keystone.py └── obmd.py ├── stress.py └── unit ├── api ├── auth.py ├── main.py ├── maintenance-pool.py ├── port_register.py └── port_revert.py ├── class_resolver.py ├── cli.py ├── client_unit.py ├── config.py ├── deferred.py ├── dev_support.py ├── ext ├── auth │ ├── database.py │ └── mock.py ├── network_allocators │ └── vlan_pool.py └── switches │ ├── brocade.py │ ├── common.py │ └── dellnos9.py ├── hil_auth.py ├── migrations.py ├── migrations ├── after-PK-bigint.sql ├── flask.sql └── pending-networking-actions.sql ├── model.py ├── rest.py └── test_common.py /.gitignore: -------------------------------------------------------------------------------- 1 | .DS_Store 2 | *.swp 3 | *.pyc 4 | *.~ 5 | .ropeproject 6 | *.egg-info 7 | build 8 | dist 9 | .venv 10 | .coverage 11 | *.db 12 | /hil.cfg 13 | /testsuite.cfg 14 | /site-layout.json* 15 | /.cache 16 | hil.log 17 | .eggs/ 18 | .pytest_cache 19 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: python 2 | python: 3 | - "2.7" 4 | - "3.4" 5 | matrix: 6 | allow_failures: 7 | - python: "3.4" 8 | 9 | virtualenv: 10 | system_site_packages: true 11 | 12 | sudo: required 13 | dist: trusty 14 | 15 | cache: 16 | - pip 17 | 18 | addons: 19 | postgresql: "9.3" 20 | apt: 21 | packages: 22 | - apache2 23 | - libapache2-mod-wsgi 24 | 25 | branches: 26 | except: 27 | - none 28 | 29 | env: 30 | - TEST_SUITE=unit DB=sqlite 31 | # SPOOF_MANUAL_MIGRATIONS is used to tell the migration scripts that they 32 | # should make up data that would otherwise require manual intervention; 33 | # this is important for the migration tests. 
34 | - TEST_SUITE=unit DB=postgres SPOOF_MANUAL_MIGRATIONS=true 35 | - TEST_SUITE=integration DB=postgres SPOOF_MANUAL_MIGRATIONS=true 36 | 37 | install: 38 | # A dependency might use new pip syntax; upgrade to prevent breakage. See #771 39 | - pip install --upgrade pip 40 | - sh -e ci/optimize_postgres.sh 41 | - sh -e ci/hil_install.sh 42 | - sh -e ci/apache/install.sh 43 | - sh -e ci/keystone/install.sh 44 | - sh -e ci/obmd/install.sh 45 | 46 | script: 47 | - sh -e ci/run_tests.sh 48 | 49 | -------------------------------------------------------------------------------- /AUTHORS: -------------------------------------------------------------------------------- 1 | Abishek Raju 2 | Amin Mosayyebzadeh 3 | Andrew Mohn 4 | George Silvis, III 5 | Ian Ballou 6 | Ian Denhardt 7 | Igibek Koishybayev 8 | Jay Hennessey 9 | (Jethro) Shuwen Sun 10 | Jonathan Bell 11 | Kristi Nikolla 12 | Kyle Hogan 13 | Logan Bernard 14 | Lucas Xu 15 | Naved Ansari 16 | Orran Krieger 17 | Peter Desnoyers 18 | Ritesh Singh 19 | Rohan Garg 20 | Sahil Tikale 21 | Valerie Young 22 | Viggnesh Venugopal 23 | Zhaoliang Liu 24 | -------------------------------------------------------------------------------- /COPYING: -------------------------------------------------------------------------------- 1 | Copyright 2013-2018 Mass Open Cloud Contributors (see AUTHORS). 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 
14 | -------------------------------------------------------------------------------- /MANIFEST.in: -------------------------------------------------------------------------------- 1 | global-include *.rst 2 | include LICENSE 3 | include COPYING 4 | graft docs 5 | graft tests/unit 6 | graft examples 7 | include scripts/hil_network.service 8 | 9 | 10 | -------------------------------------------------------------------------------- /ci/apache/hil.cfg.postgres: -------------------------------------------------------------------------------- 1 | # This configuration files is used for testing HIL with postgresql and apache 2 | [general] 3 | log_level = debug 4 | 5 | [auth] 6 | require_authentication = True 7 | 8 | [headnode] 9 | trunk_nic = eth0 10 | base_imgs = img1, img2, img3, img4 11 | libvirt_endpoint = qemu:///system 12 | 13 | [client] 14 | endpoint = http://127.0.0.1 15 | 16 | [database] 17 | uri = postgresql://postgres@localhost/hil 18 | 19 | [extensions] 20 | hil.ext.switches.mock = 21 | hil.ext.auth.database = 22 | 23 | hil.ext.network_allocators.null = 24 | -------------------------------------------------------------------------------- /ci/apache/hil.cfg.sqlite: -------------------------------------------------------------------------------- 1 | # This configuration file is used for testing HIL with sqlite and apache 2 | [general] 3 | log_level = debug 4 | 5 | [auth] 6 | require_authentication = True 7 | 8 | [headnode] 9 | trunk_nic = eth0 10 | base_imgs = img1, img2, img3, img4 11 | libvirt_endpoint = qemu:///system 12 | 13 | [client] 14 | endpoint = http://127.0.0.1 15 | 16 | [database] 17 | uri = sqlite:////home/travis/hil.db 18 | 19 | [devel] 20 | dry_run=True 21 | 22 | [extensions] 23 | hil.ext.switches.mock = 24 | hil.ext.auth.database = 25 | 26 | hil.ext.network_allocators.null = 27 | -------------------------------------------------------------------------------- /ci/apache/install.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Exit if we are only running unit tests 4 | if [ $TEST_SUITE = unit ]; then 5 | exit 0 6 | fi 7 | 8 | # Apache Setup 9 | sudo chown -R travis:travis /var/www 10 | mkdir /var/www/hil 11 | 12 | sudo cp ci/wsgi.conf /etc/apache2/sites-available/hil.conf 13 | sudo sed -e "s|%VIRTUAL_ENV%|$VIRTUAL_ENV|g" -i /etc/apache2/sites-available/hil.conf 14 | cp hil.wsgi /var/www/hil/hil.wsgi 15 | 16 | sudo a2dissite 000-default && sudo a2ensite hil 17 | sudo service apache2 restart 18 | -------------------------------------------------------------------------------- /ci/apache/run_integration_tests.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export HIL_ENDPOINT=http://localhost 3 | export HIL_USERNAME=admin 4 | export HIL_PASSWORD=12345 5 | 6 | # Initial Setup 7 | cd /etc 8 | hil-admin db create 9 | hil-admin create-admin-user $HIL_USERNAME $HIL_PASSWORD 10 | hil-admin serve-networks & 11 | cd $TRAVIS_BUILD_DIR 12 | 13 | # Test commands 14 | py.test tests/integration/cli.py tests/integration/client_integration.py 15 | 16 | # Test dbinit script 17 | python examples/dbinit.py 18 | -------------------------------------------------------------------------------- /ci/deployment-mock-networks/run_integration_tests.sh: -------------------------------------------------------------------------------- 1 | 2 | cd "$(dirname $0)" 3 | 4 | cp testsuite.cfg ../../ 5 | cp site-layout.json ../../ 6 | 7 | cd ../.. 
8 | py.test tests/deployment/*_networks.py 9 | -------------------------------------------------------------------------------- /ci/deployment-mock-networks/site-layout.json: -------------------------------------------------------------------------------- 1 | { 2 | "switches": [ 3 | { 4 | "switch": "mock-0", 5 | "type": "http://schema.massopencloud.org/haas/v0/switches/mock", 6 | "hostname": "mock-0.example.com", 7 | "username": "alice", 8 | "password": "secret" 9 | }, 10 | { 11 | "switch": "mock-1", 12 | "type": "http://schema.massopencloud.org/haas/v0/switches/mock", 13 | "hostname": "mock-0.example.com", 14 | "username": "alice", 15 | "password": "secret" 16 | } 17 | ], 18 | "nodes" : [ 19 | { 20 | "name": "node-0", 21 | "nics": [ 22 | { 23 | "name": "nic1", 24 | "mac" : "de:ad:be:ef:20:14", 25 | "port": "gi1/0/1", 26 | "switch": "mock-0" 27 | }, 28 | { 29 | "name": "nic2", 30 | "mac" : "de:ad:be:ef:20:15", 31 | "port": "gi1/0/2", 32 | "switch": "mock-1" 33 | } 34 | ], 35 | "obm": { 36 | "type": "http://schema.massopencloud.org/haas/v0/obm/mock", 37 | "host": "192.168.1.1", 38 | "user": "foo", 39 | "password": "bar" 40 | }, 41 | "obmd": { 42 | "uri": "http://obmd.example.com/nodes/node-1", 43 | "admin_token": "secret" 44 | } 45 | }, 46 | { 47 | "name": "node-1", 48 | "nics": [ 49 | { 50 | "name": "nic1", 51 | "mac" : "de:ad:be:ef:20:16", 52 | "port": "gi1/0/3", 53 | "switch": "mock-1" 54 | } 55 | ], 56 | "obm": { 57 | "type": "http://schema.massopencloud.org/haas/v0/obm/mock", 58 | "host": "192.168.1.2", 59 | "user": "foo", 60 | "password": "bar" 61 | }, 62 | "obmd": { 63 | "uri": "http://obmd.example.com/nodes/node-1", 64 | "admin_token": "secret" 65 | } 66 | }, 67 | { 68 | "name": "node-2", 69 | "nics": [ 70 | { 71 | "name": "nic1", 72 | "mac" : "de:ad:be:ef:20:14", 73 | "port": "gi1/0/4", 74 | "switch": "mock-0" 75 | }, 76 | { 77 | "name": "nic2", 78 | "mac" : "de:ad:be:ef:20:15", 79 | "port": "gi1/0/5", 80 | "switch": "mock-1" 81 | } 82 | ], 83 | "obm": { 84 | "type": "http://schema.massopencloud.org/haas/v0/obm/mock", 85 | "host": "192.168.1.1", 86 | "user": "foo", 87 | "password": "bar" 88 | }, 89 | "obmd": { 90 | "uri": "http://obmd.example.com/nodes/node-1", 91 | "admin_token": "secret" 92 | } 93 | }, 94 | { 95 | "name": "node-3", 96 | "nics": [ 97 | { 98 | "name": "nic1", 99 | "mac" : "de:ad:be:ef:20:16", 100 | "port": "gi1/0/6", 101 | "switch": "mock-1" 102 | } 103 | ], 104 | "obm": { 105 | "type": "http://schema.massopencloud.org/haas/v0/obm/mock", 106 | "host": "192.168.1.2", 107 | "user": "foo", 108 | "password": "bar" 109 | }, 110 | "obmd": { 111 | "uri": "http://obmd.example.com/nodes/node-1", 112 | "admin_token": "secret" 113 | } 114 | } 115 | ] 116 | } 117 | -------------------------------------------------------------------------------- /ci/deployment-mock-networks/testsuite.cfg: -------------------------------------------------------------------------------- 1 | # This is an example testsuite.cfg for running the deployment tests (in 2 | # ``tests/deployment``). ``site-layout.json`` it is designed to accompany 3 | # ``site-layout.json`` in this directory. You will most likely have to modify 4 | # both files according to your local environment. 
5 | [general] 6 | log_level = debug 7 | 8 | [database] 9 | uri = sqlite:///:memory: 10 | 11 | [headnode] 12 | # Set this to the appropriate interface on your hil master: 13 | # trunk_nic = eth0 14 | 15 | # Note that the test suite requires these two base images are available; this 16 | # *cannot* be set to a different value for running the tests. 17 | base_imgs = base-headnode, base-headnode-2 18 | libvirt_endpoint = qemu:///system 19 | 20 | [extensions] 21 | hil.ext.network_allocators.vlan_pool = 22 | hil.ext.switches.mock = 23 | hil.ext.auth.null = 24 | 25 | [hil.ext.network_allocators.vlan_pool] 26 | # Set to a range appropriate for your local environment. A pool of at least 10 27 | # VLANs is recommended for running the tests. The tests don't use quite that 28 | # many at present, but may do so in the future. 29 | # 30 | vlans = 100-110 31 | -------------------------------------------------------------------------------- /ci/hil_install.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # Setup configuration 4 | cp ci/testsuite.cfg.$DB testsuite.cfg 5 | chmod 0600 testsuite.cfg 6 | sudo cp ci/apache/hil.cfg.$DB /etc/hil.cfg 7 | sudo chown travis:travis /etc/hil.cfg 8 | sudo chmod 0600 /etc/hil.cfg 9 | 10 | # Database Setup 11 | if [ $DB = postgres ]; then 12 | sudo apt-get install -y python-psycopg2 13 | psql --version 14 | psql -c 'CREATE DATABASE hil_tests;' -U postgres 15 | psql -c 'CREATE DATABASE hil;' -U postgres 16 | fi 17 | 18 | # Address #577 via 19 | # https://stackoverflow.com/questions/2192323/what-is-the-python-egg-cache-python-egg-cache 20 | mkdir -p ~/.python-eggs 21 | chmod go-w ~/.python-eggs # Eliminate "writable by group/others" warnings 22 | 23 | # Install HIL, incl. test dependencies 24 | pip install .[tests] 25 | -------------------------------------------------------------------------------- /ci/keystone/.gitignore: -------------------------------------------------------------------------------- 1 | /keystone 2 | -------------------------------------------------------------------------------- /ci/keystone/install.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | set -ex 3 | 4 | # Exit if we are only running unit tests 5 | if [ $TEST_SUITE = unit ]; then 6 | exit 0 7 | fi 8 | 9 | pip install .[keystone-auth-backend,keystone-client] 10 | 11 | # The exact commit we use here is somewhat arbitrary, but we want 12 | # something that (a) won't change out from under our feet, and (b) 13 | # works with our existing tests. 14 | keystone_commit=stable/pike ./ci/keystone/keystone.sh setup 15 | -------------------------------------------------------------------------------- /ci/keystone/keystonerc: -------------------------------------------------------------------------------- 1 | export OS_USERNAME=admin 2 | export OS_PASSWORD=s3cr3t 3 | export OS_PROJECT_NAME=admin 4 | export OS_USER_DOMAIN_ID=default 5 | export OS_PROJECT_DOMAIN_ID=default 6 | export OS_IDENTITY_API_VERSION=3 7 | export OS_AUTH_URL=http://localhost:5000/v3 8 | -------------------------------------------------------------------------------- /ci/keystone/run_integration_tests.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | 3 | set -ex 4 | 5 | ./ci/keystone/keystone.sh run & 6 | 7 | # Wait for curl to successfully connect to each of the ports keystone 8 | # is supposed to be listening on before continuing. 
9 | for port in 5000 35357; do 10 | while [ "$(curl http://127.0.0.1:$port; echo $?)" -ne 0 ]; do 11 | sleep .2 12 | done 13 | done 14 | 15 | py.test tests/integration/keystone.py 16 | -------------------------------------------------------------------------------- /ci/keystone/uwsgi.ini: -------------------------------------------------------------------------------- 1 | [uwsgi] 2 | # misc. settings copied from devstack: 3 | # 4 | # https://github.com/openstack-dev/devstack/blob/e88c51cc1b0aa59abbae353f3fd3c2ef58e1602a/lib/keystone#L304-L342 5 | # 6 | # I (zenhack) did this after talking to stevemar in #openstack-keystone; some 7 | # intermittent issues magically solved themselves when adding these. My money is 8 | # on `add-header` as the important one, but some of the other stuff is still 9 | # useful sinces it mean the call to `kill` in keystone.sh actually gets the 10 | # worker processes too. 11 | # 12 | # This file is common to both the "public" and "admin" endpoints; the 13 | # per-endpoint options are passed on the command line in keystone.sh 14 | master = true 15 | die-on-term = true 16 | exit-on-reload = true 17 | enable-threads = true 18 | 19 | add-header = Connection: close 20 | -------------------------------------------------------------------------------- /ci/list_tracked_pyfiles.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | # This script lists all of the python source files that are tracked 3 | # by git. The primary use is deciding what files to run the linters on. 4 | 5 | set -e 6 | 7 | cd "$(git rev-parse --show-toplevel)" # Enter the source tree root 8 | 9 | # List all files not matched by a .gitignore and ending in .py or .wsgi. 10 | git ls-files $(find -name .gitignore | sed 's/^/--exclude-per-directory /') | \ 11 | grep -E '\.(py|wsgi)$' 12 | -------------------------------------------------------------------------------- /ci/no_trailing_whitespace.sh: -------------------------------------------------------------------------------- 1 | # This script checks for trailing whitespace in all files tracked by git, 2 | # failing if it finds any. Note that (a) it removes the whitespace as a side 3 | # effect, and (b) it will fail unless HEAD is trailing-whitespace free, even 4 | # if the working directory is OK. 5 | 6 | if [ "$DB" != sqlite ] ; then 7 | # Only run in the SQLite pass. 8 | exit 0 9 | fi 10 | 11 | cd "$(git rev-parse --show-toplevel)" # Enter the source tree root 12 | 13 | # Remove all trailing whitespace in tracked files: 14 | sed --in-place -e 's/ *$//' $(git ls-files) 15 | 16 | diff="$(git diff)" 17 | 18 | if [ -n "$diff" ] ; then 19 | printf 'Error: trailing whitespace found. 
Diff:\n\n' 20 | printf '%s\n' "$diff" 21 | exit 1 22 | fi 23 | -------------------------------------------------------------------------------- /ci/obmd/install.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | set -ex 3 | 4 | # download the obmd v0.1 binary 5 | wget https://github.com/CCI-MOC/obmd/releases/download/v0.1/obmd 6 | chmod +x obmd 7 | sudo mv obmd /usr/local/bin 8 | -------------------------------------------------------------------------------- /ci/obmd/run_integration_tests.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env sh 2 | set -ex 3 | 4 | py.test --cov=hil --cov-append tests/integration/obmd.py 5 | -------------------------------------------------------------------------------- /ci/optimize_postgres.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -ex 3 | 4 | # Skip if we are in the sqlite build 5 | if [ $DB = sqlite ]; then 6 | exit 0 7 | fi 8 | 9 | # these changes optimize postgres. They are fine for testing, but not suitable 10 | # for production. 11 | # See https://www.postgresql.org/docs/current/static/non-durability.html 12 | echo "fsync = off 13 | synchronous_commit = off 14 | full_page_writes = off 15 | checkpoint_timeout = 30min 16 | "| sudo tee --append /etc/postgresql/9.*/main/postgresql.conf 17 | 18 | sudo service postgresql restart 19 | -------------------------------------------------------------------------------- /ci/pycodestyle_tests.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # If we're running in CI, only run pycodestyle in the sqlite env. 4 | # If the DB variable is undefined we assume a developer has invoked 5 | # this script directly. 6 | if [ -z "$DB" ] || [ "$DB" = sqlite ]; then 7 | cd "$(dirname $0)/.." 8 | pycodestyle $(./ci/list_tracked_pyfiles.sh) 9 | fi 10 | -------------------------------------------------------------------------------- /ci/pylint_tests.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # If we're running in CI, only run pylint in the sqlite env. 4 | # If the DB variable is undefined we assume a developer has 5 | # invoked this script directly. 6 | if [ -z "$DB" ] || [ "$DB" = sqlite ]; then 7 | cd "$(dirname $0)/.." 
8 | pylint \ 9 | --disable=all \ 10 | --enable=undefined-variable \ 11 | --enable=unused-variable \ 12 | --enable=unused-import \ 13 | --enable=wildcard-import \ 14 | --enable=signature-differs \ 15 | --enable=arguments-differ \ 16 | --enable=missing-docstring \ 17 | --enable=logging-not-lazy \ 18 | --enable=reimported \ 19 | $(./ci/list_tracked_pyfiles.sh) 20 | fi 21 | -------------------------------------------------------------------------------- /ci/run_tests.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | if [ $TEST_SUITE = integration ]; then 4 | sh -e ci/apache/run_integration_tests.sh 5 | sh -e ci/keystone/run_integration_tests.sh 6 | sh -e ci/deployment-mock-networks/run_integration_tests.sh 7 | sh -e ci/obmd/run_integration_tests.sh 8 | else 9 | sh -e ci/pycodestyle_tests.sh 10 | sh -e ci/pylint_tests.sh 11 | sh -e ci/no_trailing_whitespace.sh 12 | sh -e ci/run_unit_tests.sh 13 | fi 14 | -------------------------------------------------------------------------------- /ci/run_unit_tests.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | # On SQLite we run the tests in parallel. 4 | # This speeds things up, but is currently only safe for sqlite, since it uses 5 | # an in memory (and therefore per-process) database. 6 | 7 | if [ $DB = sqlite ]; then 8 | extra_flags='-n auto' 9 | fi 10 | 11 | py.test $extra_flags \ 12 | tests/custom_lint.py \ 13 | tests/unit \ 14 | tests/stress.py 15 | -------------------------------------------------------------------------------- /ci/testsuite.cfg.postgres: -------------------------------------------------------------------------------- 1 | # This is almost exactly testsuite.cfg.default; the only difference is that we 2 | # use postgres for the database. This is designed for running the usual test 3 | # suite against postgres on travis ci. 4 | [extensions] 5 | hil.ext.network_allocators.null = 6 | hil.ext.auth.null = 7 | [devel] 8 | dry_run = True 9 | [headnode] 10 | base_imgs = base-headnode, img1, img2, img3, img4 11 | [database] 12 | uri = postgresql://postgres@localhost/hil_tests 13 | -------------------------------------------------------------------------------- /ci/testsuite.cfg.sqlite: -------------------------------------------------------------------------------- 1 | # These are the default settings used by the test suite if 2 | # testsuite.cfg is not present. 
If you require any modification 3 | # to these settings, copy this file to testsuite.cfg and edit 4 | # accordingly 5 | [extensions] 6 | hil.ext.network_allocators.null = 7 | hil.ext.auth.null = 8 | [devel] 9 | dry_run = True 10 | [headnode] 11 | base_imgs = base-headnode, img1, img2, img3, img4 12 | [database] 13 | uri = sqlite:///:memory: 14 | -------------------------------------------------------------------------------- /ci/wsgi.conf: -------------------------------------------------------------------------------- 1 | LoadModule wsgi_module modules/mod_wsgi.so 2 | WSGISocketPrefix /var/run/wsgi 3 | 4 | 5 | ServerName 127.0.0.1 6 | AllowEncodedSlashes On 7 | WSGIPassAuthorization On 8 | WSGIDaemonProcess hil user=travis group=travis threads=2 python-path=/var/www/hil:%VIRTUAL_ENV%/lib/python2.7/site-packages 9 | WSGIScriptAlias / /var/www/hil/hil.wsgi 10 | 11 | WSGIProcessGroup hil 12 | WSGIApplicationGroup %{GLOBAL} 13 | Order deny,allow 14 | Allow from all 15 | 16 | 17 | -------------------------------------------------------------------------------- /docs/INSTALL-devel-flowchart.txt: -------------------------------------------------------------------------------- 1 | 2 | 3 | --------------------- 4 | | Install Packages | 5 | | Required for HIL | 6 | --------------------- 7 | # 8 | # 9 | # 10 | # 11 | # # ################ 12 | # # NO # Install # 13 | # SQLite # # # # # # # # # # # # # # # # # # # # PostgreSQL DB # 14 | # DB # # # 15 | # # ############### 16 | # # 17 | # YES # 18 | # # 19 | # # 20 | -------------------------- ------------------------------------- 21 | | Set up python virtual | | Create a systems user: "hil_dev" | 22 | | Environment |<-----| ------------------------------------- 23 | -------------------------- | # 24 | # | # 25 | # | # 26 | # | # 27 | ------------------- | -------------------------------- 28 | | GIT clone HIL | | | Create a DB role: "hil_dev" | 29 | ------------------- | | Create a database: "hil" | 30 | # | -------------------------------- 31 | # | # 32 | # | # 33 | --------------------- | ------------------- 34 | | Configur hil.cfg | ----------------| cd ~/hil_dev | 35 | --------------------- ------------------- 36 | # 37 | # 38 | # 39 | --------------------------- 40 | | Initialize the database | 41 | --------------------------- 42 | # 43 | # 44 | # 45 | --------------------- 46 | | Start HIL Servers | 47 | --------------------- 48 | # 49 | # 50 | # 51 | ----------------------- 52 | | Populate Objects in | 53 | | HIL | 54 | ----------------------- 55 | 56 | 57 | HAPPY HACKING !!! 58 | -------------------------------------------------------------------------------- /docs/INSTALL-devel-tldr.rst: -------------------------------------------------------------------------------- 1 | INSTALL-devel-tldr 2 | =================== 3 | 4 | Following is the simplest way to get started with hacking HIL on Centos system. 5 | Assuming that 6 | 7 | -- You have a github account. 8 | -- You have already forked the HIL repo. 9 | -- You would be running HIL in a python virtual environment with SQLite DB as backend. 
10 | 11 | 12 | Install Dependencies:: 13 | 14 | yum install epel-release bridge-utils gcc httpd ipmitool libvirt \ 15 | libxml2-devel libxslt-devel mod_wsgi net-tools python-pip python-psycopg2 \ 16 | python-virtinst python-virtualenv qemu-kvm telnet vconfig virt-install 17 | 18 | 19 | Clone repo:: 20 | 21 | git clone https://github.com/**username**/hil 22 | cd hil 23 | 24 | Set up a Python virtual environment:: 25 | 26 | virtualenv .venv 27 | source .venv/bin/activate 28 | pip install -e .[tests] 29 | 30 | Configure HIL:: 31 | 32 | cp examples/hil.cfg.dev-no-hardware hil.cfg 33 | 34 | 35 | Initialize database:: 36 | 37 | hil-admin db create 38 | 39 | Start server:: 40 | 41 | hil-admin run-dev-server 42 | 43 | 44 | From a separate terminal window:: 45 | 46 | cd ~/hil/ 47 | virtualenv .venv 48 | source .venv/bin/activate 49 | pip install -e . 50 | 51 | 52 | Test the setup:: 53 | 54 | hil node list all 55 | 56 | If the above command reports an empty list, 57 | HIL is successfully installed and ready for hacking. 58 | 59 | 60 | -------------------------------------------------------------------------------- /docs/README.rst: -------------------------------------------------------------------------------- 1 | HIL - Introduction 2 | ==================== 3 | 4 | HIL is a low-level tool for reserving physical machines and connecting 5 | them via isolated networks. It does not prescribe a particular 6 | method for imaging/managing said machines, allowing the user to use 7 | any solution of their choosing. 8 | 9 | HIL keeps track of available resources in a database, which a system 10 | administrator must populate initially. 11 | 12 | **This includes information such as:** 13 | 14 | - What machines are available 15 | - What network interfaces they have 16 | - Where those NICs are connected (what port on what switch) 17 | 18 | **From there, a regular user may:** 19 | 20 | - Reserve physical machines 21 | - Create isolated logical networks 22 | - Create "headnodes," which are small virtual machines usable for 23 | management/provisioning purposes 24 | - Connect network interfaces belonging to physical nodes and/or headnodes to 25 | logical networks. 26 | - Reboot their machines, view the serial consoles -- additionally, such management 27 | features may exist in the future. 28 | 29 | **A typical user workflow might look like:** 30 | 31 | 1. Reserve some machines. 32 | #. Create a logical "provisioning" network. 33 | #. Connect a NIC from each machine to the provisioning network. In particular, 34 | one could connect a NIC from which the machine will attempt to boot. 35 | #. Create a headnode, and attach it to the provisioning network. 36 | #. Log in to the headnode, set up a PXE server, reboot the nodes, and deploy an 37 | operating system on them via the network.
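The same workflow can be scripted against the Python client library described in ``docs/client-library.md``. The sketch below is illustrative only: ``Client``, ``RequestsHTTPClient`` and ``project.create`` come from that document, while the remaining method names and signatures are assumptions that should be checked against the modules under ``hil/client/``::

    import os
    from hil.client.client import Client, RequestsHTTPClient

    # Credentials come from the same environment variables the `hil` CLI uses.
    http_client = RequestsHTTPClient()
    http_client.auth = (os.getenv('HIL_USERNAME'), os.getenv('HIL_PASSWORD'))
    c = Client(os.getenv('HIL_ENDPOINT'), http_client)

    c.project.create('demo')                           # documented call
    c.project.connect('demo', 'node-0')                # assumed: reserve a node
    c.network.create('prov', 'demo', 'demo', '')       # assumed: '' = allocated ID
    c.node.connect_network('node-0', 'nic1', 'prov', 'vlan/native')  # assumed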
38 | 39 | Requirements 40 | ------------- 41 | 42 | Required software/hardware for running a production HIL includes: 43 | 44 | * Network switches: 45 | 46 | * At least one switch from the Cisco Nexus 5xxx or Dell PowerConnect 55xx families 47 | * For environments including more than one switch, all VLANs must be trunked to all managed switches 48 | 49 | * A single node that has the following: 50 | 51 | * A webserver capable of supporting the WSGI standard (Apache/mod_wsgi is the only one tested) 52 | * Python 2.7, with the ability to install packages via pip 53 | * Access to: 54 | 55 | * The Internet or intranet (a way for users to connect to the HIL service) 56 | * The administrative telnet IP on the managed switches 57 | 58 | * Currently only CentOS and RHEL 7.x have been tested, though any node that otherwise meets these requirements should function. 59 | 60 | * Database: a Postgres database server. SQLite works but is not recommended for production. 61 | 62 | For IPMI proxy functionality: 63 | 64 | * Network access from the HIL service node to the IPMI interfaces of nodes under management 65 | * Nodes that support IPMI v2+ 66 | * A recent version of ipmitool installed on the HIL service node 67 | 68 | For headnode functionality: 69 | 70 | * A recent Linux version for the HIL service node that has libvirt with KVM installed 71 | * Some number of VM templates 72 | * A trunk port connected between the switch and HIL service node that carries all VLANs accessible from HIL 73 | 74 | Documentation 75 | -------------- 76 | 77 | * The full documentation is available at `ReadTheDocs `_ in a beautiful and easy to navigate web interface. 78 | * `The docs directory `_ contains all the documentation in .rst and .md format 79 | * `Examples `_ contains examples of config files, templates for creating headnode VM images and a script to register nodes with HIL. 80 | 81 | 82 | Mass Open Cloud 83 | ---------------- 84 | 85 | This project is part of the larger `Mass Open Cloud 86 | `_. For a description of the team and other 87 | information, see 88 | ``_. 89 | 90 | -------------------------------------------------------------------------------- /docs/client-library.md: -------------------------------------------------------------------------------- 1 | # HIL Client Library 2 | 3 | ## Description 4 | The HIL API Client Library for Python is designed for Python client-application developers. It offers simple access to HIL APIs. 5 | 6 | ## How to Install HIL Modules? 7 | ``` 8 | $ pip install git+https://github.com/cci-moc/hil 9 | ``` 10 | 11 | ## How to Get Started? 12 | ``` 13 | import os 14 | from hil.client.client import Client, RequestsHTTPClient 15 | 16 | ep = os.environ.get('HIL_ENDPOINT') 17 | basic_username = os.getenv('HIL_USERNAME') 18 | basic_password = os.getenv('HIL_PASSWORD') 19 | 20 | http_client = RequestsHTTPClient() 21 | http_client.auth = (basic_username, basic_password) 22 | C = Client(ep, http_client) 23 | print C.project.create("test-project") 24 | 25 | ``` 26 | 27 | ## More Examples 28 | [leasing script](https://github.com/CCI-MOC/hil/blob/master/examples/leasing/node_release_script.py) 29 | -------------------------------------------------------------------------------- /docs/consistency-model.md: -------------------------------------------------------------------------------- 1 | # Consistency model 2 | 3 | Here is the consistency model for headnodes.
4 | 5 | - ``headnode_create``: After running this, you can then run 6 | ``headnode_create_nic``, ``headnode_delete_nic``, 7 | ``headnode_connect_network``, ``headnode_detach_network`` as much as you 8 | want, until you run ``headnode_start``. The headnode's VM is then 9 | created, started, and connected to the appropriate networks. As soon as 10 | you do this, the headnode becomes 'frozen', and no more changes to it are 11 | allowed. (Currently, the headnode is marked dirty/clean instead of 12 | unfrozen/frozen. This lines up with the semantics in one way, in that a 13 | dirty headnode hasn't been fully applied yet. But, they act different 14 | enough that this will probably change. This change will not affect 15 | external behavior.) 16 | 17 | - ``headnode_delete``: This deletes the headnode immediately, detaching it 18 | from all networks it was attached to. Due to current limitations, this 19 | operation cannot be run at all. Eventually, this call should succeed as 20 | long as the headnode is powered off. 21 | 22 | - ``headnode_start``, ``headnode_stop``: These cycle power on the headnode. 23 | It's also possible that, eventually, we might allow networking changes to 24 | powered-off headnodes. (It's semantically reasonable, but might be tricky 25 | in implementation.) 26 | -------------------------------------------------------------------------------- /docs/driver-model.rst: -------------------------------------------------------------------------------- 1 | Driver Model 2 | ============ 3 | 4 | HIL supports different types of devices (like network switches, out of band 5 | management controllers) through a driver model. This document describes the 6 | general concepts of that driver model and where it is currently applied. 7 | 8 | The HIL API manipulates each type of device (currently just network devices) 9 | through the same set of API calls, meaning that code within the main HIL tree 10 | need not worry about platform-specific details (like how to add a 11 | VLAN to a Cisco 5500 switch port). 12 | 13 | Most objects within HIL are persisted to the database, and thus classes map to 14 | tables. The driver makes use of SQLAlchemy's `joined table inheritance 15 | `_ to map 16 | class hierarchies to the database. 17 | 18 | Each type of object that supports different drivers has a top-level superclass, 19 | e.g. ``Switch`` or ``OBM``, and the drivers themselves are subclasses of that 20 | superclass (typically defined from within extensions). Under most 21 | circumstances, SQLAlchemy makes this "just work"; when fetching the object from 22 | the database, the right subclass will be automatically used. However, when 23 | *creating* an object for the first time, HIL must be told which driver to use 24 | (is the switch a powerconnect or a nexus?). For these cases, the api calls in 25 | question (node_register, switch_register...) will expect a type field to be 26 | provided. The module ``hil.class_resolver`` provides facilities for finding 27 | the appropriate subclass based on the contents of this type field. 28 | 29 | The switch drivers shipped with HIL itself are defined in the modules beneath 30 | ``hil.ext.switches``. These include drivers for the Dell Powerconnect 31 | 5500-series (in ``hil.ext.switches.dell``) and the Cisco Nexus 5500 ( 32 | ``hil.ext.switches.nexus``), as well as a mock driver useful for testing 33 | (``hil.ext.switches.mock``). 
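As a rough illustration, a new switch driver is simply another ``Switch``
subclass registered under its own type URL. The following is a sketch, not a
working driver: the ``api_name`` attribute, the mapper arguments and the hook
names shown here are assumptions modelled loosely on the mock driver and
should be checked against ``hil/ext/switches/mock.py``::

    from hil.model import Switch


    class ExampleSwitch(Switch):
        """Skeleton of a switch driver (illustrative only)."""

        # The value clients pass as the ``type`` field to switch_register;
        # hil.class_resolver uses it to select this subclass.
        api_name = 'http://schema.massopencloud.org/haas/v0/switches/example'

        __mapper_args__ = {'polymorphic_identity': api_name}

        @staticmethod
        def validate(kwargs):
            # Validate the driver-specific fields supplied at registration.
            pass

        def session(self):
            # Return an object that knows how to apply networking changes
            # to the physical switch.
            raise NotImplementedError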
34 | -------------------------------------------------------------------------------- /docs/extensions.rst: -------------------------------------------------------------------------------- 1 | Extensions 2 | ========== 3 | 4 | HIL supports a simple extension mechanism, to allow external plugins 5 | to implement things we don't want in the HIL core. One obvious example 6 | of this is drivers. 7 | 8 | Extensions are Python modules. The ``extensions`` section in ``hil.cfg`` 9 | specifies a list of modules to import on startup, for example:: 10 | 11 | [extensions] 12 | hil.ext.driver.switch.dell = 13 | hil.ext.driver.switch.complex_vlan = 14 | hil.ext.driver.obm.ipmi = 15 | some_3rd_party.hil.drivers.obm.robotic_power_button_pusher 16 | 17 | If the extension requires any kind of initialization, it may define a function 18 | ``setup``, which will be executed after all extensions have been loaded. 19 | This function must accept arbitrary arguments (for forwards compatibility), 20 | but at present must not rely on the presence or value of any argument. 21 | 22 | If the extension needs its own configuration options, it may read them from a 23 | section with the same name as the module, e.g.:: 24 | 25 | [extensions] 26 | some_3rd_party.hil.drivers.obm.robotic_power_button_pusher 27 | 28 | [some_3rd_party.hil.drivers.obm.robotic_power_button_pusher] 29 | push_duration = 3 seconds 30 | 31 | Extensions should not make use of any part of the HIL source tree that does not 32 | explicitly invite it (i.e. everything by default is *Private*). Components 33 | which may be used from extensions will explicitly say so in their 34 | documentation (and describe in detail how they may be used). 35 | Extension-approved components currently include: 36 | 37 | * Most of hil.network_allocator 38 | * hil.auth 39 | * From hil.model: 40 | * db.Model 41 | * Switch 42 | * The migration framework; see `Migrations `_ for an overview. 43 | 44 | See the docstrings for each component for details. 45 | 46 | Additionally, extensions may add wsgi middleware to the flask 47 | application from their ``setup`` function. For example:: 48 | 49 | app.wsgi_app = my_middleware(app.wsgi_app) 50 | 51 | Note that the order in which the ``setup`` functions are run is not 52 | defined. As such, if multiple extensions add wsgi middleware, the 53 | order in which they are applied is also undefined. Using more than one 54 | such extension is discouraged. An ordering *may* be defined in the 55 | future. 56 | -------------------------------------------------------------------------------- /docs/index.rst: -------------------------------------------------------------------------------- 1 | .. HIL documentation master file, created by 2 | sphinx-quickstart on Tue Sep 20 12:23:26 2016. 3 | You can adapt this file completely to your liking, but it should at least 4 | contain the root `toctree` directive. 5 | 6 | Hardware Isolation Layer - Documentation 7 | ========================================= 8 | 9 | Contents: 10 | 11 | ..
toctree:: 12 | :maxdepth: 1 13 | 14 | Introduction 15 | HIL Architecture Overview 16 | Installation 17 | Installation - Developer - TL;DR 18 | Installation - Developer 19 | Upgrade 20 | HIL as a client 21 | Extensions 22 | API Description 23 | REST API 24 | Developer Guidelines 25 | Deployment 26 | Consistency Model 27 | Install and configure PostgreSQL CentOS7 28 | Keystone Authentication 29 | Logging 30 | Migrations 31 | Network drivers 32 | Types of Networks 33 | Network Teardown 34 | Out-of-Band Management 35 | Testing for HIL 36 | VLAN Primer 37 | Driver Model 38 | 39 | 40 | 41 | 42 | Search 43 | ------- 44 | 45 | * :ref:`search` 46 | -------------------------------------------------------------------------------- /docs/keystone-auth.md: -------------------------------------------------------------------------------- 1 | # Keystone Authentication 2 | 3 | An authentication backend for Openstack's Keystone is maintained in this 4 | source tree as `hil.ext.auth.keystone`. This document describes its 5 | configuration and usage in detail. 6 | 7 | NOTE: The HIL command line interface only supports the Keystone v3 API. 8 | The server supports anything supported by [keystonemiddleware][1]. 9 | 10 | ## Usage 11 | 12 | Once HIL has been configured to work with Keystone, an administrator 13 | must manually add Openstack projects to HIL before they can access the 14 | HIL API. The HIL project names must correspond to the Openstack project UUIDs. 15 | For example, an administrator may execute the command: 16 | 17 | hil project_create 00de7c85e594473db7461cdf7367166a 18 | 19 | to grant the Openstack project with that UUID access to HIL. 20 | 21 | Note that the plugin recognizes any user with an `admin` role on any 22 | project as a HIL administrator, similar to the default policy for core 23 | Openstack projects. This is true even for projects that do not exist 24 | within HIL; such projects will not be able to own resources (such as 25 | nodes, networks, etc.), but may perform admin-only operations (such as 26 | creating projects). 27 | 28 | The HIL command line interface will look for the same `OS_*` 29 | environment variables used by the Openstack command line tools; these 30 | may be set by a user to authenticate when using the CLI. 31 | 32 | A script to set these variables correctly can be downloaded from the 33 | Openstack web dashboard via "Access & Security." 34 | 35 | ## Configuration 36 | 37 | As with any other extension, you must load the extension in `hil.cfg`: 38 | 39 | [extensions] 40 | hil.ext.auth.keystone = 41 | 42 | The backend must then be configured to talk to your Keystone server. 43 | The Keystone project maintains documentation on how to do this at: 44 | 45 | 46 | 47 | Configuring HIL to talk to Keystone deviates in the following ways: 48 | 49 | * The paste configuration is not used; you can simply ignore the 50 | sections that refer to paste. 51 | * The options that the Keystone documentation puts in the section 52 | `[keystone_authtoken]` should instead be placed in the extension's 53 | section in `hil.cfg`, i.e. `[hil.ext.auth.keystone]`. 54 | 55 | [1]: http://docs.openstack.org/developer/keystonemiddleware/ 56 | 57 | ## Debugging Tips 58 | 59 | If authentication is not working with HIL, first check if authentication to OpenStack is working. Using the OpenStack CLI, run the command: 60 | ``openstack token issue -f value -c id``. 61 | If a text token is returned, then authentication to OpenStack is working.
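The same check can be scripted from Python. The sketch below uses `keystoneauth1` (normally installed alongside the keystone client packages -- its presence in your environment is an assumption) and reads the usual `OS_*` variables, as in `ci/keystone/keystonerc`:

```python
import os

from keystoneauth1 import session
from keystoneauth1.identity import v3

# Build a v3 password auth plugin from the same variables the CLI uses.
auth = v3.Password(
    auth_url=os.environ['OS_AUTH_URL'],
    username=os.environ['OS_USERNAME'],
    password=os.environ['OS_PASSWORD'],
    project_name=os.environ['OS_PROJECT_NAME'],
    user_domain_id=os.environ.get('OS_USER_DOMAIN_ID', 'default'),
    project_domain_id=os.environ.get('OS_PROJECT_DOMAIN_ID', 'default'),
)

# Prints a token if authentication to Keystone works.
print(session.Session(auth=auth).get_token())
```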
62 | 63 | Testing authentication directly to the HIL API is also helpful. 64 | Using the token from the tip above, run: 65 | ``curl -H 'x-auth-token: ' /nodes/free``. 66 | If the response lists the nodes in the current HIL setup, then the Keystone middleware has been setup correctly. 67 | -------------------------------------------------------------------------------- /docs/logging.md: -------------------------------------------------------------------------------- 1 | # Logging 2 | 3 | ## Overview 4 | 5 | HIL supports basic logging of the API calls received in the below format, 6 | recording the user who issued the API call (or guest if the user was 7 | unauthenticated) and the time. Log files are rotated daily. 8 | 9 | ``` 10 | 2016-03-31 14:55:07,961 - hil.rest - INFO - (guest) - API call: list_projects() 11 | ``` 12 | 13 | ## Setup 14 | Logging can be configured with the following options in `hil.cfg`. 15 | 16 | ``` 17 | [general] 18 | log_level = DEBUG 19 | log_dir = /var/log/hil 20 | ``` 21 | 22 | `log_dir` specifies the directory where the log files will be stored. 23 | The HIL user must have write permissions to this directory. 24 | If the option is omitted logging to files is disabled. 25 | 26 | `log_level` specifies the logging level to record. Valid options are: 27 | `CRITICAL`, `DEBUG`, `ERROR`, `FATAL`, `INFO`, `WARN`, `WARNING`. 28 | The default value is `WARNING`, but an option of `INFO` is recommended 29 | for an API log (A log level of `INFO` is set for API calls). 30 | 31 | For more information on logging visit the 32 | [python 2 documentation](https://docs.python.org/2/howto/logging.html#when-to-use-logging). -------------------------------------------------------------------------------- /docs/maintenance-pool.md: -------------------------------------------------------------------------------- 1 | # Maintenance Pool 2 | 3 | ## Overview 4 | 5 | The maintenance pool is an optional service implemented by server administrators 6 | that ideally performs extra operations on nodes after they have been removed from a project 7 | by `hil project_detach_node`. If enabled, HIL will move the node from its original project 8 | into the designated maintenance project. Then, HIL will POST to the URL of the service with 9 | the name of the node. 10 | 11 | Maintenance service example: Once the POST is received, the maintenance service 12 | logs when the node was detached, wipes the disks, sets the boot device to PXE, and then 13 | frees the node from the maintenance pool with `hil project_detach_node`. 14 | 15 | ## Configuration 16 | 17 | The maintenance pool will be active if the `maintenance` section exists in hil.cfg. 18 | If so, `maintenance_project` must be set equal to the 19 | name of the maintenance project registered in HIL and `url` must point at the maintenance 20 | service. 21 | -------------------------------------------------------------------------------- /docs/network-teardown.md: -------------------------------------------------------------------------------- 1 | # Network Teardown 2 | 3 | The script `create_dell_vlans` can be used to pre-populate the bridges 4 | and vlan nics needed for the HIL to operate, but right now we don't 5 | have an automated way to delete them. 
You shouldn't need to do this to 6 | use the HIL, but if for some reason you want to delete the nics related 7 | to a given vlan, you can do: 8 | 9 | brctl delif br-vlan${vlan_number} em3.${vlan_number} 10 | vconfig rem em3.${vlan_number} 11 | ifconfig br-vlan${vlan_number} down 12 | brctl delbr br-vlan${vlan_number} 13 | 14 | We're using `em3` as an example; do a `brctl show` to find out what 15 | the right nic actually is. 16 | -------------------------------------------------------------------------------- /docs/networks.md: -------------------------------------------------------------------------------- 1 | # Types of Networks in the HIL 2 | 3 | 4 | Networks in the HIL have three fields: 5 | 6 | - 'owner' represents what project owns it; or 'admin' if it was 7 | administrator-created. If there are still networks owned by a project, 8 | then that project cannot be deleted. Only the owner has the ability to 9 | delete a network. 10 | 11 | - 'access' represents which projects' nodes can access it, or `[]` (the 12 | empty list) if all projects' nodes can. 13 | 14 | - 'allocated' represents whether the underlying network ID was taken from the 15 | network driver's allocation pool. (The other option is that it was manually 16 | specified by an administrator.) This is important for network deletion. 17 | 18 | The command line tool's `network_create` treats an empty string to 19 | mean that the network should be allocated; any other argument is 20 | treated as a network ID. 21 | 22 | These fields are not really independent of each other. Here are the legal 23 | combinations, with some explanation: 24 | 25 | - (admin, all, yes): Public network internal to HIL 26 | - (admin, all, no): Public network that connects outside the HIL 27 | - (admin, project, yes): External provisioning network for one project 28 | - (admin, project, no): (kind of useless, but legal) 29 | - (project, project, yes): Normal project-created network 30 | 31 | 32 | Here are the illegal ones: 33 | 34 | - (project, [anything], no): Normal users cannot just assert control of external 35 | VLANs. 36 | 37 | - (project, all, yes): In the same vein, user-created public networks are also 38 | not allowed. 39 | 40 | - (project, different project, yes): Projects can grant access to other networks later using 'network_grant_project_access', but must have the project that is the owner of the network on the access list. 41 | 42 | -------------------------------------------------------------------------------- /docs/overview.md: -------------------------------------------------------------------------------- 1 | # HIL Architecture Overview 2 | 3 | The HIL (Hardware Isolation Layer) is a tool used to perform network 4 | isolation of physical machines, so that different parties can be 5 | given physical capacity within the same data center, without needing 6 | to trust each other. 7 | 8 | ## Operation 9 | 10 | From a user's perspective, the HIL allows one to: 11 | 12 | * allocate physical nodes 13 | * create login/management nodes (sometimes called headnodes) 14 | * configure networks, in particular: 15 | * create logical networks 16 | * connect physical nodes to those logical networks (on a per-nic basis) 17 | * connect login/management nodes to those networks. 18 | 19 | Right now, we're using 802.1q VLANs to achieve network isolation. The 20 | HIL communicates with a managed switch, to which the physical 21 | hardware is attached. 
When networking operations are performed, the 22 | HIL sends commands to the switch which configure the relevant ports 23 | as needed to create the logical networks. 24 | 25 | ## Anatomy of a Running Installation 26 | 27 | 28 | SWITCH 29 | _____________ 30 | ---------------] access{N} | 31 | =node-1= | | 32 | ---------------] | 33 | | | 34 | | | ( ) 35 | | | ( ^br-vlanM^------^trunk-nic.M^ ) 36 | ---------------] access{M} | ( %hn-B% ) 37 | =node-2= | | ( ) 38 | ---------------] | ( %hn-A% ) 39 | | | ( ^br-vlanN^------^trunk-nic.N^ ) 40 | | | ( ) 41 | | | ( ) 42 | ---------------] trunk [--------=hil-master= 43 | =node-3= | | 44 | ---------------]___________[ 45 | 46 | Legend: 47 | 48 | ( running in software ) 49 | 50 | ^virtual/logical network interface^ 51 | =physical node= 52 | %virtualized node% 53 | ]/[ switch's Ethernet ports 54 | --- connection (virtual or physical) 55 | access{X} Denotes that the adjacent port is set to access mode, with vlan #X. 56 | trunk Denotes that the adjacent port is set to trunk mode. 57 | 58 | 59 | A typical installation of the HIL will have the following components: 60 | 61 | * The HIL API server and headnode VM host 62 | * A managed switch 63 | * One or more physical nodes, each of which has one or more network 64 | interfaces. 65 | 66 | These components will be configured as follows: 67 | 68 | * All of the physical nodes will have some subset (possibly all) of 69 | their nics connected to the managed switch. 70 | * The ports that these are connected to will be set to access mode. 71 | * The HIL headnode host will have one nic connected to the managed switch. 72 | * The corresponding port will be set to trunk mode, with all vlans 73 | enabled. 74 | * The HIL master will be running the libvirt daemon, which will have at least 75 | one VM, powered off, which can be cloned and started to provide 76 | login/management nodes. 77 | * A network object in the HIL corresponds to a vlan id. (In future versions, 78 | we will also allow other mechanisms, such as VXLAN.) Network operations 79 | have the following effects 80 | * Ports added to the network will have their access vlan set to the vlan id 81 | associated with the network. 82 | * Ports removed from the network will be set to access no vlans. 83 | -------------------------------------------------------------------------------- /examples/cloud-img-with-passwd/.gitignore: -------------------------------------------------------------------------------- 1 | mnt 2 | *.raw 3 | *.img 4 | *.qcow2 5 | -------------------------------------------------------------------------------- /examples/cloud-img-with-passwd/Makefile: -------------------------------------------------------------------------------- 1 | # $(distro).mk defines IMG_IN and MIRROR. 2 | include $(distro).mk 3 | 4 | all: build 5 | $(IMG_IN): 6 | wget $(MIRROR)/$(IMG_IN) 7 | check: $(IMG_IN) SHA256SUMS.$(distro) 8 | sha256sum -c SHA256SUMS.$(distro) 9 | $(IMG_IN).raw: $(IMG_IN) 10 | qemu-img convert -O raw $< $@ 11 | build: $(IMG_IN).raw 12 | mkdir mnt 13 | mount -o loop,offset=$$((512 * 2048)) $< mnt 14 | mount -t sysfs none mnt/sys 15 | mount -t proc none mnt/proc 16 | chroot mnt /usr/bin/passwd 17 | umount mnt/sys 18 | umount mnt/proc 19 | umount mnt 20 | -------------------------------------------------------------------------------- /examples/cloud-img-with-passwd/README.rst: -------------------------------------------------------------------------------- 1 | This directory contains example scripts for generating a base headnode image 2 | for HIL. 
The resulting image is identical to either the Ubuntu 14.04 cloud 3 | image, or the one for CentOS 7, with one exception: there is a known root 4 | password. The standard cloud image needs credentials to be provided via 5 | cloud-init, which is not necessarily available in a HIL environment. 6 | 7 | Running ``sudo make distro=$DISTRO`` (where $DISTRO is either ``ubuntu`` or 8 | ``centos``) in this directory will download the cloud image, prompt 9 | you for a password, and make a modified version (same filename with a ``.raw`` 10 | at the end) with the root password you supply. 11 | 12 | The image will still run cloud-init, but when it boots (which may take some 13 | time) you will be able to log in as root with the chosen password. 14 | 15 | This can easily be extended to use other distros' cloud images; copy one of the 16 | .mk files, adjust the values of MIRROR and IMG_IN, and creata a 17 | SHA256SUMS.distro for your distro. 18 | -------------------------------------------------------------------------------- /examples/cloud-img-with-passwd/SHA256SUMS.centos: -------------------------------------------------------------------------------- 1 | e324e3ab1d24a1bbf035ddb365e7f9058c0b454acf48d7aa15c5519fae5998ab CentOS-7-x86_64-GenericCloud-1503.qcow2 2 | -------------------------------------------------------------------------------- /examples/cloud-img-with-passwd/SHA256SUMS.ubuntu: -------------------------------------------------------------------------------- 1 | 68e1365c896052731d2bd71579d4ada91af25040f2d1865c3b88e05ff9aaa136 *trusty-server-cloudimg-amd64-disk1.img 2 | -------------------------------------------------------------------------------- /examples/cloud-img-with-passwd/centos.mk: -------------------------------------------------------------------------------- 1 | MIRROR := http://cloud.centos.org/centos/7/images/ 2 | IMG_IN := CentOS-7-x86_64-GenericCloud-1503.qcow2 3 | -------------------------------------------------------------------------------- /examples/cloud-img-with-passwd/ubuntu.mk: -------------------------------------------------------------------------------- 1 | MIRROR := http://cloud-images.ubuntu.com/trusty/current 2 | IMG_IN := trusty-server-cloudimg-amd64-disk1.img 3 | -------------------------------------------------------------------------------- /examples/dbinit.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | """ 3 | Register nodes with HIL. 4 | 5 | This is intended to be used as a template for either creating a mock HIL setup 6 | for development or to be modified to register real-life nodes that follow a 7 | particular pattern. 8 | 9 | In the example environment for which this module is written, there are 10 10 | nodes which have IPMI interfaces that are sequentially numbered starting with 11 | 10.0.0.0, have a username of "ADMIN_USER" and password of "ADMIN_PASSWORD". 
12 | The ports are also numbered sequentially and are named following a dell switch 13 | scheme, which have ports that look like "gi1/0/5" 14 | 15 | It could be used in an environment similar to the one which 16 | ``hil.cfg`` corresponds, though could also be used for development with the 17 | ``hil.cfg.dev*`` 18 | """ 19 | 20 | from subprocess import check_call 21 | 22 | N_NODES = 6 23 | 24 | ipmi_user = "ADMIN_USER" 25 | ipmi_pass = "ADMIN_PASSWORD" 26 | switch = "mock01" 27 | obmd_base_uri = 'http://obmd.example.com/nodes/' 28 | obmd_admin_token = 'secret' 29 | 30 | 31 | def hil(*args): 32 | """Convenience function that calls the hil command line tool with 33 | the given arguments. 34 | """ 35 | args = map(str, args) 36 | print args 37 | check_call(['hil'] + args) 38 | 39 | 40 | hil('switch', 'register', switch, 'mock', 'ip', 'user', 'pass') 41 | 42 | for node in range(N_NODES): 43 | nic_port = "gi1/0/%d" % (node) 44 | nic_name = 'nic1' 45 | hil('node', 'register', 46 | node, 47 | obmd_base_uri + str(node), 48 | obmd_admin_token) 49 | hil('node', 'nic', 'register', node, nic_name, 'FillThisInLater') 50 | hil('port', 'register', switch, nic_port) 51 | hil('port', 'nic', 'add', switch, nic_port, node, nic_name) 52 | -------------------------------------------------------------------------------- /examples/deployment.cfg: -------------------------------------------------------------------------------- 1 | # This is an example configuration file for deployment testing HIL. The 2 | # Deployment Tests will read its configuration from a file called 3 | # ``deployment.cfg``, in the current working directory. To get started, simply 4 | # copy this file, and modify as needed. 5 | 6 | # Note that, while this looks a lot like hil.cfg, it works slightly 7 | # differently---it has one or two extra options, and will ONLY work with the 8 | # simple_vlan driver. 9 | 10 | [general] 11 | # The driver to use for the network switch. The value must be the name of one 12 | # of the modules in hil.drivers: 13 | driver = simple_vlan 14 | 15 | [headnode] 16 | # The trunked network interface on the host. This is the nic that the VMs will 17 | # be bridged to to gain access to their networks. 18 | trunk_nic = em3 19 | base_imgs = base-headnode, base-headnode-2 20 | # libvirt instance to connect to. This is the only value that is tested, and 21 | # the installation guide assumes that you are using it. 22 | libvirt_endpoint = qemu:///system 23 | 24 | [driver simple_vlan] 25 | switch = {"switch": "dell", "ip": "192.168.0.1", "user": "foo", "pass": "bar"} 26 | # The port that the switch is trunking to the head node. Note that this is 27 | # currently set manually, but must be known for deployment testing. 28 | trunk_port = gi1/0/4 29 | 30 | [vlan] 31 | # Another valid list: 14,35-40, 100-900, 904, 65-85 32 | vlans = 100-109 33 | -------------------------------------------------------------------------------- /examples/hil.cfg.dev-no-hardware: -------------------------------------------------------------------------------- 1 | # This is an example hil.cfg file suitable for use for development without a 2 | # full set of hardware (switches, nodes, libvirtd...). It can be used as-is, 3 | # but using the vlan_pool allocator may also be desirable (see the comments 4 | # below). 5 | [general] 6 | log_level = debug 7 | # log_dir = . 
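Looking back at examples/dbinit.py above: the "particular pattern" its docstring describes can be made explicit by deriving each node's IPMI address and port name from the node number. The values below are illustrative only and are not part of the shipped script.

    N_NODES = 10

    for node in range(N_NODES):
        ipmi_host = "10.0.0.%d" % node     # sequential IPMI addresses
        nic_port = "gi1/0/%d" % node       # Dell-style port naming
        obmd_uri = "http://obmd.example.com/nodes/%d" % node
        print("%d %s %s %s" % (node, ipmi_host, nic_port, obmd_uri))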
8 | 9 | [auth] 10 | require_authentication = False 11 | 12 | [headnode] 13 | trunk_nic = eth0 14 | base_imgs = img1, img2, img3, img4 15 | libvirt_endpoint = qemu:///system 16 | 17 | [client] 18 | endpoint = http://127.0.0.1:5000 19 | 20 | [database] 21 | uri = sqlite:///hil.db 22 | #uri = postgresql://:@
/ 23 | 24 | [devel] 25 | dry_run=True 26 | 27 | [extensions] 28 | hil.ext.switches.mock = 29 | hil.ext.network_allocators.null = 30 | hil.ext.auth.null = 31 | # Depending on what you're doing, you may want to change this to a 32 | # different authentication backend: 33 | # hil.ext.auth.database = 34 | # hil.ext.auth.keystone = 35 | # 36 | # If using the keystone backend, you'll also need to fill out this section: 37 | # [hil.ext.auth.keystone] 38 | # # This is equivalent to the [keystone_auth] section described at: 39 | # # http://docs.openstack.org/developer/keystonemiddleware/middlewarearchitecture.html 40 | # # 41 | # # Below is a reasonable configuration *for development*. It is not suitable 42 | # # for production, if for no other reason than because it forces plaintext 43 | # # (non-TLS) connections to keystone: 44 | # auth_url = http://127.0.0.1:35357/v3 45 | # auth_protocol = http 46 | # username = admin 47 | # password = s3cr3t 48 | # project_name = admin 49 | # admin_user = admin 50 | # admin_password = s3cr3t 51 | 52 | # Depending on what you're trying to do, you may want to use the vlan_pool 53 | # network allocator instead of the null allocator. To do this, comment out the 54 | # null allocator extension above, and uncomment the following: 55 | # 56 | # hil.ext.network_allocators.vlan_pool = 57 | # 58 | #[hil.ext.network_allocators.vlan_pool] 59 | #vlans = 100-200, 300-500 60 | -------------------------------------------------------------------------------- /examples/leasing/README: -------------------------------------------------------------------------------- 1 | The leasing script is used to free a node 2 | from a specific project after a certain amount of time. 3 | 4 | The procedure is as follows: 5 | A cron job calls the leasing script at pre-defined time 6 | intervals (e.g., every 30 seconds). The leasing script checks the 7 | node's status in the status file and updates the number 8 | of times the cron job has called the leasing script while the node 9 | has been assigned to a project. 10 | If this number of times passes the threshold, the node 11 | will be freed from that project. 12 | So, threshold * cron_job_interval = the time after which the node 13 | will be released. For example, if the threshold is 5 and the interval 14 | is set to 1 minute, then after 5*1 = 5 minutes, the node will 15 | be released. 16 | 17 | Tenants who need more time should ask for a time extension. 18 | One can change the "assigned to a project" time for all nodes 19 | (by changing the threshold in the config file) or for one node 20 | (by changing the line in the status file related to the node). 21 | 22 | The script reads a config file from /etc/leasing.cfg 23 | which includes the names of the nodes, the time threshold, 24 | the admin user name and password, the status file, and the endpoint 25 | which specifies the ip address and port number that hil runs at. 26 | 27 | The status file includes each node's status: which node is in 28 | which project, which node is free, and for how long the node 29 | has been assigned to a project. 30 | 31 | Note: this script does NOT support other auth backends 32 | such as keystone. 33 | 34 | ******************* 35 | 36 | Config file format (/etc/leasing.cfg): Read the comments in the config file.
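Before the status-file format below, here is a simplified sketch of the bookkeeping the README describes. This is not the actual node_release_script.py; the threshold, file path, and the release call are placeholders.

    THRESHOLD = 3
    STATUS_FILE = '/var/lib/leasing'

    def release(node, project):
        # The real script would call the HIL API here to detach the node
        # from the project; this is only a placeholder.
        print('releasing %s from %s' % (node, project))

    def check_leases():
        """Run once per cron interval: bump counters and free expired nodes."""
        updated = []
        with open(STATUS_FILE) as f:
            for line in f:
                node, project, count = line.split()
                count = int(count)
                if project != 'free_pool':
                    count += 1               # one more cron interval has passed
                if count > THRESHOLD:
                    release(node, project)
                    project, count = 'free_pool', 0
                updated.append('%s %s %d' % (node, project, count))
        with open(STATUS_FILE, 'w') as f:
            f.write('\n'.join(updated) + '\n')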
37 | 38 | ****************** 39 | 40 | Status file format (e.g., var/lib/leasing): 41 | node_name project_name cron_job_called_script_times 42 | -------------------------------------------------------------------------------- /examples/leasing/leasing: -------------------------------------------------------------------------------- 1 | dell0 lease_test 1 2 | nexus_12 lease_test 2 3 | cisco_5 free_pool 0 4 | IBM_3 free_pool 0 5 | -------------------------------------------------------------------------------- /examples/leasing/leasing.cfg: -------------------------------------------------------------------------------- 1 | [hil] 2 | # node_list: names of the nodes 3 | node_list: dell0, nexus_12, cisco_5, IBM_3 4 | # threshold: number of times that cron job should call the leasing 5 | # script before releasing the node 6 | threshold: 3 7 | # user_name: HIL project's admin username 8 | user_name: USER 9 | # password: HIL project's admin password 10 | password: PASSWORD 11 | # endpoint: the ip address and the port number 12 | # which hil is running on 13 | endpoint: http://127.0.0.1:5000 14 | # status_file: the path to the status file (e.g., /var/lib/leasing) 15 | status_file: /var/lib/leasing 16 | -------------------------------------------------------------------------------- /examples/puppet_headnode/README.md: -------------------------------------------------------------------------------- 1 | These puppet manifests set up an ubuntu 14.04 headnode to pxe boot 2 | nodes into the CentOS 6.6 installer, with a kickstart file which will 3 | automate the install. 4 | 5 | # Setup 6 | 7 | 1. Create a headnode. The headnode must have a nic that will be 8 | recognized as eth1, which must be on a HIL network that the nodes 9 | will pxe boot off of. 10 | 2. Download the CentOS 6.6 minimal ISO, verify the checksum, and then 11 | copy it to root's home directory: 12 | 13 | ./download_iso.sh 14 | sha256sum -c sha256sums.txt 15 | cp *.iso /root 16 | 17 | 3. Install puppet: 18 | 19 | apt-get install puppet 20 | 21 | 4. Git clone the hil to /root, cd into the examples/puppet_headnode/. 22 | 23 | cd /root 24 | git clone https://github.com/CCI-MOC/hil 25 | cd hil/examples/puppet_headnode 26 | 27 | 5. You may then wish to modify some of the files therein; in 28 | particular: 29 | 30 | * `manifests/static/pxelinux_cfg` is the pxelinux config file that 31 | will be used to boot nodes into the installer. The `ksdevice=...` 32 | parameter must refer to the nic that will be used to fetch the 33 | kickstart file, which must be the nic that is on a network with 34 | the headnode. (typically the boot nic). Adjust this if needed. 35 | * Similarly, the kickstart file `manifests/static/ks.cfg` contains 36 | information on setting up the network, including during the 37 | install. This should be modified to match your system. See the 38 | comments in that file for more detail. 39 | * **very** importantly, change the default root password in the 40 | ks.cfg. It's important to do this *before* performing the install. 41 | 42 | 6. Finally, apply the manifests: 43 | 44 | puppet apply manifests/site.pp 45 | 46 | Note that the hil repo *must* be located under /root; the puppet 47 | manifests hard-code paths to certain files. 48 | 49 | 7. Reboot the headnode. 50 | 51 | # Use 52 | 53 | The manifests install a script `make-links`, which expects a list of mac 54 | addresses to be supplied on its standard input, one per line, e.g: 55 | 56 | 01:23:45:67:89:ab 57 | cd:ef:01:23:45:67 58 | ... 
59 | 60 | Each of these should be the mac address off of which you expect a node 61 | to boot. `make-links` will then make some symlinks, the effect of which 62 | is that the corresponding nodes will boot into the CentOS installer on 63 | their next boot (by default, they will chainload to the disk). You can 64 | then use the HIL API to force-reboot the nodes. 65 | 66 | Upon completion of the install, the corresponding links will be deleted, 67 | and the node will boot into the new OS for the first time. 68 | -------------------------------------------------------------------------------- /examples/puppet_headnode/download_iso.sh: -------------------------------------------------------------------------------- 1 | wget http://mirror.hmc.edu/centos/6.6/isos/x86_64/CentOS-6.6-x86_64-minimal.iso 2 | -------------------------------------------------------------------------------- /examples/puppet_headnode/manifests/static/boot_notify.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """WSGI app that helps manage pxe booting a node.""" 3 | 4 | import re 5 | import os 6 | 7 | from flask import Flask 8 | app = Flask(__name__) 9 | 10 | KS_CFG = '/var/lib/tftpboot/centos/ks.cfg' 11 | 12 | H = r'[0-9a-fA-F]' # hex digit 13 | mac_regex = re.compile(r'^('+H+H+r':){5}'+H+H+r'$') 14 | 15 | 16 | @app.route('/<mac_addr>', methods=['DELETE']) 17 | def boot_disk(mac_addr): 18 | """Remove the temporary pxelinux.cfg. 19 | 20 | This is invoked from within ks.cfg, to avoid booting back into the 21 | installer on reboot. 22 | """ 23 | if re.match(mac_regex, mac_addr) is None: 24 | return 'Bad mac address', 400 25 | 26 | filename = '01-' + '-'.join(mac_addr.split(':')) 27 | filename = filename.lower() 28 | os.remove('/var/lib/tftpboot/pxelinux.cfg/' + filename) 29 | return 'OK' 30 | 31 | 32 | @app.route('/ks.cfg') 33 | def kickstart(): 34 | """Serve ks.cfg""" 35 | with open(KS_CFG) as f: 36 | return f.read() 37 | 38 | 39 | if __name__ == '__main__': 40 | app.run(host='0.0.0.0', port=80) 41 | -------------------------------------------------------------------------------- /examples/puppet_headnode/manifests/static/default: -------------------------------------------------------------------------------- 1 | default disk 2 | label disk 3 | LOCALBOOT 0 4 | -------------------------------------------------------------------------------- /examples/puppet_headnode/manifests/static/dhcpd.conf: -------------------------------------------------------------------------------- 1 | # The ddns-updates-style parameter controls whether or not the server will 2 | # attempt to do a DNS update when a lease is confirmed. We default to the 3 | # behavior of the version 2 packages ('none', since DHCP v2 didn't 4 | # have support for DDNS.) 5 | ddns-update-style none; 6 | 7 | # option definitions common to all supported networks... 8 | #option domain-name "moc.local"; 9 | #option domain-name-servers server.moc.local; 10 | default-lease-time 600; 11 | max-lease-time 7200; 12 | 13 | # If this DHCP server is the official DHCP server for the local 14 | # network, the authoritative directive should be uncommented. 15 | authoritative; 16 | 17 | # Use this to send dhcp log messages to a different log file (you also 18 | # have to hack syslog.conf to complete the redirection). 19 | log-facility local7; 20 | 21 | # A slightly different configuration for an internal subnet.
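The `boot_disk()` view in boot_notify.py above and the `make-links` script described in the README both rely on the same per-MAC file name under /var/lib/tftpboot/pxelinux.cfg; a small sketch of that naming convention:

    def pxelinux_cfg_name(mac_addr):
        """Return the pxelinux.cfg entry name for a MAC address:
        '01-' prefix, colons replaced with dashes, lower-cased."""
        return ('01-' + '-'.join(mac_addr.split(':'))).lower()

    assert pxelinux_cfg_name('CD:EF:01:23:45:67') == '01-cd-ef-01-23-45-67'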
22 | subnet 192.168.1.0 netmask 255.255.255.0 { 23 | range 192.168.1.20 192.168.1.30; 24 | # option domain-name-servers server.moc.local; 25 | # option domain-name "moc.local"; 26 | # option routers 192.168.1.1; 27 | option broadcast-address 192.168.1.255; 28 | default-lease-time 600; 29 | max-lease-time 7200; 30 | } 31 | 32 | # Fixed IP addresses can also be specified for hosts. These addresses 33 | # should not also be listed as being available for dynamic assignment. 34 | # Hosts for which fixed IP addresses have been specified can boot using 35 | # BOOTP or DHCP. Hosts for which no fixed address is specified can only 36 | # be booted with DHCP, unless there is an address range on the subnet 37 | # to which a BOOTP client is connected which has the dynamic-bootp flag 38 | # set. 39 | host base-headnode { 40 | hardware ethernet 52:54:00:10:e8:e8; 41 | fixed-address 192.168.1.15; 42 | } 43 | 44 | # PXE options set 45 | allow booting; 46 | allow bootp; 47 | option option-128 code 128 = string; 48 | option option-129 code 129 = text; 49 | next-server 192.168.1.15; 50 | filename "pxelinux.0"; 51 | -------------------------------------------------------------------------------- /examples/puppet_headnode/manifests/static/inetd.conf: -------------------------------------------------------------------------------- 1 | # /etc/inetd.conf: see inetd(8) for further informations. 2 | # 3 | # Internet superserver configuration database 4 | # 5 | # 6 | # Lines starting with "#:LABEL:" or "##" should not 7 | # be changed unless you know what you are doing! 8 | # 9 | # If you want to disable an entry so it isn't touched during 10 | # package updates just comment it out with a single '#' character. 11 | # 12 | # Packages should modify this file by using update-inetd(8) 13 | # 14 | # 15 | # 16 | #:INTERNAL: Internal services 17 | #discard stream tcp nowait root internal 18 | #discard dgram udp wait root internal 19 | #daytime stream tcp nowait root internal 20 | #time stream tcp nowait root internal 21 | 22 | #:STANDARD: These are standard services. 23 | 24 | #:BSD: Shell, login, exec and talk are BSD protocols. 25 | 26 | #:MAIL: Mail, news and uucp services. 27 | 28 | #:INFO: Info services 29 | 30 | #:BOOT: TFTP service is provided primarily for booting. Most sites 31 | # run this only on machines acting as "boot servers." 32 | 33 | #:RPC: RPC based services 34 | 35 | #:HAM-RADIO: amateur-radio services 36 | 37 | #:OTHER: Other services 38 | 39 | tftp dgram udp wait root /usr/sbin/in.tftpd /usr/sbin/in.tftpd -s /var/lib/tftpboot 40 | -------------------------------------------------------------------------------- /examples/puppet_headnode/manifests/static/interfaces: -------------------------------------------------------------------------------- 1 | #This file describes the network interfaces available on your system 2 | # and how to activate them. For more information, see interfaces(5). 
3 | 4 | # The loopback network interface 5 | auto lo 6 | iface lo inet loopback 7 | 8 | 9 | # The default interface 10 | auto eth0 11 | iface eth0 inet dhcp 12 | 13 | 14 | auto eth1 15 | iface eth1 inet static 16 | address 192.168.1.15 17 | netmask 255.255.255.0 18 | -------------------------------------------------------------------------------- /examples/puppet_headnode/manifests/static/isc-dhcp-server: -------------------------------------------------------------------------------- 1 | # Defaults for isc-dhcp-server initscript 2 | # sourced by /etc/init.d/isc-dhcp-server 3 | # installed at /etc/default/isc-dhcp-server by the maintainer scripts 4 | 5 | # 6 | # This is a POSIX shell fragment 7 | # 8 | 9 | # Path to dhcpd's config file (default: /etc/dhcp/dhcpd.conf). 10 | #DHCPD_CONF=/etc/dhcp/dhcpd.conf 11 | 12 | # Path to dhcpd's PID file (default: /var/run/dhcpd.pid). 13 | #DHCPD_PID=/var/run/dhcpd.pid 14 | 15 | # Additional options to start dhcpd with. 16 | # Don't use options -cf or -pf here; use DHCPD_CONF/ DHCPD_PID instead 17 | #OPTIONS="" 18 | 19 | # On what interfaces should the DHCP server (dhcpd) serve DHCP requests? 20 | # Separate multiple interfaces with spaces, e.g. "eth0 eth1". 21 | INTERFACES="eth1" 22 | -------------------------------------------------------------------------------- /examples/puppet_headnode/manifests/static/ks.cfg: -------------------------------------------------------------------------------- 1 | # clear all existing partitions. This gives us a known state to work with. 2 | clearpart --all --drives=sda 3 | 4 | # set up the disk; This is mostly copied from what you get by doing a 5 | # manual install with automated partitioning ("use whole disk"). 6 | part /boot --fstype=ext4 --size=500 7 | part pv.008002 --grow --size=1 8 | volgroup vg_moccompute --pesize=4096 pv.008002 9 | logvol swap --recommended --name lv_swap --vgname vg_moccompute 10 | logvol / --fstype=ext4 --name lv_root --vgname=vg_moccompute --grow --size=1 11 | 12 | # Set up the network. The example below assumes that eth1 is connected to the 13 | # outside world, and will be used to fetch packages during the installation. It 14 | # also assumes eth0 is the boot nic. Both are brought up via dhcp on boot, so 15 | # the node will be accessible both from the outside world, and from the headnode 16 | # via the pxe network. 17 | # 18 | # If the names of your nics don't match the above, adjust them here, and also in 19 | # the post intsall script at the end of this file. 20 | # 21 | # The --activate option causes the nic to be brought up *during* the install. We 22 | # need both external network access (for things like downloading packages), as 23 | # well as access to the headnode on the management side (See the comments in the 24 | # post-install script, at the end of this file). 
As such, we apply the 25 | # --activate option to both of the relevant nics: 26 | network --onboot yes --device eth1 --bootproto dhcp --activate 27 | network --onboot yes --device eth0 --bootproto dhcp --activate 28 | 29 | # red hat docs say these options are required: 30 | authconfig --useshadow --passalgo=sha512 31 | bootloader --location=mbr 32 | keyboard us 33 | lang en_US 34 | timezone --utc US/Eastern 35 | 36 | # WARNING WARNING: You should change this to something more secure *before* 37 | # performing an install; without any other changes, the node will come up with 38 | # ssh enabled, using password auth, and allowing root logins -- We recommend 39 | # changing that as soon as possible, but don't do so during the install here to 40 | # keep this example simple. 41 | rootpw r00tme 42 | 43 | # we want the machine to reboot on its own when finished with installation: 44 | reboot 45 | 46 | # we do *not* want to be prompted for anything; if we need to be, installation should fail: 47 | cmdline 48 | 49 | # set up repositories 50 | url --url http://mirrors.mit.edu/centos/6.5/os/x86_64 51 | 52 | %packages 53 | @core 54 | @server-policy 55 | 56 | %post 57 | 58 | # Notify the head node that we're done with the install. The script 59 | # boot_notify.py responds to this by deleting the temporary pxelinux.cfg symlink 60 | # for our mac address. This way, the next time we PXE boot, we'll chainload to 61 | # our already installed operating system (on the disk). If your nodes' boot nic 62 | # is called something other than eth0, you'll need to change it here. 63 | curl -X DELETE http://192.168.1.15/$(ip link show eth0|grep ether|awk '{print $2}') 64 | -------------------------------------------------------------------------------- /examples/puppet_headnode/manifests/static/make-links: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | cd /var/lib/tftpboot/pxelinux.cfg 3 | tr ABCDEF abcdef |sed 's/^/01-/ ; s/:/-/g'|xargs -n 1 ln -s ../centos/pxelinux.cfg 4 | -------------------------------------------------------------------------------- /examples/puppet_headnode/manifests/static/pxelinux_cfg: -------------------------------------------------------------------------------- 1 | # D-I config version 2.0 2 | default centos 3 | prompt 0 4 | timeout 0 5 | 6 | label centos 7 | kernel centos/vmlinuz 8 | append ks=http://192.168.1.15/ks.cfg initrd=centos/initrd.img ksdevice=eth0 9 | -------------------------------------------------------------------------------- /examples/puppet_headnode/manifests/static/rc.local: -------------------------------------------------------------------------------- 1 | #!/bin/sh -e 2 | # 3 | # rc.local 4 | # 5 | # This script is executed at the end of each multiuser runlevel. 6 | # Make sure that the script will "exit 0" on success or any other 7 | # value on error. 8 | # 9 | # In order to enable or disable this script just change the execution 10 | # bits. 11 | # 12 | # By default this script does nothing.
13 | 14 | /usr/local/bin/boot_notify.py & 15 | exit 0 16 | -------------------------------------------------------------------------------- /examples/puppet_headnode/manifests/static/tftpd-hpa: -------------------------------------------------------------------------------- 1 | # /etc/default/tftpd-hpa 2 | 3 | TFTP_USERNAME="tftp" 4 | TFTP_DIRECTORY="/var/lib/tftpboot" 5 | TFTP_ADDRESS="[::]:69" 6 | TFTP_OPTIONS="--secure" 7 | 8 | RUN_DAEMON="yes" 9 | OPTIONS="-l -s /var/lib/tftpboot" 10 | -------------------------------------------------------------------------------- /examples/puppet_headnode/sha256sum.txt: -------------------------------------------------------------------------------- 1 | 5458f357e8a55e3a866dd856896c7e0ac88e7f9220a3dd74c58a3b0acede8e4d CentOS-6.6-x86_64-minimal.iso 2 | -------------------------------------------------------------------------------- /examples/site-layout.json: -------------------------------------------------------------------------------- 1 | { 2 | "switches": [ 3 | { 4 | "switch": "dell-0", 5 | "type": "http://schema.massopencloud.org/haas/v0/switches/powerconnect55xx", 6 | "hostname": "dell-0.example.com", 7 | "username": "alice", 8 | "password": "secret" 9 | } 10 | ], 11 | "nodes" : [ 12 | { 13 | "name": "node-1", 14 | "nics": [ 15 | { 16 | "name": "nic1", 17 | "mac" : "de:ad:be:ef:20:14", 18 | "port": "gi1/0/1", 19 | "switch": "dell-0" 20 | }, 21 | { 22 | "name": "nic2", 23 | "mac" : "de:ad:be:ef:20:15", 24 | "port": "gi1/0/2", 25 | "switch": "dell-1" 26 | } 27 | ], 28 | "obm": { 29 | "type": "ipmi", 30 | "host": "192.168.1.1", 31 | "user": "foo", 32 | "pass": "bar" 33 | }, 34 | "obmd": { 35 | "uri": "http://obmd.example.com/nodes/node-1", 36 | "admin_token": "secret" 37 | } 38 | }, 39 | { 40 | "name": "node-2", 41 | "nics": [ 42 | { 43 | "name": "nic1", 44 | "mac" : "de:ad:be:ef:20:16", 45 | "port": "gi1/0/3", 46 | "switch": "dell-1" 47 | } 48 | ], 49 | "obm": { 50 | "type": "ipmi", 51 | "host": "192.168.1.2", 52 | "user": "foo", 53 | "pass": "bar" 54 | }, 55 | "obmd": { 56 | "uri": "http://obmd.example.com/nodes/node-2", 57 | "admin_token": "secret" 58 | } 59 | } 60 | ] 61 | } 62 | -------------------------------------------------------------------------------- /examples/testsuite.cfg-deployment: -------------------------------------------------------------------------------- 1 | # This is an example testsuite.cfg for running the deployment tests (in 2 | # ``tests/deployment``). it is designed to accompany 3 | # ``site-layout.json`` in this directory. You will most likely have to 4 | # modify both files according to your local environment. 5 | [general] 6 | log_level = debug 7 | 8 | [headnode] 9 | # Set this to the appropriate interface on your hil master: 10 | # trunk_nic = eth0 11 | 12 | # Note that the test suite requires these two base images are available; this 13 | # *cannot* be set to a different value for running the tests. 14 | base_imgs = base-headnode, base-headnode-2 15 | libvirt_endpoint = qemu:///system 16 | 17 | [extensions] 18 | hil.ext.network_allocators.vlan_pool = 19 | hil.ext.switches.dell = 20 | 21 | [hil.ext.network_allocators.vlan_pool] 22 | # Set to a range appropriate for your local environment. A pool of at least 10 23 | # VLANs is recommended for running the tests. The tests don't use quite that 24 | # many at present, but may do so in the future. 
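A sketch of how a site-layout.json like the one above can be fed to the HIL CLI, reusing the `hil` wrapper pattern from examples/dbinit.py. Switch registration is omitted because its CLI arguments depend on the switch type, and the file is assumed to sit in the current directory.

    import json
    from subprocess import check_call

    def hil(*args):
        """Call the hil command line tool with the given arguments."""
        check_call(['hil'] + [str(a) for a in args])

    with open('site-layout.json') as f:
        layout = json.load(f)

    for node in layout['nodes']:
        hil('node', 'register', node['name'],
            node['obmd']['uri'], node['obmd']['admin_token'])
        for nic in node['nics']:
            hil('node', 'nic', 'register', node['name'], nic['name'], nic['mac'])
            hil('port', 'register', nic['switch'], nic['port'])
            hil('port', 'nic', 'add', nic['switch'], nic['port'],
                node['name'], nic['name'])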
25 | # 26 | # vlans = 100-110 27 | -------------------------------------------------------------------------------- /hil.wsgi: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | """WSGI script for the HIL api server.""" 3 | 4 | # imported for the side-effect of registering the request handlers: 5 | from hil import api # pylint: disable=unused-import 6 | 7 | from hil import config, server, migrations 8 | 9 | config.setup('/etc/hil.cfg') 10 | server.init() 11 | migrations.check_db_schema() 12 | 13 | # we're importing this just to expose the variable, making this a valid 14 | # wsgi script. The "noqa" prevents a pep8 error about not being at the 15 | # top of the file. 16 | # 17 | # pylint: disable=unused-import 18 | from hil.rest import app as application # noqa 19 | -------------------------------------------------------------------------------- /hil/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CCI-MOC/hil/8c6de2214ddf908c01549b117d5684ac52a93934/hil/__init__.py -------------------------------------------------------------------------------- /hil/class_resolver.py: -------------------------------------------------------------------------------- 1 | """Support module for looking up drivers by name 2 | 3 | The driver model (see ``docs/driver-model.rst``) requires us to be able 4 | to resolve driver names to classes; this module provides support for 5 | building and querying lookup tables for this. 6 | 7 | Any class which wishes to be exposed via this interface should: 8 | 9 | 1. Inherit, directly or inderectly, from the class defining the type of 10 | driver (e.g. Switch, OBM...) 11 | 2. have an attribute ``api_name``, which should be the name under which 12 | the class would like to be exposed. 13 | """ 14 | 15 | 16 | _class_map = {} 17 | 18 | 19 | def concrete_class_for(superclass, name): 20 | """Looks up the concrete class registered under the name ``name`` 21 | 22 | Returns the class, or None if not found. 23 | """ 24 | if (superclass, name) in _class_map: 25 | return _class_map[(superclass, name)] 26 | else: 27 | return None 28 | 29 | 30 | def build_class_map_for(superclass): 31 | """Build a lookup table for drivers implementing ``superclass`` 32 | 33 | ``superclass`` should be a driver at the top level of a driver type 34 | class hierarchy. ``build_class_map_for`` then searches the class hierarchy 35 | beneath ``subclass``, registering each class which has an ``api_name`` 36 | attribute. 
37 | """ 38 | def _add_to_class_map(cls): 39 | if hasattr(cls, 'api_name'): 40 | _class_map[(superclass, cls.api_name)] = cls 41 | for subclass in cls.__subclasses__(): 42 | _add_to_class_map(subclass) 43 | for cls in superclass.__subclasses__(): 44 | _add_to_class_map(cls) 45 | -------------------------------------------------------------------------------- /hil/cli/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CCI-MOC/hil/8c6de2214ddf908c01549b117d5684ac52a93934/hil/cli/__init__.py -------------------------------------------------------------------------------- /hil/cli/cli.py: -------------------------------------------------------------------------------- 1 | """This module implements the HIL CLI""" 2 | import click 3 | import sys 4 | import pkg_resources 5 | 6 | from hil.client.base import FailedAPICallException 7 | from hil.commands.util import ensure_not_root 8 | from hil.cli import node, project, network, switch, port, user, misc, headnode 9 | 10 | VERSION = pkg_resources.require('hil')[0].version 11 | 12 | 13 | @click.group() 14 | @click.version_option(version=VERSION) 15 | def cli(): 16 | """The HIL Command line. 17 | 18 | Every subcommand supports --help option to see all arguments 19 | (positional and optional) and additional help for that subcommand. 20 | """ 21 | 22 | 23 | commands = [node.node, project.project, network.network, switch.switch, 24 | port.port, user.user, misc.networking_action, headnode.headnode] 25 | 26 | for command in commands: 27 | cli.add_command(command) 28 | 29 | 30 | def main(): 31 | """CLI entry point""" 32 | ensure_not_root() 33 | try: 34 | cli() 35 | except FailedAPICallException as e: 36 | sys.exit('Error: %s\n' % e.message) 37 | except Exception as e: 38 | sys.exit(e) 39 | -------------------------------------------------------------------------------- /hil/cli/client_setup.py: -------------------------------------------------------------------------------- 1 | """This module sets up a HIL client""" 2 | 3 | import sys 4 | import os 5 | 6 | from hil.client.client import Client, RequestsHTTPClient, KeystoneHTTPClient 7 | 8 | 9 | def setup_http_client(): 10 | """Set `http_client` to a valid instance of `HTTPClient` 11 | 12 | and pass it as parameter to initialize the client library. 13 | 14 | Sets http_client to an object which makes HTTP requests with 15 | authentication. It chooses an authentication backend as follows: 16 | 17 | 1. If the environment variables HIL_USERNAME and HIL_PASSWORD 18 | are defined, it will use HTTP basic auth, with the corresponding 19 | user name and password. 20 | 2. If the `python-keystoneclient` library is installed, and the 21 | environment variables: 22 | 23 | * OS_AUTH_URL 24 | * OS_USERNAME 25 | * OS_PASSWORD 26 | * OS_PROJECT_NAME 27 | 28 | are defined, Keystone is used. 29 | 3. Otherwise, do not supply authentication information. 30 | 31 | This may be extended with other backends in the future. 32 | 33 | `http_client` is also passed as a parameter to the client library. 34 | Until all calls are moved to client library, this will support 35 | both ways of intereacting with HIL. 
36 | """ 37 | # First try basic auth: 38 | ep = os.environ.get('HIL_ENDPOINT') 39 | 40 | if ep is None: 41 | sys.exit("Error: HIL_ENDPOINT not set \n") 42 | 43 | basic_username = os.getenv('HIL_USERNAME') 44 | basic_password = os.getenv('HIL_PASSWORD') 45 | if basic_username is not None and basic_password is not None: 46 | # For calls with no client library support yet. 47 | # Includes all headnode calls; registration of nodes and switches. 48 | http_client = RequestsHTTPClient() 49 | http_client.auth = (basic_username, basic_password) 50 | # For calls using the client library 51 | return Client(ep, http_client), http_client 52 | # Next try keystone: 53 | try: 54 | from keystoneauth1.identity import v3 55 | from keystoneauth1 import session 56 | os_auth_url = os.getenv('OS_AUTH_URL') 57 | os_password = os.getenv('OS_PASSWORD') 58 | os_username = os.getenv('OS_USERNAME') 59 | os_user_domain_id = os.getenv('OS_USER_DOMAIN_ID') or 'default' 60 | os_project_name = os.getenv('OS_PROJECT_NAME') 61 | os_project_domain_id = os.getenv('OS_PROJECT_DOMAIN_ID') or 'default' 62 | if None in (os_auth_url, os_username, os_password, os_project_name): 63 | raise KeyError("Required openstack environment variable not set.") 64 | auth = v3.Password(auth_url=os_auth_url, 65 | username=os_username, 66 | password=os_password, 67 | project_name=os_project_name, 68 | user_domain_id=os_user_domain_id, 69 | project_domain_id=os_project_domain_id) 70 | sess = session.Session(auth=auth) 71 | http_client = KeystoneHTTPClient(sess) 72 | return Client(ep, http_client), http_client 73 | except (ImportError, KeyError): 74 | pass 75 | # Finally, fall back to no authentication: 76 | http_client = RequestsHTTPClient() 77 | return Client(ep, http_client), http_client 78 | 79 | 80 | client = setup_http_client()[0] 81 | http_client = setup_http_client()[1] 82 | -------------------------------------------------------------------------------- /hil/cli/helper.py: -------------------------------------------------------------------------------- 1 | """Helper functions for the CLI live here""" 2 | import json 3 | import sys 4 | import os 5 | from prettytable import PrettyTable 6 | 7 | try: 8 | HIL_TIMEOUT = int(os.getenv('HIL_TIMEOUT', 10)) 9 | except ValueError: 10 | sys.exit("Please set environment variable HIL_TIMEOUT to a number") 11 | 12 | 13 | def print_json(raw_output): 14 | """Format raw_output as json, print it and exit""" 15 | print(json.dumps(raw_output)) 16 | sys.exit(0) 17 | 18 | 19 | def make_table(field_names, rows): 20 | """Generate a PrettyTable and return it. 21 | If there's only field, then it will add the count of items in the header. 
22 | """ 23 | if len(field_names) == 1: 24 | field_names = [field_names[0] + ' (' + str(len(rows)) + ')'] 25 | 26 | output_table = PrettyTable(field_names) 27 | for row in rows: 28 | output_table.add_row(row) 29 | return output_table 30 | -------------------------------------------------------------------------------- /hil/cli/misc.py: -------------------------------------------------------------------------------- 1 | """Miscellaneous commands go here""" 2 | import click 3 | from hil.cli.client_setup import client 4 | from prettytable import PrettyTable 5 | from hil.cli.helper import print_json 6 | 7 | 8 | @click.group(name='networking-action') 9 | def networking_action(): 10 | """Commands related to networking-actions""" 11 | 12 | 13 | @networking_action.command('show') 14 | @click.argument('status_id') 15 | @click.option('--json', 'jsonout', is_flag=True) 16 | def show_networking_action(status_id, jsonout): 17 | """Displays the status of the networking action""" 18 | raw_output = client.node.show_networking_action(status_id) 19 | 20 | if jsonout: 21 | print_json(raw_output) 22 | 23 | net_actions_table = PrettyTable() 24 | net_actions_table.field_names = ['Field', 'Value'] 25 | 26 | if 'node' in raw_output: 27 | net_actions_table.add_row(['Node', raw_output['node']]) 28 | if 'nic' in raw_output: 29 | net_actions_table.add_row(['NIC', raw_output['nic']]) 30 | if 'new_network' in raw_output: 31 | net_actions_table.add_row(['New Network', raw_output['new_network']]) 32 | if 'type' in raw_output: 33 | net_actions_table.add_row(['Type', raw_output['type']]) 34 | if 'channel' in raw_output: 35 | net_actions_table.add_row(['Channel', raw_output['channel']]) 36 | if 'status' in raw_output: 37 | net_actions_table.add_row(['Status', raw_output['status']]) 38 | 39 | print(net_actions_table) 40 | -------------------------------------------------------------------------------- /hil/cli/port.py: -------------------------------------------------------------------------------- 1 | """Commands related to port are in this module""" 2 | import click 3 | from hil.cli.client_setup import client 4 | from prettytable import PrettyTable 5 | from hil.cli.helper import print_json, make_table 6 | 7 | 8 | @click.group() 9 | def port(): 10 | """Commands related to port""" 11 | 12 | 13 | @port.command(name='show') 14 | @click.argument('switch') 15 | @click.argument('port') 16 | @click.option('--json', 'jsonout', is_flag=True) 17 | def port_show(switch, port, jsonout): 18 | """Show what's connected to """ 19 | 20 | raw_output = client.port.show(switch, port) 21 | 22 | if jsonout: 23 | print_json(raw_output) 24 | 25 | port_table = PrettyTable() 26 | port_table.field_names = ['Field', 'Value'] 27 | 28 | # Gather all networks 29 | networks = '' 30 | for channel, network in raw_output['networks'].iteritems(): 31 | network_string = network + ' (' + channel + ')' 32 | networks += network_string + "\n" 33 | 34 | if 'node' in raw_output: 35 | port_table.add_row(['Node', raw_output['node']]) 36 | if 'nic' in raw_output: 37 | port_table.add_row(['NIC', raw_output['nic']]) 38 | if 'networks' in raw_output: 39 | port_table.add_row(['Networks', networks.rstrip()]) 40 | 41 | print(port_table) 42 | 43 | 44 | @port.command(name='register') 45 | @click.argument('switch') 46 | @click.argument('port') 47 | def port_register(switch, port): 48 | """Register a with """ 49 | client.port.register(switch, port) 50 | 51 | 52 | @port.command(name='delete') 53 | @click.argument('switch') 54 | @click.argument('port') 55 | def port_delete(switch, 
port): 56 | """Delete a from a """ 57 | client.port.delete(switch, port) 58 | 59 | 60 | @port.command(name='revert') 61 | @click.argument('switch') 62 | @click.argument('port') 63 | @click.option('--json', 'jsonout', is_flag=True) 64 | def port_revert(switch, port, jsonout): 65 | """Detach a on a from all attached networks.""" 66 | raw_output = client.port.port_revert(switch, port) 67 | 68 | if jsonout: 69 | print_json(raw_output) 70 | 71 | print(make_table(field_names=['Field', 'Value'], 72 | rows=[['Status ID', raw_output['status_id']]])) 73 | 74 | 75 | @port.group(name='nic') 76 | def port_nic(): 77 | """Operations affecting a port and a nic""" 78 | 79 | 80 | @port_nic.command(name='add') 81 | @click.argument('switch') 82 | @click.argument('port') 83 | @click.argument('node') 84 | @click.argument('nic') 85 | def port_nic_add(switch, port, node, nic): 86 | """Connect a on a to a on a """ 87 | client.port.connect_nic(switch, port, node, nic) 88 | 89 | 90 | @port_nic.command(name='remove') 91 | @click.argument('switch') 92 | @click.argument('port') 93 | def port_nic_remove(switch, port): 94 | """Detach a on a from whatever's connected to it""" 95 | client.port.detach_nic(switch, port) 96 | -------------------------------------------------------------------------------- /hil/cli/user.py: -------------------------------------------------------------------------------- 1 | """Commands related to user are in this module""" 2 | import click 3 | from hil.cli.client_setup import client 4 | 5 | 6 | @click.group() 7 | def user(): 8 | """Commands related to user""" 9 | 10 | 11 | @user.command(name='create', short_help='Create a new user') 12 | @click.argument('username') 13 | @click.argument('password') 14 | @click.argument('is_admin', type=click.Choice(['admin', 'regular'])) 15 | def user_create(username, password, is_admin): 16 | """Create a user with password . 17 | 18 | may be either "admin" or "regular", and determines whether 19 | the user has administrative privileges. 20 | """ 21 | client.user.create(username, password, is_admin == 'admin') 22 | 23 | 24 | @user.command(name='delete') 25 | @click.argument('username') 26 | def user_delete(username): 27 | """Delete the user """ 28 | client.user.delete(username) 29 | 30 | 31 | @user.group(name='project') 32 | def user_project(): 33 | """add/remove users from project""" 34 | 35 | 36 | @user_project.command('add') 37 | @click.argument('user') 38 | @click.argument('project') 39 | def user_add_project(user, project): 40 | """Add to """ 41 | client.user.add(user, project) 42 | 43 | 44 | @user_project.command('remove') 45 | @click.argument('user') 46 | @click.argument('project') 47 | def user_remove_project(user, project): 48 | """Remove from """ 49 | client.user.remove(user, project) 50 | 51 | 52 | @user.command(name='set-admin') 53 | @click.argument('username') 54 | @click.argument('is_admin', type=click.Choice(['admin', 'regular'])) 55 | def user_set_admin(username, is_admin): 56 | """Changes the admin status of user . 57 | 58 | may by either "admin" or "regular", and determines whether 59 | a user is authorized for administrative privileges. 
60 | """ 61 | client.user.set_admin(username, is_admin == 'admin') 62 | -------------------------------------------------------------------------------- /hil/client/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CCI-MOC/hil/8c6de2214ddf908c01549b117d5684ac52a93934/hil/client/__init__.py -------------------------------------------------------------------------------- /hil/client/extensions.py: -------------------------------------------------------------------------------- 1 | """Client support for extension related api calls.""" 2 | from hil.client.base import ClientBase 3 | 4 | 5 | class Extensions(ClientBase): 6 | """Consists of calls to query and manipulate extension related 7 | 8 | objects and relations/ 9 | """ 10 | 11 | def list_active(self): 12 | """List all active extensions. """ 13 | url = self.object_url('active_extensions') 14 | return self.check_response(self.httpClient.request("GET", url)) 15 | -------------------------------------------------------------------------------- /hil/client/network.py: -------------------------------------------------------------------------------- 1 | """Client support for network related api calls.""" 2 | import json 3 | from hil.client.base import ClientBase 4 | from hil.client.base import check_reserved_chars 5 | 6 | 7 | class Network(ClientBase): 8 | """Consists of calls to query and manipulate network related 9 | 10 | objects and relations. 11 | """ 12 | 13 | def list(self): 14 | """Lists all networks under HIL """ 15 | url = self.object_url('networks') 16 | return self.check_response(self.httpClient.request("GET", url)) 17 | 18 | @check_reserved_chars() 19 | def list_network_attachments(self, network, project): 20 | """Lists nodes connected to a network""" 21 | url = self.object_url('network', network, 'attachments') 22 | if project == "all": 23 | return self.check_response(self.httpClient.request("GET", url)) 24 | 25 | params = {'project': project} 26 | return self.check_response( 27 | self.httpClient.request("GET", url, params=params)) 28 | 29 | @check_reserved_chars() 30 | def show(self, network): 31 | """Shows attributes of a network. """ 32 | url = self.object_url('network', network) 33 | return self.check_response(self.httpClient.request("GET", url)) 34 | 35 | @check_reserved_chars(slashes_ok=['net_id']) 36 | def create(self, network, owner, access, net_id): 37 | """Create a link-layer . 38 | 39 | See docs/networks.md for details. 40 | """ 41 | url = self.object_url('network', network) 42 | payload = json.dumps({ 43 | 'owner': owner, 'access': access, 44 | 'net_id': net_id 45 | }) 46 | return self.check_response( 47 | self.httpClient.request("PUT", url, data=payload) 48 | ) 49 | 50 | @check_reserved_chars() 51 | def delete(self, network): 52 | """Delete a . """ 53 | url = self.object_url('network', network) 54 | return self.check_response(self.httpClient.request("DELETE", url)) 55 | 56 | @check_reserved_chars() 57 | def grant_access(self, project, network): 58 | """Grants access to . """ 59 | url = self.object_url( 60 | 'network', network, 'access', project 61 | ) 62 | return self.check_response(self.httpClient.request("PUT", url)) 63 | 64 | @check_reserved_chars() 65 | def revoke_access(self, project, network): 66 | """Removes access of from . 
""" 67 | url = self.object_url( 68 | 'network', network, 'access', project 69 | ) 70 | return self.check_response(self.httpClient.request("DELETE", url)) 71 | -------------------------------------------------------------------------------- /hil/client/project.py: -------------------------------------------------------------------------------- 1 | """Client support for project related api calls.""" 2 | import json 3 | from hil.client.base import ClientBase 4 | from hil.client.base import check_reserved_chars 5 | 6 | 7 | class Project(ClientBase): 8 | """Consists of calls to query and manipulate project related 9 | 10 | objects and relations. 11 | """ 12 | 13 | def list(self): 14 | """Lists all projects under HIL """ 15 | 16 | url = self.object_url('projects') 17 | return self.check_response(self.httpClient.request("GET", url)) 18 | 19 | @check_reserved_chars() 20 | def nodes_in(self, project_name): 21 | """Lists nodes allocated to project """ 22 | url = self.object_url('project', project_name, 'nodes') 23 | return self.check_response(self.httpClient.request("GET", url)) 24 | 25 | @check_reserved_chars() 26 | def networks_in(self, project_name): 27 | """Lists nodes allocated to project """ 28 | url = self.object_url( 29 | 'project', project_name, 'networks' 30 | ) 31 | return self.check_response(self.httpClient.request("GET", url)) 32 | 33 | @check_reserved_chars() 34 | def create(self, project_name): 35 | """Creates a project named """ 36 | url = self.object_url('project', project_name) 37 | return self.check_response(self.httpClient.request("PUT", url)) 38 | 39 | @check_reserved_chars() 40 | def delete(self, project_name): 41 | """Deletes a project named """ 42 | url = self.object_url('project', project_name) 43 | return self.check_response(self.httpClient.request("DELETE", url)) 44 | 45 | @check_reserved_chars() 46 | def connect(self, project_name, node_name): 47 | """Adds a node to a project. """ 48 | url = self.object_url( 49 | 'project', project_name, 'connect_node' 50 | ) 51 | self.payload = json.dumps({'node': node_name}) 52 | return self.check_response( 53 | self.httpClient.request("POST", url, data=self.payload) 54 | ) 55 | 56 | @check_reserved_chars() 57 | def detach(self, project_name, node_name): 58 | """Detaches a node from a project. """ 59 | url = self.object_url('project', project_name, 'detach_node') 60 | self.payload = json.dumps({'node': node_name}) 61 | return self.check_response( 62 | self.httpClient.request("POST", url, data=self.payload) 63 | ) 64 | -------------------------------------------------------------------------------- /hil/client/switch.py: -------------------------------------------------------------------------------- 1 | """Client support for switch related api calls.""" 2 | import json 3 | from hil.client.base import check_reserved_chars 4 | from hil.client.base import ClientBase 5 | 6 | 7 | class Switch(ClientBase): 8 | """Consists of calls to query and manipulate node related 9 | 10 | objects and relations. 11 | """ 12 | 13 | def list(self): 14 | """List all nodes that HIL manages """ 15 | url = self.object_url('switches') 16 | return self.check_response(self.httpClient.request("GET", url)) 17 | 18 | def register(self, switch, subtype, switchinfo): 19 | """Registers a switch with name and 20 | model , and relevant arguments in <*args> 21 | 22 | switchinfo must be a dictionary. 
23 | """ 24 | switchinfo['type'] = subtype 25 | url = self.object_url('switch', switch) 26 | payload = json.dumps(switchinfo) 27 | return self.check_response(self.httpClient.request("PUT", url, 28 | data=payload)) 29 | 30 | @check_reserved_chars() 31 | def delete(self, switch): 32 | """Deletes the switch named .""" 33 | url = self.object_url('switch', switch) 34 | return self.check_response(self.httpClient.request("DELETE", url)) 35 | 36 | @check_reserved_chars() 37 | def show(self, switch): 38 | """Shows attributes of . """ 39 | url = self.object_url('switch', switch) 40 | return self.check_response(self.httpClient.request("GET", url)) 41 | 42 | 43 | class Port(ClientBase): 44 | """Port related operations. """ 45 | 46 | @check_reserved_chars(slashes_ok=['port']) 47 | def register(self, switch, port): 48 | """Register a with . """ 49 | url = self.object_url('switch', switch, 'port', port) 50 | return self.check_response(self.httpClient.request("PUT", url)) 51 | 52 | @check_reserved_chars(slashes_ok=['port']) 53 | def delete(self, switch, port): 54 | """Deletes information of the for """ 55 | url = self.object_url('switch', switch, 'port', port) 56 | return self.check_response(self.httpClient.request("DELETE", url)) 57 | 58 | @check_reserved_chars(slashes_ok=['port']) 59 | def connect_nic(self, switch, port, node, nic): 60 | """Connects of to of . """ 61 | url = self.object_url('switch', switch, 'port', port, 'connect_nic') 62 | payload = json.dumps({'node': node, 'nic': nic}) 63 | return self.check_response( 64 | self.httpClient.request("POST", url, data=payload) 65 | ) 66 | 67 | @check_reserved_chars(slashes_ok=['port']) 68 | def detach_nic(self, switch, port): 69 | """"Detaches of . """ 70 | url = self.object_url('switch', switch, 'port', port, 'detach_nic') 71 | return self.check_response(self.httpClient.request("POST", url)) 72 | 73 | @check_reserved_chars(slashes_ok=['port']) 74 | def show(self, switch, port): 75 | """Show what's connected to """ 76 | url = self.object_url('switch', switch, 'port', port) 77 | return self.check_response(self.httpClient.request("GET", url)) 78 | 79 | @check_reserved_chars(slashes_ok=['port']) 80 | def port_revert(self, switch, port): 81 | """removes all vlans from a switch port""" 82 | url = self.object_url('switch', switch, 'port', port, 'revert') 83 | return self.check_response(self.httpClient.request("POST", url)) 84 | -------------------------------------------------------------------------------- /hil/client/user.py: -------------------------------------------------------------------------------- 1 | """Client library for user-oriented api calls. 2 | 3 | These are only meaningful if the server is configured to use 4 | username & password auth. 5 | """ 6 | import json 7 | from hil.client.base import ClientBase 8 | from hil.client.base import check_reserved_chars 9 | 10 | 11 | class User(ClientBase): 12 | """Consists of calls to query and 13 | 14 | manipulate users related objects and relations. 15 | """ 16 | def list(self): 17 | """List all users""" 18 | url = self.object_url('auth/basic/users') 19 | return self.check_response(self.httpClient.request("GET", url)) 20 | 21 | @check_reserved_chars(dont_check=['password', 'is_admin']) 22 | def create(self, username, password, is_admin): 23 | """Create a user with password . 24 | 25 | is a boolean, 26 | and determines whether a user is authorized for 27 | administrative privileges. 
28 | """ 29 | url = self.object_url('auth/basic/user', username) 30 | 31 | payload = json.dumps({ 32 | 'password': password, 'is_admin': is_admin, 33 | }) 34 | return self.check_response( 35 | self.httpClient.request("PUT", url, data=payload) 36 | ) 37 | 38 | @check_reserved_chars() 39 | def delete(self, username): 40 | """Deletes the user . """ 41 | url = self.object_url('auth/basic/user', username) 42 | return self.check_response( 43 | self.httpClient.request("DELETE", url) 44 | ) 45 | 46 | @check_reserved_chars() 47 | def add(self, user, project): 48 | """Adds to a . """ 49 | url = self.object_url('auth/basic/user', user, 'add_project') 50 | payload = json.dumps({'project': project}) 51 | return self.check_response( 52 | self.httpClient.request("POST", url, data=payload) 53 | ) 54 | 55 | @check_reserved_chars() 56 | def remove(self, user, project): 57 | """Removes all access of to . """ 58 | url = self.object_url('auth/basic/user', user, 'remove_project') 59 | payload = json.dumps({'project': project}) 60 | return self.check_response( 61 | self.httpClient.request("POST", url, data=payload) 62 | ) 63 | 64 | @check_reserved_chars(dont_check=['is_admin']) 65 | def set_admin(self, username, is_admin): 66 | """Changes the admin status of . 67 | 68 | is a boolean that determines 69 | whether a user is authorized for 70 | administrative privileges. 71 | """ 72 | 73 | url = self.object_url('auth/basic/user', username) 74 | payload = json.dumps({'is_admin': is_admin}) 75 | return self.check_response( 76 | self.httpClient.request("PATCH", url, data=payload) 77 | ) 78 | -------------------------------------------------------------------------------- /hil/commands/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CCI-MOC/hil/8c6de2214ddf908c01549b117d5684ac52a93934/hil/commands/__init__.py -------------------------------------------------------------------------------- /hil/commands/db.py: -------------------------------------------------------------------------------- 1 | """Implement the ``hil-admin db`` subcommand.""" 2 | from hil import server 3 | from hil.migrations import command, create_db 4 | 5 | 6 | @command.command 7 | def create(): 8 | """Initialize the database.""" 9 | server.init() 10 | create_db() 11 | -------------------------------------------------------------------------------- /hil/commands/util.py: -------------------------------------------------------------------------------- 1 | """Helpers used in various commands.""" 2 | import os 3 | import sys 4 | 5 | 6 | def ensure_not_root(): 7 | """ 8 | Verify that we aren't running as root, exiting with an error otherwise. 9 | """ 10 | if os.getuid() == 0: 11 | sys.exit("You're running %s as root. Don't do this -- use " 12 | "a regular user account. Exiting." % sys.argv[0]) 13 | -------------------------------------------------------------------------------- /hil/dev_support.py: -------------------------------------------------------------------------------- 1 | """Utilities to aid with development.""" 2 | 3 | import logging 4 | from hil import config 5 | from functools import wraps 6 | 7 | 8 | def have_dry_run(): 9 | """Detect True if we're executing in dry_run mode, False otherwise.""" 10 | return config.cfg.has_option('devel', 'dry_run') 11 | 12 | 13 | def no_dry_run(f): 14 | """A decorator which "disables" a function during a dry run. 15 | 16 | A can specify a `dry_run` option in the `devel` section of `hil.cfg`. 
17 | If the option is present (regardless of its value), any function or 18 | method decorated with `no_dry_run` will be "disabled." The call will 19 | be logged (with level `logging.DEBUG`), but will not actually execute. 20 | The function will instead return 'None'. Callers of decorated functions 21 | must accept a None value gracefully. 22 | 23 | The intended use case of `no_dry_run` is to disable functions which 24 | cannot be run because, for example, the HIL is executing on a 25 | developer's workstation, which has no configured switch, libvirt, etc. 26 | 27 | If the `dry_run` option is not specified, this decorator has no effect. 28 | """ 29 | @wraps(f) 30 | def wrapper(*args, **kwargs): 31 | """Wrapper that conditionally disables f, based on config.""" 32 | if have_dry_run(): 33 | logger = logging.getLogger(__name__) 34 | logger.info('dry run, not executing: %s.%s(*%r,**%r)', 35 | f.__module__, f.__name__, args, kwargs) 36 | return None 37 | else: 38 | return f(*args, **kwargs) 39 | return wrapper 40 | -------------------------------------------------------------------------------- /hil/errors.py: -------------------------------------------------------------------------------- 1 | """Exceptions thrown by HIL api calls. 2 | 3 | This module defines several exceptions corresponding to specific errors. 4 | They fall into two basic categories, captured by the classes APIError 5 | and ServerError. 6 | """ 7 | 8 | import json 9 | import flask 10 | from werkzeug.exceptions import HTTPException, InternalServerError 11 | 12 | 13 | class APIError(HTTPException): 14 | """An exception indicating an error that should be reported to the user. 15 | 16 | i.e. If such an error occurs in a rest API call, it should be reported as 17 | part of the HTTP response. 18 | """ 19 | status_code = 400 # Bad Request 20 | 21 | def __init__(self, message=''): 22 | # HTTPException has its own custom __init__ method, but we want the 23 | # usual "First argument is the message" behavior. 24 | HTTPException.__init__(self) 25 | self.message = message 26 | 27 | def get_response(self, environ=None): 28 | """The body of the http response corresponding to this error.""" 29 | # TODO: We're getting deprecation errors about the use of self.message. 30 | # We should figure out what the right way to do this is. 31 | return flask.make_response(json.dumps({ 32 | 'type': self.__class__.__name__, 33 | 'msg': self.message, 34 | }), self.status_code) 35 | 36 | 37 | class ServerError(InternalServerError): 38 | """An error occurred when trying to process the request. 39 | 40 | This is likely not the client's fault; as such the HTTP status is 500. 41 | The semantics are much the same as the corresponding HTTP error. 42 | 43 | In general, we do *not* want to report the details to the client, 44 | though we should log them for our own purposes. 45 | """ 46 | 47 | 48 | class NotFoundError(APIError): 49 | """An exception indicating that a given resource does not exist.""" 50 | status_code = 404 # Not Found 51 | 52 | 53 | class DuplicateError(APIError): 54 | """An exception indicating that a given resource already exists.""" 55 | status_code = 409 # Conflict 56 | 57 | 58 | class AllocationError(ServerError): 59 | """An exception indicating resource exhaustion.""" 60 | 61 | 62 | class BadArgumentError(APIError): 63 | """An exception indicating an invalid request on the part of the user.""" 64 | 65 | 66 | class ProjectMismatchError(APIError): 67 | """An exception indicating that the resources given don't belong to the 68 | same project. 
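Returning to the `no_dry_run` decorator in hil/dev_support.py above, a small usage sketch; the function body is a placeholder, and hil.cfg is assumed to have been loaded with config.setup().

    from hil.dev_support import no_dry_run

    @no_dry_run
    def set_access_vlan(port, vlan_id):
        """Placeholder for something that would touch real hardware."""
        return 'configured %s for vlan %s' % (port, vlan_id)

    # With dry_run set in the [devel] section, the call is only logged and
    # returns None, so callers must tolerate a None result.
    result = set_access_vlan('gi1/0/1', 100)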
69 | """ 70 | status_code = 409 # Conflict 71 | 72 | 73 | class AuthorizationError(APIError): 74 | """An exception indicating that the user is not authorized to perform 75 | the requested action. 76 | """ 77 | status_code = 401 78 | 79 | 80 | class BlockedError(APIError): 81 | """An exception indicating that the requested action cannot happen until 82 | some other change. For example, deletion is blocked until the components 83 | are deleted, and possibly until the dirty flag is cleared as well. 84 | """ 85 | status_code = 409 # Conflict 86 | 87 | 88 | class IllegalStateError(APIError): 89 | """The request is invalid due to the state of the system. 90 | 91 | The request might be perfectly valid in another context. For example, 92 | trying to remove a nic from a running headnode might raise this error. 93 | """ 94 | status_code = 409 # Conflict 95 | 96 | 97 | class OBMError(ServerError): 98 | """An error occured communicating with the OBM for a node.""" 99 | 100 | 101 | class SwitchError(ServerError): 102 | """Exception thrown by a switch driver indicating failure to perform the 103 | requested operation. 104 | 105 | Switch drviers can subclass this to be more specific about the error. 106 | """ 107 | -------------------------------------------------------------------------------- /hil/ext/__init__.py: -------------------------------------------------------------------------------- 1 | """Extensions which are maintained in-tree. 2 | 3 | All public modules (those not beginning with ``_``) in this package are 4 | extensions. They could, in concept, be maintained separately from HIL 5 | core, but are in-tree as they are maintained by the core developers. 6 | """ 7 | -------------------------------------------------------------------------------- /hil/ext/auth/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CCI-MOC/hil/8c6de2214ddf908c01549b117d5684ac52a93934/hil/ext/auth/__init__.py -------------------------------------------------------------------------------- /hil/ext/auth/keystone.py: -------------------------------------------------------------------------------- 1 | """Keystone authentication backend. 2 | 3 | This is a thin wrapper around the `keystonemiddleware` library. 4 | """ 5 | from keystonemiddleware.auth_token import filter_factory 6 | from flask import request 7 | from hil.flaskapp import app 8 | from hil.config import cfg, core_schema, string_is_web_url 9 | from hil.model import Project 10 | from hil import auth, rest 11 | import logging 12 | import sys 13 | 14 | logger = rest.ContextLogger(logging.getLogger(__name__), {}) 15 | 16 | core_schema[__name__] = { 17 | 'auth_url': string_is_web_url, 18 | 'auth_protocol': str, 19 | 'username': str, 20 | 'password': str, 21 | 'project_name': str, 22 | 'admin_user': str, 23 | 'admin_password': str, 24 | } 25 | 26 | 27 | class KeystoneAuthBackend(auth.AuthBackend): 28 | """Authenticate with keystone.""" 29 | 30 | def authenticate(self): 31 | # pylint: disable=missing-docstring 32 | 33 | # keystonemiddleware makes auth info available from two places: 34 | # 35 | # 1. Variables in the wsgi environment 36 | # 2. Extra HTTP headers. 37 | # 38 | # In general, the wsgi variable 'HTTP_FOO_BAR' is equivalent to the 39 | # HTTP header 'Foo-Bar'. 40 | # 41 | # We use the wsgi environment's variables below; it shouldn't matter, 42 | # but this way if something goes horribly wrong and arbitrary headers 43 | # aren't stripped out, the client can't just inject these. 
44 | if request.environ['HTTP_X_IDENTITY_STATUS'] != 'Confirmed': 45 | return False 46 | 47 | if self._have_admin(): 48 | return True 49 | 50 | project_id = request.environ['HTTP_X_PROJECT_ID'] 51 | if Project.query.filter_by(label=project_id).first() is None: 52 | logger.info("Successful authentication by Openstack project %r, " 53 | "but this project is not registered with HIL", 54 | project_id) 55 | return False 56 | 57 | return True 58 | 59 | def _have_project_access(self, project): 60 | return project.label == request.environ['HTTP_X_PROJECT_ID'] 61 | 62 | def _have_admin(self): 63 | return 'admin' in request.environ['HTTP_X_ROLES'].split(',') 64 | 65 | 66 | def setup(*args, **kwargs): 67 | """Set a KeystoneAuthBackend as the auth backend. 68 | 69 | Loads keystone settings from hil.cfg. 70 | """ 71 | if not cfg.has_section(__name__): 72 | logger.error('No section for [%s] in hil.cfg; authentication will ' 73 | 'not work without this. Please add this section and try ' 74 | 'again.', __name__) 75 | sys.exit(1) 76 | keystone_cfg = {} 77 | for key in cfg.options(__name__): 78 | keystone_cfg[key] = cfg.get(__name__, key) 79 | 80 | # Great job with the API design Openstack! 81 | factory = filter_factory(keystone_cfg) 82 | app.wsgi_app = factory(app.wsgi_app) 83 | 84 | auth.set_auth_backend(KeystoneAuthBackend()) 85 | -------------------------------------------------------------------------------- /hil/ext/auth/migrations/database/96f1e8f87f85_upgrading_user_to_bigint.py: -------------------------------------------------------------------------------- 1 | """upgrading User to bigint 2 | 3 | Revision ID: 96f1e8f87f85 4 | Revises: cb7096e21dfb 5 | Create Date: 2017-07-12 16:24:27.897244 6 | 7 | """ 8 | 9 | from alembic import op 10 | import sqlalchemy as sa 11 | 12 | 13 | # revision identifiers, used by Alembic. 14 | revision = '96f1e8f87f85' 15 | down_revision = None 16 | branch_labels = ('hil.ext.auth.database',) 17 | 18 | # pylint: disable=missing-docstring 19 | 20 | 21 | def upgrade(): 22 | op.alter_column( 23 | 'user', 24 | 'id', 25 | existing_type=sa.Integer(), 26 | type_=sa.BIGINT(), 27 | autoincrement=True, 28 | existing_server_default=sa.text(u"nextval('user_id_seq'::regclass)")) 29 | op.alter_column('user_projects', 'user_id', 30 | existing_type=sa.Integer(), 31 | type_=sa.BIGINT(), 32 | existing_nullable=True) 33 | op.alter_column('user_projects', 'project_id', 34 | existing_type=sa.Integer(), 35 | type_=sa.BIGINT(), 36 | existing_nullable=True) 37 | 38 | 39 | def downgrade(): 40 | op.alter_column( 41 | 'user', 42 | 'id', 43 | existing_type=sa.BIGINT(), 44 | type_=sa.Integer(), 45 | autoincrement=True, 46 | existing_server_default=sa.text(u"nextval('user_id_seq'::regclass)")) 47 | op.alter_column('user_projects', 'user_id', 48 | existing_type=sa.BIGINT(), 49 | type_=sa.Integer(), 50 | existing_nullable=True) 51 | op.alter_column('user_projects', 'project_id', 52 | existing_type=sa.BIGINT(), 53 | type_=sa.Integer(), 54 | existing_nullable=True) 55 | -------------------------------------------------------------------------------- /hil/ext/auth/mock.py: -------------------------------------------------------------------------------- 1 | """Mock auth plugin for testing. 2 | 3 | This module provides an auth backend which allows the programmer to mock 4 | project an admin access, for use in testing. See the functions `set_project` 5 | and `set_admin` for details. 
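A minimal usage sketch (illustrative):

    backend = MockAuthBackend()
    backend.authenticate()       # set up default (no project, non-admin) state
    backend.set_project('proj')  # act on behalf of project 'proj'
    backend.set_admin(True)      # grant admin access for this request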
6 | """ 7 | from hil import auth, rest 8 | 9 | 10 | class MockAuthBackend(auth.AuthBackend): 11 | """An auth backend for mocking the request's authentication and 12 | authorization status. 13 | 14 | By default, the request does not have access to a project, and does not 15 | have admin access. The functions `set_admin` and `set_project` can be 16 | used to change this. 17 | 18 | If invoked before `authenticate`, `set_auth_success` may be used to change 19 | the return value of `authenticate`, which is useful for testing cases where 20 | the user is not authenticated at all. Note that if the api call functions 21 | are invoked directly, `authenticate` is bypassed, so you will need to 22 | actually spoof a full request. The defualt is True. 23 | """ 24 | 25 | def __init__(self): 26 | self._auth_success = True 27 | 28 | def authenticate(self): 29 | # pylint: disable=missing-docstring 30 | rest.local.auth = { 31 | 'project': None, 32 | 'admin': False, 33 | 'user': 'user', 34 | } 35 | return self._auth_success 36 | 37 | def get_user(self): 38 | """Return the user who is authenticated.""" 39 | return rest.local.auth['user'] 40 | 41 | def set_auth_success(self, ok): 42 | """Set the return value for `authenticate` to `ok`.""" 43 | self._auth_success = ok 44 | 45 | def _have_admin(self): 46 | return rest.local.auth['admin'] 47 | 48 | def _have_project_access(self, project): 49 | return project == rest.local.auth['project'] 50 | 51 | def set_project(self, project): 52 | """Change the project that the request is acting on behalf of.""" 53 | rest.local.auth['project'] = project 54 | 55 | def set_admin(self, admin): 56 | """Change whether the request has admin access. 57 | 58 | admin is a boolean indicating whether the request should have admin 59 | access. 60 | """ 61 | rest.local.auth['admin'] = admin 62 | 63 | def set_user(self, user): 64 | """Set the user the request is running as.""" 65 | rest.local.auth['user'] = user 66 | 67 | 68 | def setup(*args, **kwargs): 69 | """Set a MockAuthBackend as the auth backend.""" 70 | auth.set_auth_backend(MockAuthBackend()) 71 | -------------------------------------------------------------------------------- /hil/ext/auth/null.py: -------------------------------------------------------------------------------- 1 | """'Null' auth backend 2 | 3 | This backend requires no authentication and permits everything. Useful for 4 | testing, do not use in production.""" 5 | from hil import auth 6 | 7 | import logging 8 | from hil.rest import ContextLogger 9 | 10 | logger = ContextLogger(logging.getLogger(__name__), {}) 11 | 12 | 13 | class NullAuthBackend(auth.AuthBackend): 14 | """A null authentication backend. 15 | 16 | Authentication always succeeds, giving admin access. 
17 | """ 18 | 19 | def authenticate(self): 20 | # pylint: disable=missing-docstring 21 | logger.info("successful authentication with null backend.") 22 | return True 23 | 24 | def _have_admin(self): 25 | return True 26 | 27 | def _have_project_access(self, project): 28 | return True 29 | 30 | 31 | def setup(*args, **kwargs): 32 | """Set a NullAuthBackend as the auth backend.""" 33 | auth.set_auth_backend(NullAuthBackend()) 34 | -------------------------------------------------------------------------------- /hil/ext/network_allocators/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CCI-MOC/hil/8c6de2214ddf908c01549b117d5684ac52a93934/hil/ext/network_allocators/__init__.py -------------------------------------------------------------------------------- /hil/ext/network_allocators/migrations/vlan_pool/e06576b2ea9e_vlan_pool_pk_to_bigint.py: -------------------------------------------------------------------------------- 1 | """vlan_pool PK to bigint 2 | 3 | Revision ID: e06576b2ea9e 4 | Revises: 9089fa811a2b 5 | Create Date: 2017-07-21 16:34:50.005560 6 | 7 | """ 8 | 9 | from alembic import op 10 | import sqlalchemy as sa 11 | 12 | 13 | # revision identifiers, used by Alembic. 14 | revision = 'e06576b2ea9e' 15 | down_revision = None 16 | branch_labels = ('hil.ext.network_allocators.vlan_pool',) 17 | 18 | # pylint: disable=missing-docstring 19 | 20 | 21 | def upgrade(): 22 | op.alter_column('vlan', 'id', 23 | existing_type=sa.Integer(), 24 | type_=sa.BIGINT()) 25 | 26 | 27 | def downgrade(): 28 | op.alter_column('vlan', 'id', 29 | existing_type=sa.BIGINT(), 30 | type_=sa.INTEGER()) 31 | -------------------------------------------------------------------------------- /hil/ext/network_allocators/null.py: -------------------------------------------------------------------------------- 1 | """A null network allocator. 2 | 3 | Network IDs are random and arbitrary. The only supported channel is "null". 4 | """ 5 | 6 | import uuid 7 | from hil.network_allocator import NetworkAllocator, set_network_allocator 8 | 9 | 10 | class NullNetworkAllocator(NetworkAllocator): 11 | """A Null network allocator. 12 | 13 | Conforms to the interface specified for its superclass, NetworkAllocator. 
14 | """ 15 | def get_new_network_id(self): 16 | return str(uuid.uuid1()) 17 | 18 | def free_network_id(self, net_id): 19 | pass 20 | 21 | def populate(self): 22 | pass 23 | 24 | def legal_channels_for(self, net_id): 25 | return ["null"] 26 | 27 | def is_legal_channel_for(self, channel_id, net_id): 28 | return channel_id == "null" 29 | 30 | def get_default_channel(self): 31 | return "null" 32 | 33 | def validate_network_id(self, net_id): 34 | return True 35 | 36 | def claim_network_id(self, net_id): 37 | return 38 | 39 | def is_network_id_in_pool(self, net_id): 40 | return True 41 | 42 | 43 | def setup(*args, **kwargs): 44 | """Register a NullNetworkAllocator as the network allocator.""" 45 | set_network_allocator(NullNetworkAllocator()) 46 | -------------------------------------------------------------------------------- /hil/ext/network_allocators/vlan_pool.py: -------------------------------------------------------------------------------- 1 | """VLAN based ``network_allocator`` implementation.""" 2 | 3 | import logging 4 | 5 | from hil.network_allocator import NetworkAllocator, set_network_allocator 6 | from hil.model import db 7 | from hil.config import cfg, core_schema, string_has_vlans 8 | from hil.errors import BlockedError 9 | 10 | from os.path import join, dirname 11 | from hil.migrations import paths 12 | from hil.model import BigIntegerType 13 | 14 | paths[__name__] = join(dirname(__file__), 'migrations', 'vlan_pool') 15 | 16 | core_schema[__name__] = { 17 | 'vlans': string_has_vlans 18 | } 19 | 20 | 21 | def get_vlan_list(): 22 | """Return a list of vlans in the module's config section. 23 | 24 | This is for use by the ``create_bridges`` script. 25 | """ 26 | vlan_str = cfg.get(__name__, 'vlans') 27 | returnee = [] 28 | for r in vlan_str.split(","): 29 | r = r.strip().split("-") 30 | if len(r) == 1: 31 | returnee.append(int(r[0])) 32 | else: 33 | returnee += range(int(r[0]), int(r[1])+1) 34 | return returnee 35 | 36 | 37 | class VlanAllocator(NetworkAllocator): 38 | """A allocator of VLANs. The interface is as specified in 39 | ``NetworkAllocator``. 40 | """ 41 | 42 | def get_new_network_id(self): 43 | vlan = Vlan.query.filter_by(available=True).first() 44 | if not vlan: 45 | return None 46 | vlan.available = False 47 | returnee = str(vlan.vlan_no) 48 | return returnee 49 | 50 | def free_network_id(self, net_id): 51 | vlan = Vlan.query.filter_by(vlan_no=net_id).one_or_none() 52 | if vlan is None: 53 | logger = logging.getLogger(__name__) 54 | logger.error('vlan %s does not exist in database', net_id) 55 | return 56 | vlan.available = True 57 | 58 | def populate(self): 59 | vlan_list = get_vlan_list() 60 | for vlan in vlan_list: 61 | if Vlan.query.filter_by(vlan_no=vlan).count() == 1: 62 | # Already created by a previous call; leave it alone. 
63 | continue 64 | db.session.add(Vlan(vlan)) 65 | db.session.commit() 66 | 67 | def legal_channels_for(self, net_id): 68 | return ["vlan/native", 69 | "vlan/" + net_id] 70 | 71 | def is_legal_channel_for(self, channel_id, net_id): 72 | return channel_id in self.legal_channels_for(net_id) 73 | 74 | def get_default_channel(self): 75 | return "vlan/native" 76 | 77 | def validate_network_id(self, net_id): 78 | try: 79 | return 1 <= int(net_id) <= 4096 80 | except ValueError: 81 | return False 82 | 83 | def claim_network_id(self, net_id): 84 | vlan = Vlan.query.filter_by(vlan_no=net_id).one_or_none() 85 | if vlan is None: 86 | return 87 | elif vlan.available: 88 | vlan.available = False 89 | else: 90 | raise BlockedError("Network ID is not available." 91 | " Please choose a different ID.") 92 | 93 | def is_network_id_in_pool(self, net_id): 94 | vlan = Vlan.query.filter_by(vlan_no=net_id).one_or_none() 95 | return vlan is not None 96 | 97 | 98 | class Vlan(db.Model): 99 | """A VLAN for the Dell switch 100 | 101 | This is used to track which vlan numbers are available; when a Network is 102 | created, it must allocate a Vlan, to ensure that: 103 | 104 | 1. The VLAN number it is using is unique, and 105 | 2. The VLAN number is actually allocated to the HIL; on some deployments 106 | we may have specific vlan numbers that we are allowed to use. 107 | """ 108 | id = db.Column(BigIntegerType, primary_key=True) 109 | vlan_no = db.Column(db.Integer, nullable=False, unique=True) 110 | available = db.Column(db.Boolean, nullable=False) 111 | 112 | def __init__(self, vlan_no): 113 | self.vlan_no = vlan_no 114 | self.available = True 115 | 116 | 117 | def setup(*args, **kwargs): 118 | """Register a VlanAllocator as the network allocator.""" 119 | set_network_allocator(VlanAllocator()) 120 |
-------------------------------------------------------------------------------- /hil/ext/switches/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CCI-MOC/hil/8c6de2214ddf908c01549b117d5684ac52a93934/hil/ext/switches/__init__.py
-------------------------------------------------------------------------------- /hil/ext/switches/common.py: -------------------------------------------------------------------------------- 1 | """Helper methods for switches""" 2 | from hil.config import cfg 3 | from hil import model 4 | from hil.model import db 5 | from hil.errors import BlockedError 6 | import ast 7 | 8 | 9 | def string_to_list(a_string): 10 | """Converts a string representation of a list to a list. 11 | Args: 12 | a_string: list output received as a string. 13 | No quotes around any values: e.g: '[abc, def, 786, hil]' 14 | Returns: object of list type. 15 | An input of '[]' is returned as an empty list. 16 | """ 17 | if a_string == '[]': 18 | return ast.literal_eval(a_string) 19 | else: 20 | a_string = a_string.replace("[", "['").replace("]", "']") 21 | a_string = a_string.replace(",", "','") 22 | a_list = ast.literal_eval(a_string) 23 | a_list = [ele.strip() for ele in a_list] 24 | return a_list 25 | 26 | 27 | def string_to_dict(a_string): 28 | """Converts a string representation of a dictionary 29 | into a dictionary type object. 30 | 31 | Args: 32 | a_string: dictionary received as a string 33 | Sample String: No quotes around keys or values. 34 | '{abc:123, def:xyz, space : lot of it , 2345:some number }' 35 | Returns: Object of dictionary type.
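    Example (illustrative):
        string_to_dict('{abc:123, def:xyz}')
        # returns {'abc': '123', 'def': 'xyz'} -- keys and values are strings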
36 | """ 37 | if a_string == '{}': 38 | a_dict = ast.literal_eval(a_string) 39 | return a_dict 40 | else: 41 | a_string = a_string.replace("{", "{'").replace("}", "'}") 42 | a_string = a_string.replace(":", "':'").replace(",", "','") 43 | a_dict = ast.literal_eval(a_string) 44 | a_dict = {k.strip(): v.strip() for k, v in a_dict.iteritems()} 45 | return a_dict 46 | 47 | 48 | def should_save(switch_obj): 49 | """checks the config file to see if switch should save or not""" 50 | switch_ext = switch_obj.__class__.__module__ 51 | if cfg.has_option(switch_ext, 'save'): 52 | if not cfg.getboolean(switch_ext, 'save'): 53 | return False 54 | return True 55 | 56 | 57 | def check_native_networks(nic, op_type, channel): 58 | """Check to ensure that native network is the first one to be added 59 | and last one to be removed 60 | """ 61 | table = model.NetworkAttachment 62 | query = db.session.query(table).filter(table.nic_id == nic.id) 63 | 64 | if channel != 'vlan/native' and op_type == 'connect' and \ 65 | query.filter(table.channel == 'vlan/native').count() == 0: 66 | # checks if it is trying to attach a trunked network, and then in 67 | # in the db see if nic does not have any networks attached natively 68 | raise BlockedError("Please attach a native network first") 69 | elif channel == 'vlan/native' and op_type == 'detach' and \ 70 | query.filter(table.channel != 'vlan/native').count() > 0: 71 | # if it is detaching a network, then check in the database if there 72 | # are any trunked vlans. 73 | raise BlockedError("Please remove all trunked Vlans" 74 | " before removing the native vlan") 75 | 76 | 77 | def parse_vlans(raw_vlans): 78 | """Method that converts a comma separated list of vlans and vlan ranges to 79 | a list of individual vlans. 80 | 81 | raw_vlans is a string that can look like: 82 | '12,14-18,23,28,80-90' or '20' or '20,22' or '20-22' 83 | """ 84 | range_str = raw_vlans.split(',') 85 | 86 | vlan_list = [] 87 | for num_str in range_str: 88 | if '-' in num_str: 89 | num_str = num_str.split('-') 90 | for x in range(int(num_str[0]), int(num_str[1])+1): 91 | vlan_list.append(str(x)) 92 | else: 93 | vlan_list.append(num_str) 94 | 95 | return vlan_list 96 | -------------------------------------------------------------------------------- /hil/ext/switches/dell.py: -------------------------------------------------------------------------------- 1 | """A switch driver for the Dell Powerconnect 5500 series. 2 | 3 | Currently the driver uses telnet to connect to the switch's console; in 4 | the long term we want to be using SNMP. 
5 | """ 6 | 7 | import logging 8 | from schema import Schema, Optional 9 | import re 10 | 11 | from hil.model import db, Switch 12 | from hil.migrations import paths 13 | from hil.ext.switches import _console 14 | from hil.ext.switches._dell_base import _BaseSession 15 | from os.path import dirname, join 16 | from hil.errors import BadArgumentError 17 | from hil.model import BigIntegerType 18 | from hil.config import core_schema, string_is_bool 19 | 20 | paths[__name__] = join(dirname(__file__), 'migrations', 'dell') 21 | logger = logging.getLogger(__name__) 22 | 23 | core_schema[__name__] = { 24 | Optional('save'): string_is_bool 25 | } 26 | 27 | 28 | class PowerConnect55xx(Switch): 29 | """Dell powerconnect 5500 series switch.""" 30 | 31 | api_name = 'http://schema.massopencloud.org/haas/v0/switches/' \ 32 | 'powerconnect55xx' 33 | 34 | __mapper_args__ = { 35 | 'polymorphic_identity': api_name, 36 | } 37 | 38 | id = db.Column(BigIntegerType, 39 | db.ForeignKey('switch.id'), primary_key=True) 40 | hostname = db.Column(db.String, nullable=False) 41 | username = db.Column(db.String, nullable=False) 42 | password = db.Column(db.String, nullable=False) 43 | 44 | @staticmethod 45 | def validate(kwargs): 46 | Schema({ 47 | 'username': basestring, 48 | 'hostname': basestring, 49 | 'password': basestring, 50 | }).validate(kwargs) 51 | 52 | def session(self): 53 | return _PowerConnect55xxSession.connect(self) 54 | 55 | @staticmethod 56 | def validate_port_name(port): 57 | """ 58 | Valid port names for this switch are of the form gi1/0/11, 59 | te1/0/12, gi1/12, or te1/3 60 | """ 61 | 62 | val = re.compile(r'^(gi|te)\d+/\d+(/\d+)?$') 63 | if not val.match(port): 64 | raise BadArgumentError("Invalid port name. Valid port names for " 65 | "this switch are of the form gi1/0/11, " 66 | "te1/0/12, gi1/12, or te1/3") 67 | return 68 | 69 | def get_capabilities(self): 70 | return ['nativeless-trunk-mode'] 71 | 72 | 73 | class _PowerConnect55xxSession(_BaseSession): 74 | """session object for the power connect 5500 series""" 75 | 76 | def __init__(self, config_prompt, if_prompt, main_prompt, switch, console): 77 | self.config_prompt = config_prompt 78 | self.if_prompt = if_prompt 79 | self.main_prompt = main_prompt 80 | self.switch = switch 81 | self.console = console 82 | 83 | @staticmethod 84 | def connect(switch): 85 | """connect to the switch, and log in.""" 86 | 87 | console = _console.login(switch) 88 | 89 | # Send some string, so we expect the prompt again. Sending only new a 90 | # line doesn't work, it returns some unwanted ANSI sequences in 91 | # console.after 92 | # Eg; main_prompts looks like '\r\n\r\r\x1b[Kconsole#' 93 | # Here \x1b[K is unwanted and causes trouble parsing it. 94 | # Sending some other random string doesn't have this issue. 
95 | console.sendline('some-unrecognized-command') 96 | prompts = _console.get_prompts(console) 97 | return _PowerConnect55xxSession(switch=switch, 98 | console=console, 99 | **prompts) 100 | 101 | def _set_terminal_lines(self, lines): 102 | if lines == 'unlimited': 103 | self._sendline('terminal datadump') 104 | elif lines == 'default': 105 | self._sendline('no terminal datadump') 106 | -------------------------------------------------------------------------------- /hil/ext/switches/migrations/brocade/03ae4ec647da_brocade_pk_to_bigint.py: -------------------------------------------------------------------------------- 1 | """Brocade PK to bigint 2 | 3 | Revision ID: 03ae4ec647da 4 | Revises: 5a6db7a7222d 5 | Create Date: 2017-07-21 15:19:36.049634 6 | 7 | """ 8 | 9 | from alembic import op 10 | import sqlalchemy as sa 11 | 12 | 13 | # revision identifiers, used by Alembic. 14 | revision = '03ae4ec647da' 15 | down_revision = '5a6db7a7222d' 16 | branch_labels = ('hil.ext.switches.brocade',) 17 | 18 | # pylint: disable=missing-docstring 19 | 20 | 21 | def upgrade(): 22 | op.alter_column('brocade', 'id', 23 | existing_type=sa.Integer(), 24 | type_=sa.BIGINT()) 25 | 26 | 27 | def downgrade(): 28 | op.alter_column('brocade', 'id', 29 | existing_type=sa.BIGINT(), 30 | type_=sa.Integer()) 31 | -------------------------------------------------------------------------------- /hil/ext/switches/migrations/brocade/5a6db7a7222d_added_brocade_driver.py: -------------------------------------------------------------------------------- 1 | """Added Brocade driver 2 | 3 | Revision ID: 5a6db7a7222d 4 | Revises: 6a8c19565060 5 | Create Date: 2016-04-11 16:26:40.715332 6 | 7 | """ 8 | from alembic import op 9 | import sqlalchemy as sa 10 | 11 | # revision identifiers, used by Alembic. 12 | revision = '5a6db7a7222d' 13 | down_revision = None 14 | branch_labels = None 15 | 16 | # pylint: disable=missing-docstring 17 | 18 | 19 | def upgrade(): 20 | op.create_table( 21 | 'brocade', 22 | sa.Column('id', sa.Integer(), nullable=False, autoincrement=False), 23 | sa.Column('hostname', sa.String(), nullable=False), 24 | sa.Column('username', sa.String(), nullable=False), 25 | sa.Column('password', sa.String(), nullable=False), 26 | sa.Column('interface_type', sa.String(), nullable=False), 27 | sa.PrimaryKeyConstraint('id') 28 | ) 29 | 30 | 31 | def downgrade(): 32 | op.drop_table('brocade') 33 | -------------------------------------------------------------------------------- /hil/ext/switches/migrations/dell/099b939261c1_rename_dell_switch_table_for_flask_.py: -------------------------------------------------------------------------------- 1 | """Rename dell switch table for Flask-SQLAlchemy 2 | 3 | See the docstring in 'hil/migrations/versions/6a8c19565060_move_to_flask.py' 4 | 5 | Revision ID: 099b939261c1 6 | Revises: 7 | Create Date: 2016-03-22 04:34:49.141555 8 | 9 | """ 10 | from alembic import op 11 | from hil.model import db 12 | 13 | # revision identifiers, used by Alembic. 
14 | revision = '099b939261c1' 15 | down_revision = None 16 | branch_labels = None 17 | 18 | # pylint: disable=missing-docstring 19 | 20 | 21 | def upgrade(): 22 | db.session.close() 23 | metadata = db.inspect(db.engine).get_table_names() 24 | if 'powerconnect55xx' in metadata: 25 | op.rename_table('powerconnect55xx', 'power_connect55xx') 26 | 27 | 28 | def downgrade(): 29 | op.rename_table('power_connect55xx', 'powerconnect55xx') 30 | -------------------------------------------------------------------------------- /hil/ext/switches/migrations/dell/b1b0e6d4302e_dell_pk_to_bigint.py: -------------------------------------------------------------------------------- 1 | """Dell PK to bigint 2 | 3 | Revision ID: b1b0e6d4302e 4 | Revises: 099b939261c1 5 | Create Date: 2017-07-21 14:47:33.143382 6 | 7 | """ 8 | 9 | from alembic import op 10 | import sqlalchemy as sa 11 | 12 | 13 | # revision identifiers, used by Alembic. 14 | revision = 'b1b0e6d4302e' 15 | down_revision = '099b939261c1' 16 | branch_labels = ('hil.ext.switches.dell',) 17 | 18 | # pylint: disable=missing-docstring 19 | 20 | 21 | def upgrade(): 22 | op.alter_column('power_connect55xx', 'id', 23 | existing_type=sa.Integer(), 24 | type_=sa.BIGINT()) 25 | 26 | 27 | def downgrade(): 28 | op.alter_column('power_connect55xx', 'id', 29 | existing_type=sa.BIGINT(), 30 | type_=sa.Integer()) 31 | -------------------------------------------------------------------------------- /hil/ext/switches/migrations/mock/b5b31d19257d_rename_mockswitch_table_for_flask_.py: -------------------------------------------------------------------------------- 1 | """Rename mockswitch table for Flask-SQLAlchemy 2 | 3 | See the docstring in 'hil/migrations/versions/6a8c19565060_move_to_flask.py' 4 | 5 | Revision ID: b5b31d19257d 6 | Revises: 7 | Create Date: 2016-03-22 05:11:19.585905 8 | 9 | """ 10 | from alembic import op 11 | from hil.model import db 12 | 13 | # revision identifiers, used by Alembic. 14 | revision = 'b5b31d19257d' 15 | down_revision = None 16 | branch_labels = None 17 | 18 | # pylint: disable=missing-docstring 19 | 20 | 21 | def upgrade(): 22 | metadata = db.inspect(db.engine).get_table_names() 23 | if 'mockswitch' in metadata: 24 | op.rename_table('mockswitch', 'mock_switch') 25 | 26 | 27 | def downgrade(): 28 | op.rename_table('mock_switch', 'mockswitch') 29 | -------------------------------------------------------------------------------- /hil/ext/switches/migrations/mock/fa9ef2c9b67f_mock_switch_pk_to_bigint.py: -------------------------------------------------------------------------------- 1 | """mock switch PK to bigint 2 | 3 | Revision ID: fa9ef2c9b67f 4 | Revises: b5b31d19257d 5 | Create Date: 2017-07-24 15:46:29.332253 6 | 7 | """ 8 | 9 | from alembic import op 10 | import sqlalchemy as sa 11 | 12 | 13 | # revision identifiers, used by Alembic. 
14 | revision = 'fa9ef2c9b67f' 15 | down_revision = 'b5b31d19257d' 16 | branch_labels = ('hil.ext.switches.mock',) 17 | 18 | # pylint: disable=missing-docstring 19 | 20 | 21 | def upgrade(): 22 | op.alter_column('mock_switch', 'id', 23 | existing_type=sa.Integer(), 24 | type_=sa.BIGINT()) 25 | 26 | 27 | def downgrade(): 28 | op.alter_column('mock_switch', 'id', 29 | existing_type=sa.BIGINT(), 30 | type_=sa.Integer()) 31 | -------------------------------------------------------------------------------- /hil/ext/switches/migrations/n3000/357bcff65fb3_n3000_pk_to_bigint.py: -------------------------------------------------------------------------------- 1 | """N3000 PK to bigint 2 | 3 | Revision ID: 357bcff65fb3 4 | Revises: b96d46bbfb12 5 | Create Date: 2017-07-21 16:17:47.356052 6 | 7 | """ 8 | 9 | from alembic import op 10 | import sqlalchemy as sa 11 | 12 | 13 | # revision identifiers, used by Alembic. 14 | revision = '357bcff65fb3' 15 | down_revision = 'b96d46bbfb12' 16 | branch_labels = ('hil.ext.switches.n3000',) 17 | 18 | # pylint: disable=missing-docstring 19 | 20 | 21 | def upgrade(): 22 | op.alter_column('dell_n3000', 'id', 23 | existing_type=sa.Integer(), 24 | type_=sa.BIGINT()) 25 | 26 | 27 | def downgrade(): 28 | op.alter_column('dell_n3000', 'id', 29 | existing_type=sa.BIGINT(), 30 | type_=sa.Integer()) 31 | -------------------------------------------------------------------------------- /hil/ext/switches/migrations/n3000/b96d46bbfb12_add_dell_n3048_driver.py: -------------------------------------------------------------------------------- 1 | """Add Dell N3048 Driver 2 | 3 | Revision ID: b96d46bbfb12 4 | Revises: 3b2dab2e0d7d 5 | Create Date: 2017-06-08 22:04:10.197640 6 | 7 | """ 8 | 9 | from alembic import op 10 | import sqlalchemy as sa 11 | 12 | 13 | # revision identifiers, used by Alembic. 14 | revision = 'b96d46bbfb12' 15 | down_revision = None 16 | branch_labels = None 17 | 18 | # pylint: disable=missing-docstring 19 | 20 | 21 | def upgrade(): 22 | op.create_table('dell_n3000', 23 | sa.Column('id', sa.Integer(), nullable=False), 24 | sa.Column('hostname', sa.String(), nullable=False), 25 | sa.Column('username', sa.String(), nullable=False), 26 | sa.Column('password', sa.String(), nullable=False), 27 | sa.Column('dummy_vlan', sa.String(), nullable=False), 28 | sa.ForeignKeyConstraint(['id'], ['switch.id'], ), 29 | sa.PrimaryKeyConstraint('id') 30 | ) 31 | 32 | 33 | def downgrade(): 34 | op.drop_table('dell_n3000') 35 | -------------------------------------------------------------------------------- /hil/ext/switches/migrations/nexus/09d96bf567aa_nexus_pks_to_bigint.py: -------------------------------------------------------------------------------- 1 | """Nexus PKs to bigint 2 | 3 | Revision ID: 09d96bf567aa 4 | Revises: 9089fa811a2b 5 | Create Date: 2017-07-21 15:43:24.005782 6 | 7 | """ 8 | 9 | from alembic import op 10 | import sqlalchemy as sa 11 | 12 | # revision identifiers, used by Alembic. 
13 | revision = '09d96bf567aa' 14 | down_revision = None 15 | branch_labels = ('hil.ext.switches.nexus',) 16 | 17 | # pylint: disable=missing-docstring 18 | 19 | 20 | def upgrade(): 21 | op.alter_column('nexus', 'id', 22 | existing_type=sa.Integer(), 23 | type_=sa.BIGINT()) 24 | 25 | 26 | def downgrade(): 27 | op.alter_column('nexus', 'id', 28 | existing_type=sa.BIGINT(), 29 | type_=sa.Integer()) 30 | -------------------------------------------------------------------------------- /hil/ext/switches/mock.py: -------------------------------------------------------------------------------- 1 | """A switch driver that maintains local state only. 2 | 3 | Meant for use in the test suite. 4 | """ 5 | 6 | from collections import defaultdict 7 | from hil.model import Switch, SwitchSession 8 | from hil.migrations import paths 9 | import schema 10 | import re 11 | from sqlalchemy import Column, ForeignKey, String 12 | from os.path import dirname, join 13 | from hil.errors import BadArgumentError 14 | from hil.model import BigIntegerType 15 | 16 | paths[__name__] = join(dirname(__file__), 'migrations', 'mock') 17 | 18 | LOCAL_STATE = defaultdict(lambda: defaultdict(dict)) 19 | 20 | 21 | class MockSwitch(Switch, SwitchSession): 22 | """A switch which stores configuration in memory. 23 | 24 | This class conforms to the interface specified by ``hil.model.Switch``. 25 | It's implementation is connectionless, so it is it's own session object as 26 | suggested int the superclass's documentation. 27 | """ 28 | 29 | api_name = 'http://schema.massopencloud.org/haas/v0/switches/mock' 30 | 31 | __mapper_args__ = { 32 | 'polymorphic_identity': api_name, 33 | } 34 | 35 | id = Column(BigIntegerType, ForeignKey('switch.id'), primary_key=True) 36 | hostname = Column(String, nullable=False) 37 | username = Column(String, nullable=False) 38 | password = Column(String, nullable=False) 39 | 40 | @staticmethod 41 | def validate(kwargs): 42 | schema.Schema({ 43 | 'username': basestring, 44 | 'hostname': basestring, 45 | 'password': basestring, 46 | }).validate(kwargs) 47 | 48 | @staticmethod 49 | def validate_port_name(port): 50 | 51 | """ 52 | Valid port names for this switch are of the form gi1/0/11, 53 | te1/0/12, gi1/12, or te1/3 54 | """ 55 | 56 | val = re.compile(r'^(gi|te)\d+/\d+(/\d+)?$') 57 | if not val.match(port): 58 | raise BadArgumentError("Invalid port name. 
Valid port names for " 59 | "this switch are of the form gi1/0/11, " 60 | "te1/0/12, gi1/12, or te1/3") 61 | return 62 | 63 | def session(self): 64 | return self 65 | 66 | def modify_port(self, port, channel, new_network): 67 | state = LOCAL_STATE[self.label] 68 | 69 | if new_network is None: 70 | del state[port][channel] 71 | else: 72 | state[port][channel] = new_network 73 | 74 | def revert_port(self, port): 75 | if LOCAL_STATE[self.label][port]: 76 | del LOCAL_STATE[self.label][port] 77 | 78 | def disconnect(self): 79 | pass 80 | 81 | def get_port_networks(self, ports): 82 | state = LOCAL_STATE[self.label] 83 | ret = {} 84 | for port in ports: 85 | ret[port] = [] 86 | for chan, net in state[port.label].iteritems(): 87 | if net is not None: 88 | ret[port].append((chan, net)) 89 | return ret 90 | 91 | def get_capabilities(self): 92 | return ['nativeless-trunk-mode'] 93 | -------------------------------------------------------------------------------- /hil/flaskapp.py: -------------------------------------------------------------------------------- 1 | """Module declaring the flask app 2 | 3 | This exposes exactly one symbol, `app`, which is our flask app. Declaring 4 | this in the same module as other things quickly leads to headaches trying 5 | to avoid circular dependencies. 6 | 7 | In particular, it's common to have structures like: 8 | 9 | * Module A needs to access the app 10 | * Module B Also needs to access the app 11 | * Module A requires module B 12 | 13 | If the app is defined in module A, this results in a dependency cycle. 14 | The easiest way to avoid this is to have the app defined in a module that 15 | does (almost) nothing else, and thus has few dependencies. 16 | """ 17 | import flask 18 | 19 | app = flask.Flask(__name__.split('.')[0]) 20 | -------------------------------------------------------------------------------- /hil/migrations.py: -------------------------------------------------------------------------------- 1 | """Database migration support. 2 | 3 | Contains code for: 4 | 5 | * adding migration related commands to the hil-admin cli 6 | * initializing alembic 7 | * validating the database schema 8 | """ 9 | from flask_migrate import Migrate, MigrateCommand 10 | from hil.flaskapp import app 11 | from hil.model import db 12 | from hil.network_allocator import get_network_allocator 13 | from os.path import join, dirname 14 | import sys 15 | 16 | from alembic.config import Config 17 | from alembic.script import ScriptDirectory 18 | 19 | # This is a dictionary mapping the names of modules to directories containing 20 | # their alembic version scripts. Extensions may add entries to this with their 21 | # own module names as keys. 22 | # 23 | # Extensions which use this facility must also use their module name as a 24 | # a branch_label on their migration scripts. 25 | paths = { 26 | 'hil': join(dirname(__file__), 'migrations', 'versions'), 27 | } 28 | 29 | migrate = Migrate(app, db, 30 | # Use the package's directory. 
This ensures that the 31 | # migration scripts are available when the package is 32 | # installed system-wide: 33 | directory=join(dirname(__file__), 'migrations')) 34 | command = MigrateCommand 35 | 36 | 37 | @migrate.configure 38 | def _configure_alembic(config): 39 | """Customize alembic configuration.""" 40 | # Configure the path for version scripts to include all of the directories 41 | # named in the `paths` dictionary, above: 42 | 43 | # It is important that the entry for HIL core ('hil') is first; I(zenhack) 44 | # assume this has something to do with search order, but this hangs 45 | # otherwise. 46 | paths_scratch = paths.copy() 47 | core_path = paths_scratch.pop('hil') 48 | configval = ' '.join([core_path] + list(paths_scratch.values())) 49 | 50 | config.set_main_option('version_locations', configval) 51 | return config 52 | 53 | 54 | # Alembic will create this table itself if need be when doing "stamp" in the 55 | # create_db function below, but unless we declare it, db.drop_all() won't 56 | # know about it, and will leave us with a one-table database. 57 | AlembicVersion = db.Table( 58 | 'alembic_version', db.metadata, 59 | db.Column('version_num', db.String(32), nullable=False) 60 | ) 61 | 62 | 63 | def _expected_heads(): 64 | cfg_path = join(dirname(__file__), 'migrations', 'alembic.ini') 65 | cfg = Config(cfg_path) 66 | _configure_alembic(cfg) 67 | cfg.set_main_option('script_location', dirname(cfg_path)) 68 | script_dir = ScriptDirectory.from_config(cfg) 69 | return set(script_dir.get_heads()) 70 | 71 | 72 | def create_db(): 73 | """Create and populate the initial database. 74 | 75 | The database connection must have been previously initialzed via 76 | `hil.model.init_db`. 77 | """ 78 | with app.app_context(): 79 | db.create_all() 80 | for head in _expected_heads(): 81 | # Record the version of each branch. Each extension which uses the 82 | # database will have its own branch. 83 | db.session.execute( 84 | AlembicVersion.insert().values(version_num=head) 85 | ) 86 | get_network_allocator().populate() 87 | db.session.commit() 88 | 89 | 90 | def check_db_schema(): 91 | """Verify that the database schema is present and up-to-date. 92 | 93 | If not, an error message is printed and the program is aborted. 94 | """ 95 | tablenames = db.inspect(db.engine).get_table_names() 96 | 97 | if 'alembic_version' not in tablenames: 98 | sys.exit("ERROR: Database schema is not initialized; have you run " 99 | "hil-admin db create?") 100 | 101 | actual_heads = {row[0] for row in 102 | db.session.query(AlembicVersion).all()} 103 | 104 | if _expected_heads() != actual_heads: 105 | sys.exit("ERROR: Database schema version is incorrect; try " 106 | "running hil-admin db upgrade heads.") 107 | -------------------------------------------------------------------------------- /hil/migrations/alembic.ini: -------------------------------------------------------------------------------- 1 | # A generic, single database configuration. 
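# (HIL fills in `script_location` and `version_locations` programmatically
# -- see hil/migrations.py -- so this file mainly carries logging
# configuration.)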
2 | 3 | [alembic] 4 | # template used to generate migration files 5 | # file_template = %%(rev)s_%%(slug)s 6 | 7 | # set to 'true' to run the environment during 8 | # the 'revision' command, regardless of autogenerate 9 | # revision_environment = false 10 | 11 | # Logging configuration 12 | [loggers] 13 | keys = root,sqlalchemy,alembic 14 | 15 | [handlers] 16 | keys = console 17 | 18 | [formatters] 19 | keys = generic 20 | 21 | [logger_root] 22 | level = WARN 23 | handlers = console 24 | qualname = 25 | 26 | [logger_sqlalchemy] 27 | level = WARN 28 | handlers = 29 | qualname = sqlalchemy.engine 30 | 31 | [logger_alembic] 32 | level = INFO 33 | handlers = 34 | qualname = alembic 35 | 36 | [handler_console] 37 | class = StreamHandler 38 | args = (sys.stderr,) 39 | level = NOTSET 40 | formatter = generic 41 | 42 | [formatter_generic] 43 | format = %(levelname)-5.5s [%(name)s] %(message)s 44 | datefmt = %H:%M:%S 45 | -------------------------------------------------------------------------------- /hil/migrations/env.py: -------------------------------------------------------------------------------- 1 | """ 2 | This file is boilerplate generated by Alembic; see the alembic 3 | documentation for more information: 4 | 5 | https://alembic.readthedocs.org/en/latest/ 6 | 7 | The Flask-Migrate extension may also be of interest: 8 | 9 | https://flask-migrate.readthedocs.org/en/latest/ 10 | """ 11 | from __future__ import with_statement 12 | from alembic import context 13 | from sqlalchemy import engine_from_config, pool 14 | from logging.config import fileConfig 15 | import logging 16 | from flask import current_app 17 | 18 | # this is the Alembic Config object, which provides 19 | # access to the values within the .ini file in use. 20 | config = context.config 21 | 22 | # Interpret the config file for Python logging. 23 | # This line sets up loggers basically. 24 | fileConfig(config.config_file_name) 25 | logger = logging.getLogger('alembic.env') 26 | 27 | # add your model's MetaData object here 28 | # for 'autogenerate' support 29 | # from myapp import mymodel 30 | # target_metadata = mymodel.Base.metadata 31 | config.set_main_option('sqlalchemy.url', 32 | current_app.config.get('SQLALCHEMY_DATABASE_URI')) 33 | target_metadata = current_app.extensions['migrate'].db.metadata 34 | 35 | # other values from the config, defined by the needs of env.py, 36 | # can be acquired: 37 | # my_important_option = config.get_main_option("my_important_option") 38 | # ... etc. 39 | 40 | 41 | def run_migrations_offline(): 42 | """Run migrations in 'offline' mode. 43 | 44 | This configures the context with just a URL 45 | and not an Engine, though an Engine is acceptable 46 | here as well. By skipping the Engine creation 47 | we don't even need a DBAPI to be available. 48 | 49 | Calls to context.execute() here emit the given string to the 50 | script output. 51 | 52 | """ 53 | url = config.get_main_option("sqlalchemy.url") 54 | context.configure(url=url) 55 | 56 | with context.begin_transaction(): 57 | context.run_migrations() 58 | 59 | 60 | def run_migrations_online(): 61 | """Run migrations in 'online' mode. 62 | 63 | In this scenario we need to create an Engine 64 | and associate a connection with the context. 65 | 66 | """ 67 | 68 | def process_revision_directives(context, revision, directives): 69 | """ 70 | this callback is used to prevent an auto-migration from being generated 71 | when there are no changes to the schema. 
72 | reference: http://alembic.readthedocs.org/en/latest/cookbook.html 73 | """ 74 | if getattr(config.cmd_opts, 'autogenerate', False): 75 | script = directives[0] 76 | if script.upgrade_ops.is_empty(): 77 | directives[:] = [] 78 | logger.info('No changes in schema detected.') 79 | 80 | engine = engine_from_config(config.get_section(config.config_ini_section), 81 | prefix='sqlalchemy.', 82 | poolclass=pool.NullPool) 83 | 84 | connection = engine.connect() 85 | context.configure(compare_type=True, 86 | connection=connection, 87 | target_metadata=target_metadata, 88 | process_revision_directives=process_revision_directives, 89 | **current_app.extensions['migrate'].configure_args) 90 | 91 | try: 92 | with context.begin_transaction(): 93 | context.run_migrations() 94 | finally: 95 | connection.close() 96 | 97 | 98 | if context.is_offline_mode(): 99 | run_migrations_offline() 100 | else: 101 | run_migrations_online() 102 | -------------------------------------------------------------------------------- /hil/migrations/script.py.mako: -------------------------------------------------------------------------------- 1 | """${message} 2 | 3 | Revision ID: ${up_revision} 4 | Revises: ${down_revision} 5 | Create Date: ${create_date} 6 | 7 | """ 8 | 9 | from alembic import op 10 | import sqlalchemy as sa 11 | ${imports if imports else ""} 12 | 13 | # revision identifiers, used by Alembic. 14 | revision = ${repr(up_revision)} 15 | down_revision = ${repr(down_revision)} 16 | branch_labels = ${repr(branch_labels)} 17 | 18 | # pylint: disable=missing-docstring 19 | 20 | 21 | def upgrade(): 22 | ${upgrades if upgrades else "pass"} 23 | 24 | 25 | def downgrade(): 26 | ${downgrades if downgrades else "pass"} 27 | -------------------------------------------------------------------------------- /hil/migrations/versions/02f7e9607e16_delete_legacy_obm_support.py: -------------------------------------------------------------------------------- 1 | """Delete legacy obm support 2 | 3 | Revision ID: 02f7e9607e16 4 | Revises: d65a9dc873d7 5 | Create Date: 2018-09-04 17:00:44.359952 6 | 7 | """ 8 | 9 | from alembic import op 10 | import sqlalchemy as sa 11 | 12 | 13 | # revision identifiers, used by Alembic. 
14 | revision = '02f7e9607e16' 15 | down_revision = ( 16 | 'd65a9dc873d7', # hil core 17 | '655e037522d0', # mock obm 18 | 'fcb23cd2e9b7', # ipmi 19 | ) 20 | branch_labels = ('hil',) 21 | 22 | # pylint: disable=missing-docstring 23 | 24 | 25 | def upgrade(): 26 | engine = op.get_bind() 27 | for table in 'mock_obm', 'ipmi': 28 | if engine.dialect.has_table(engine, table): 29 | op.drop_table(table) 30 | op.drop_constraint(u'node_obm_id_fkey', 'node', type_='foreignkey') 31 | op.drop_column('node', 'obm_id') 32 | op.drop_table('obm') 33 | 34 | 35 | def downgrade(): 36 | op.create_table( 37 | 'obm', 38 | sa.Column('id', sa.BIGINT(), autoincrement=True, nullable=False), 39 | sa.Column('type', sa.VARCHAR(), autoincrement=False, nullable=False), 40 | sa.PrimaryKeyConstraint('id', name=u'obm_pkey') 41 | ) 42 | 43 | op.add_column( 44 | 'node', 45 | sa.Column('obm_id', sa.BIGINT(), autoincrement=False, nullable=False), 46 | ) 47 | op.create_foreign_key( 48 | u'node_obm_id_fkey', 'node', 'obm', ['obm_id'], ['id'], 49 | ) 50 | -------------------------------------------------------------------------------- /hil/migrations/versions/264ddaebdfcc_make_labels_unique.py: -------------------------------------------------------------------------------- 1 | """make labels unique 2 | 3 | Revision ID: 264ddaebdfcc 4 | Revises: 89ff8a6d72b2 5 | Create Date: 2018-03-12 11:15:09.729850 6 | 7 | """ 8 | 9 | from alembic import op 10 | # import sqlalchemy as sa 11 | 12 | 13 | # revision identifiers, used by Alembic. 14 | revision = '264ddaebdfcc' 15 | down_revision = '89ff8a6d72b2' 16 | branch_labels = None 17 | 18 | # pylint: disable=missing-docstring 19 | 20 | 21 | def upgrade(): 22 | op.create_unique_constraint(None, 'network', ['label']) 23 | op.create_unique_constraint(None, 'node', ['label']) 24 | op.create_unique_constraint(None, 'project', ['label']) 25 | op.create_unique_constraint(None, 'switch', ['label']) 26 | 27 | 28 | def downgrade(): 29 | op.drop_constraint(None, 'switch', type_='unique') 30 | op.drop_constraint(None, 'project', type_='unique') 31 | op.drop_constraint(None, 'node', type_='unique') 32 | op.drop_constraint(None, 'network', type_='unique') 33 | -------------------------------------------------------------------------------- /hil/migrations/versions/3b2dab2e0d7d_add_type_field_to_networkingaction.py: -------------------------------------------------------------------------------- 1 | """Add type field to NetworkingAction 2 | 3 | Revision ID: 3b2dab2e0d7d 4 | Revises: 89630e3872ec 5 | Create Date: 2016-11-14 14:57:19.247255 6 | 7 | """ 8 | 9 | from alembic import op 10 | import sqlalchemy as sa 11 | from hil.model import NetworkingAction 12 | 13 | 14 | # revision identifiers, used by Alembic. 15 | revision = '3b2dab2e0d7d' 16 | down_revision = '57f4c30b0ad4' 17 | branch_labels = None 18 | 19 | # pylint: disable=missing-docstring 20 | 21 | 22 | def upgrade(): 23 | # We first introduce the table with null 'type' fields allowed. 24 | # Any existing actions will have null type fields, so we then 25 | # update them to 'modify_port', which was previously the only 26 | # possible action. Then, we add the NOT NULL constraint once 27 | # we know it won't run afoul of any existing rows. 
28 | op.add_column('networking_action', 29 | sa.Column('type', sa.String(), 30 | nullable=True)) 31 | op.execute(sa.update(NetworkingAction).values({'type': 'modify_port'})) 32 | op.alter_column('networking_action', 'type', nullable=False) 33 | 34 | 35 | def downgrade(): 36 | op.drop_column('networking_action', 'type') 37 | -------------------------------------------------------------------------------- /hil/migrations/versions/57f4c30b0ad4_added_metadata.py: -------------------------------------------------------------------------------- 1 | """added metadata 2 | 3 | Revision ID: 57f4c30b0ad4 4 | Revises: 89630e3872ec 5 | Create Date: 2016-11-08 08:36:01.183860 6 | 7 | """ 8 | 9 | from alembic import op 10 | import sqlalchemy as sa 11 | 12 | 13 | # revision identifiers, used by Alembic. 14 | revision = '57f4c30b0ad4' 15 | down_revision = '89630e3872ec' 16 | branch_labels = None 17 | 18 | # pylint: disable=missing-docstring 19 | 20 | 21 | def upgrade(): 22 | op.create_table('metadata', 23 | sa.Column('id', sa.Integer(), nullable=False), 24 | sa.Column('label', sa.String(), nullable=False), 25 | sa.Column('value', sa.String(), nullable=True), 26 | sa.Column('owner_id', sa.Integer(), nullable=False), 27 | sa.ForeignKeyConstraint(['owner_id'], ['node.id'], ), 28 | sa.PrimaryKeyConstraint('id') 29 | ) 30 | 31 | 32 | def downgrade(): 33 | op.drop_table('metadata') 34 | -------------------------------------------------------------------------------- /hil/migrations/versions/655e037522d0_mock_obm_pks_to_bigint.py: -------------------------------------------------------------------------------- 1 | """mock obm PKs to bigint 2 | 3 | Revision ID: 655e037522d0 4 | Revises: df8d9f423f2b 5 | Create Date: 2017-07-21 11:24:18.746152 6 | 7 | """ 8 | 9 | from alembic import op 10 | import sqlalchemy as sa 11 | 12 | 13 | # revision identifiers, used by Alembic. 14 | revision = '655e037522d0' 15 | down_revision = 'df8d9f423f2b' 16 | branch_labels = ('hil.ext.obm.mock',) 17 | 18 | # pylint: disable=missing-docstring 19 | 20 | 21 | def upgrade(): 22 | engine = op.get_bind() 23 | if not engine.dialect.has_table(engine, 'mock_obm'): 24 | return 25 | op.alter_column('mock_obm', 'id', 26 | existing_type=sa.INTEGER(), 27 | type_=sa.BigInteger()) 28 | 29 | 30 | def downgrade(): 31 | op.alter_column('mock_obm', 'id', 32 | existing_type=sa.BigInteger(), 33 | type_=sa.INTEGER()) 34 | -------------------------------------------------------------------------------- /hil/migrations/versions/6a8c19565060_move_to_flask.py: -------------------------------------------------------------------------------- 1 | """Rename tables to account for Flask-SQLAlchemy's auto-naming. 2 | 3 | Unlike our own (old) table name generator, Flask-SQLAlchemy inserts 4 | underscores in names that are CamelCase (i.e. table names are snake_case). 5 | There's no reason to keep the old behavior, but we need this migration script 6 | otherwise. 7 | 8 | Revision ID: 6a8c19565060 9 | Revises: None 10 | Create Date: 2016-03-15 23:40:11.411599 11 | """ 12 | from alembic import op 13 | 14 | # revision identifiers, used by Alembic. 15 | revision = '6a8c19565060' 16 | down_revision = None 17 | branch_labels = None 18 | 19 | # pylint: disable=missing-docstring 20 | 21 | 22 | def upgrade(): 23 | op.rename_table('networkattachment', 'network_attachment') 24 | # The _id_seq is a postgres-specific thing; it has to do with the 25 | # AUTO INCREMENT functionality. 
26 | op.rename_table('networkattachment_id_seq', 'network_attachment_id_seq') 27 | op.rename_table('networkingaction', 'networking_action') 28 | op.rename_table('networkingaction_id_seq', 'networking_action_id_seq') 29 | 30 | 31 | def downgrade(): 32 | op.rename_table('network_attachment', 'networkattachment') 33 | op.rename_table('network_attachment_id_seq', 'networkattachment_id_seq') 34 | op.rename_table('networking_action', 'networkingaction') 35 | op.rename_table('networking_action_id_seq', 'networkingaction_id_seq') 36 | -------------------------------------------------------------------------------- /hil/migrations/versions/7acb050f783c_add_obmd_fields.py: -------------------------------------------------------------------------------- 1 | """Add obmd fields 2 | 3 | Revision ID: 7acb050f783c 4 | Revises: 9089fa811a2b 5 | Create Date: 2018-01-09 18:29:58.413692 6 | 7 | """ 8 | 9 | from alembic import op 10 | import sqlalchemy as sa 11 | 12 | 13 | # revision identifiers, used by Alembic. 14 | revision = '7acb050f783c' 15 | down_revision = '9089fa811a2b' 16 | branch_labels = None 17 | 18 | # pylint: disable=missing-docstring 19 | 20 | 21 | def upgrade(): 22 | op.add_column( 23 | 'node', 24 | sa.Column('obmd_admin_token', sa.String(), nullable=True), 25 | ) 26 | op.add_column( 27 | 'node', 28 | sa.Column('obmd_node_token', sa.String(), nullable=True), 29 | ) 30 | op.add_column( 31 | 'node', 32 | sa.Column('obmd_uri', sa.String(), nullable=True), 33 | ) 34 | 35 | 36 | def downgrade(): 37 | op.drop_column('node', 'obmd_admin_token') 38 | op.drop_column('node', 'obmd_node_token') 39 | op.drop_column('node', 'obmd_uri') 40 | -------------------------------------------------------------------------------- /hil/migrations/versions/89630e3872ec_network_acl.py: -------------------------------------------------------------------------------- 1 | """network ACL 2 | 3 | Revision ID: 89630e3872ec 4 | Revises: 6a8c19565060 5 | Create Date: 2016-05-06 09:24:26.911562 6 | 7 | """ 8 | 9 | from alembic import op 10 | import sqlalchemy as sa 11 | 12 | # revision identifiers, used by Alembic. 
13 | revision = '89630e3872ec' 14 | down_revision = '6a8c19565060' 15 | branch_labels = None 16 | 17 | # pylint: disable=missing-docstring 18 | 19 | 20 | def upgrade(): 21 | op.create_table( 22 | 'network_projects', 23 | sa.Column('project_id', sa.Integer(), nullable=True), 24 | sa.Column('network_id', sa.Integer(), nullable=True), 25 | sa.ForeignKeyConstraint(['network_id'], ['network.id'], ), 26 | sa.ForeignKeyConstraint(['project_id'], ['project.id'], ) 27 | ) 28 | network_projects = sa.sql.table( 29 | 'network_projects', 30 | sa.Column('project_id', sa.Integer(), nullable=True), 31 | sa.Column('network_id', sa.Integer(), nullable=True), 32 | ) 33 | conn = op.get_bind() 34 | res = conn.execute( 35 | "select id, access_id from network where access_id >= 1") 36 | results = res.fetchall() 37 | networks = [{'network_id': r[0], 'project_id': r[1]} for r in results] 38 | op.bulk_insert(network_projects, networks) 39 | op.alter_column(u'network', 'creator_id', new_column_name='owner_id') 40 | op.drop_constraint(u'network_access_id_fkey', 41 | 'network', 42 | type_='foreignkey') 43 | op.drop_column(u'network', 'access_id') 44 | 45 | 46 | def downgrade(): 47 | op.add_column(u'network', 48 | sa.Column('access_id', 49 | sa.INTEGER(), 50 | autoincrement=False, 51 | nullable=True)) 52 | op.alter_column(u'network', 'owner_id', new_column_name='creator_id') 53 | op.create_foreign_key(u'network_access_id_fkey', 'network', 'project', 54 | ['access_id'], ['id']) 55 | op.drop_constraint(u'network_projects_project_id_fkey', 56 | 'network_projects', 57 | type_='foreignkey') 58 | op.drop_constraint(u'network_projects_network_id_fkey', 59 | 'network_projects', 60 | type_='foreignkey') 61 | op.drop_table('network_projects') 62 | -------------------------------------------------------------------------------- /hil/migrations/versions/89ff8a6d72b2_add_uuid_and_status_to_networkingaction.py: -------------------------------------------------------------------------------- 1 | """add uuid and status to networkingaction 2 | 3 | Revision ID: 89ff8a6d72b2 4 | Revises: 7acb050f783c 5 | Create Date: 2018-01-18 14:04:09.553012 6 | 7 | """ 8 | 9 | from alembic import op 10 | from sqlalchemy.orm import Session 11 | import sqlalchemy as sa 12 | import uuid 13 | from hil import model 14 | 15 | # revision identifiers, used by Alembic. 
16 | revision = '89ff8a6d72b2' 17 | down_revision = '7acb050f783c' 18 | branch_labels = None 19 | 20 | # pylint: disable=missing-docstring 21 | 22 | 23 | def upgrade(): 24 | op.add_column('networking_action', sa.Column('status', sa.String(), 25 | nullable=True)) 26 | op.add_column('networking_action', sa.Column('uuid', sa.String(), 27 | nullable=True)) 28 | op.create_index(op.f('ix_networking_action_uuid'), 'networking_action', 29 | ['uuid'], unique=False) 30 | 31 | conn = op.get_bind() 32 | session = Session(bind=conn) 33 | for item in session.query(model.NetworkingAction): 34 | item.uuid = str(uuid.uuid4()) 35 | item.status = 'PENDING' 36 | session.commit() 37 | session.close() 38 | 39 | op.alter_column('networking_action', 'status', nullable=False) 40 | op.alter_column('networking_action', 'uuid', nullable=False) 41 | 42 | 43 | def downgrade(): 44 | op.execute("DELETE from networking_action " 45 | "WHERE status = 'DONE' or status = 'ERROR'") 46 | op.drop_index(op.f('ix_networking_action_uuid'), 47 | table_name='networking_action') 48 | op.drop_column('networking_action', 'uuid') 49 | op.drop_column('networking_action', 'status') 50 | -------------------------------------------------------------------------------- /hil/migrations/versions/aa9106430f1c_testing_only_avoid_manual_intervention.py: -------------------------------------------------------------------------------- 1 | """Testing only: insert data to avoid manual intervention. 2 | 3 | The normal upgrade process requires an admin to run a helper 4 | script that we've written, which exports node info to OBMd, 5 | and adds the relevant fields. In the test suite, we don't have 6 | the opportunity to do this, so we expose (via an environment 7 | variable) the ability to spoof the data, so that the tests can 8 | pass. If this environment variable is not set, this script is 9 | a no-op. 10 | 11 | Revision ID: aa9106430f1c 12 | Revises: 264ddaebdfcc 13 | Create Date: 2018-04-07 19:10:35.243712 14 | 15 | 16 | """ 17 | 18 | import os 19 | from alembic import op 20 | from sqlalchemy.orm import Session 21 | from hil import model 22 | 23 | 24 | # revision identifiers, used by Alembic. 25 | revision = 'aa9106430f1c' 26 | down_revision = '264ddaebdfcc' 27 | branch_labels = None 28 | 29 | # pylint: disable=missing-docstring 30 | 31 | 32 | def upgrade(): 33 | if os.getenv('SPOOF_MANUAL_MIGRATIONS') != 'true': 34 | return 35 | 36 | conn = op.get_bind() 37 | session = Session(bind=conn) 38 | for node in session.query(model.Node): 39 | # If we're in the test, we should expect that neither of these 40 | # has been set: 41 | assert node.obmd_uri is None 42 | assert node.obmd_admin_token is None 43 | 44 | node.obmd_uri = 'http://obmd.example.com/nodes/' + node.label 45 | node.obmd_admin_token = 'secret' 46 | 47 | session.commit() 48 | session.close() 49 | 50 | 51 | def downgrade(): 52 | pass 53 | -------------------------------------------------------------------------------- /hil/migrations/versions/c45f6a96dbe7_nic_primary_key_changed_to_bigint.py: -------------------------------------------------------------------------------- 1 | """NIC primary key changed to BIGINT 2 | 3 | Revision ID: c45f6a96dbe7 4 | Revises: 3b2dab2e0d7d 5 | Create Date: 2017-06-14 10:36:17.744991 6 | 7 | """ 8 | 9 | from alembic import op 10 | import sqlalchemy as sa 11 | 12 | 13 | # revision identifiers, used by Alembic. 
14 | revision = 'c45f6a96dbe7' 15 | down_revision = '3b2dab2e0d7d' 16 | branch_labels = None 17 | 18 | # pylint: disable=missing-docstring 19 | 20 | 21 | def upgrade(): 22 | op.alter_column( 23 | 'nic', 24 | 'id', 25 | existing_type=sa.INTEGER(), 26 | type_=sa.BIGINT(), 27 | autoincrement=True, 28 | existing_server_default=sa.text(u"nextval('nic_id_seq'::regclass)")) 29 | op.alter_column('networking_action', 'nic_id', 30 | existing_type=sa.INTEGER(), 31 | type_=sa.BIGINT(), 32 | existing_nullable=False) 33 | op.alter_column('network_attachment', 'nic_id', 34 | existing_type=sa.INTEGER(), 35 | type_=sa.BIGINT(), 36 | existing_nullable=False) 37 | 38 | 39 | def downgrade(): 40 | op.alter_column('network_attachment', 'nic_id', 41 | existing_type=sa.BIGINT(), 42 | type_=sa.INTEGER(), 43 | existing_nullable=False) 44 | op.alter_column('networking_action', 'nic_id', 45 | existing_type=sa.BIGINT(), 46 | type_=sa.INTEGER(), 47 | existing_nullable=False) 48 | op.alter_column( 49 | 'nic', 50 | 'id', 51 | existing_type=sa.BIGINT(), 52 | type_=sa.INTEGER(), 53 | autoincrement=True, 54 | existing_server_default=sa.text(u"nextval('nic_id_seq'::regclass)")) 55 | -------------------------------------------------------------------------------- /hil/migrations/versions/d65a9dc873d7_mark_obmd_fields_not_nullable.py: -------------------------------------------------------------------------------- 1 | """mark obmd fields not nullable 2 | 3 | Revision ID: d65a9dc873d7 4 | Revises: aa9106430f1c 5 | Create Date: 2018-04-07 19:10:35.243712 6 | 7 | """ 8 | 9 | from alembic import op 10 | import sqlalchemy as sa 11 | 12 | 13 | # revision identifiers, used by Alembic. 14 | revision = 'd65a9dc873d7' 15 | down_revision = 'aa9106430f1c' 16 | branch_labels = None 17 | 18 | # pylint: disable=missing-docstring 19 | 20 | 21 | def upgrade(): 22 | op.alter_column( 23 | 'node', 24 | 'obmd_admin_token', 25 | existing_type=sa.VARCHAR(), 26 | nullable=False, 27 | ) 28 | op.alter_column( 29 | 'node', 30 | 'obmd_uri', 31 | existing_type=sa.VARCHAR(), 32 | nullable=False, 33 | ) 34 | 35 | 36 | def downgrade(): 37 | op.alter_column( 38 | 'node', 39 | 'obmd_uri', 40 | existing_type=sa.VARCHAR(), 41 | nullable=True, 42 | ) 43 | op.alter_column( 44 | 'node', 45 | 'obmd_admin_token', 46 | existing_type=sa.VARCHAR(), 47 | nullable=True, 48 | ) 49 | -------------------------------------------------------------------------------- /hil/migrations/versions/df8d9f423f2b_rename_mockobm_table_for_flask.py: -------------------------------------------------------------------------------- 1 | """Rename mockobm table for flask 2 | 3 | See the docstring in 'hil/migrations/versions/6a8c19565060_move_to_flask.py' 4 | 5 | Revision ID: df8d9f423f2b 6 | Revises: 6a8c19565060 7 | Create Date: 2016-04-04 02:24:53.812100 8 | 9 | """ 10 | from alembic import op 11 | from hil.model import db 12 | 13 | # revision identifiers, used by Alembic. 
14 | revision = 'df8d9f423f2b' 15 | down_revision = None 16 | branch_labels = None 17 | 18 | # pylint: disable=missing-docstring 19 | 20 | 21 | def upgrade(): 22 | metadata = db.inspect(db.engine).get_table_names() 23 | if 'mockobm' in metadata: 24 | op.rename_table('mockobm', 'mock_obm') 25 | 26 | 27 | def downgrade(): 28 | op.rename_table('mock_obm', 'mockobm') 29 | -------------------------------------------------------------------------------- /hil/migrations/versions/fcb23cd2e9b7_ipmi_obm_pks_to_bigint.py: -------------------------------------------------------------------------------- 1 | """ipmi obm Pks to bigint 2 | 3 | Revision ID: fcb23cd2e9b7 4 | Revises: 9089fa811a2b 5 | Create Date: 2017-07-21 11:47:26.250168 6 | 7 | """ 8 | 9 | from alembic import op 10 | import sqlalchemy as sa 11 | 12 | 13 | # revision identifiers, used by Alembic. 14 | revision = 'fcb23cd2e9b7' 15 | down_revision = None 16 | branch_labels = ('hil.ext.obm.ipmi',) 17 | 18 | # pylint: disable=missing-docstring 19 | 20 | 21 | def upgrade(): 22 | engine = op.get_bind() 23 | if not engine.dialect.has_table(engine, 'ipmi'): 24 | return 25 | op.alter_column('ipmi', 'id', 26 | existing_type=sa.Integer(), 27 | type_=sa.BIGINT()) 28 | 29 | 30 | def downgrade(): 31 | op.alter_column('ipmi', 'id', 32 | existing_type=sa.BIGINT(), 33 | type_=sa.Integer()) 34 | -------------------------------------------------------------------------------- /hil/server.py: -------------------------------------------------------------------------------- 1 | """Manage server-side startup""" 2 | import sys 3 | 4 | # api must be loaded to register the api callbacks, even though we don't 5 | # use it directly from this module. 6 | from hil import api # pylint: disable=unused-import 7 | 8 | from hil import model, auth 9 | from hil.class_resolver import build_class_map_for 10 | from hil.network_allocator import get_network_allocator 11 | 12 | 13 | def register_drivers(): 14 | """Put all of the loaded drivers somewhere where the server can find them. 15 | 16 | This must be run *after* extensions have been loaded. 17 | """ 18 | build_class_map_for(model.Switch) 19 | 20 | 21 | def validate_state(): 22 | """Do some sanity checking before kicking things off. In particular: 23 | 24 | * Make sure we have extensions loaded for: 25 | * a network allocator 26 | * an auth backend 27 | 28 | (More checks may be added in the future). 29 | 30 | If any of the checks fail, ``validate_state`` aborts the program. 31 | """ 32 | if get_network_allocator() is None: 33 | sys.exit("ERROR: No network allocator registered; make sure your " 34 | "hil.cfg loads an extension which provides the network " 35 | "allocator.") 36 | if auth.get_auth_backend() is None: 37 | sys.exit("ERROR: No authentication/authorization backend registered; " 38 | "make sure your hil.cfg loads an extension which provides " 39 | "the auth backend.") 40 | 41 | 42 | def stop_orphan_consoles(): 43 | """Stop any orphaned console logging processes. 44 | 45 | These may exist if HIL was shut down uncleanly. 46 | """ 47 | # Stop all orphan console logging processes on startup 48 | nodes = model.Node.query.all() 49 | for node in nodes: 50 | node.disable_obm() 51 | 52 | 53 | def init(): 54 | """Set up the api server's internal state. 
55 | 56 | This is a convenience wrapper that calls the other setup routines in 57 | this module in the correct order, as well as ``model.init_db`` 58 | """ 59 | register_drivers() 60 | validate_state() 61 | model.init_db() 62 | -------------------------------------------------------------------------------- /scripts/create_bridges: -------------------------------------------------------------------------------- 1 | from hil import config 2 | from hil.config import cfg 3 | from subprocess import call 4 | from hil.ext.network_allocators.vlan_pool import get_vlan_list 5 | 6 | config.load() 7 | 8 | def create_bridge(vlan_no): 9 | vlan_no = str(vlan_no) 10 | trunk_nic = cfg.get('headnode', 'trunk_nic') 11 | bridge = 'br-vlan%s' % vlan_no 12 | vlan_nic = '%s.%s' % (trunk_nic, vlan_no) 13 | call(['brctl', 'addbr', bridge]) 14 | call(['vconfig', 'add', trunk_nic, vlan_no]) 15 | call(['brctl', 'addif', bridge, vlan_nic]) 16 | call(['ifconfig', bridge, 'up', 'promisc']) 17 | call(['ifconfig', vlan_nic, 'up', 'promisc']) 18 | 19 | for vlan in get_vlan_list(): 20 | create_bridge(vlan) 21 | -------------------------------------------------------------------------------- /scripts/create_bridges.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=Service to create bridges 3 | Before=libvirtd.service 4 | After=network-online.target 5 | After=network.target 6 | 7 | [Service] 8 | WorkingDirectory = /etc/ 9 | ExecStart=/usr/bin/create_bridges 10 | Type=oneshot 11 | 12 | [Install] 13 | WantedBy=multi-user.target 14 | -------------------------------------------------------------------------------- /scripts/hil-complete.sh: -------------------------------------------------------------------------------- 1 | _hil_completion() { 2 | COMPREPLY=( $( env COMP_WORDS="${COMP_WORDS[*]}" \ 3 | COMP_CWORD=$COMP_CWORD \ 4 | _HIL_COMPLETE=complete $1 ) ) 5 | return 0 6 | } 7 | 8 | complete -F _hil_completion -o default hil; 9 | -------------------------------------------------------------------------------- /scripts/hil_network.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=HIL Network Server 3 | After=network.target 4 | After=postgresql 5 | 6 | [Service] 7 | User=hil_user 8 | Group=hil_user 9 | WorkingDirectory=/var/lib/hil/ 10 | ExecStart=/usr/bin/hil-admin serve-networks 11 | Type=simple 12 | ExecReload=/bin/kill -HUP $MAINPID 13 | Restart=on-failure 14 | RestartSec=5s 15 | 16 | [Install] 17 | WantedBy=multi-user.target 18 | 19 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [easy_install] 2 | 3 | [tool:pytest] 4 | norecursedirs = .git build dist docs hil hil.egg-info __pycache__ temp .venv 5 | python_files = *.py 6 | #addopts = --cov-report term-missing --cov hil --boxed 7 | # "--boxed" gives a fresh process to each test in case of crashes or if we want new 8 | # state for each run. 9 | # Sparsely documented here: https://pypi.python.org/pypi/pytest-xdist#boxed 10 | addopts = --boxed 11 | -------------------------------------------------------------------------------- /specs/README.md: -------------------------------------------------------------------------------- 1 | # Overview 2 | 3 | This directory contains specifications for different aspects/features of HIL. 
4 |
5 | The idea is that if a feature could be controversial or require significant
6 | effort, a specification gives the HIL team a place to come to consensus on the
7 | design before significant effort is expended. A spec also documents the feature
8 | and rationale for future developers/users.
9 |
10 | A spec does not have to be lengthy: Try to capture enough info that people can
11 | make an informed decision.
12 |
13 | # When not needed
14 |
15 | - Bug fixes/minor features do not require a spec, and should be submitted
16 | directly as a Pull Request (PR).
17 |
18 | - If there is extensive discussion/debate in an issue/PR, that discussion could suffice.
19 | In that case, please be sure to write `Implements #<issue number>` or
20 | `Implements #<PR number>` in your first commit message or PR description to
21 | allow the two to be correlated.
22 |
23 | If you are wondering whether your effort requires a spec, please ask one of the
24 | core team members.
25 |
26 | # Procedure
27 |
28 | To create a spec:
29 | 1. Create a new file in this directory using the template below.
30 | 2. Submit a Pull Request with that change.
31 | 3. When/if the spec is accepted, be sure to include `Implements #<spec PR number>` in
32 | the body of the implementing PR to ensure that reviewers can refer back to the
33 | specification.
34 |
35 | # Template
36 |
37 | Please include at least these entries in your specification, adding others if
38 | you think them necessary:
39 |
40 | ```
41 | # Problem
42 | -----------
43 |
44 | Please describe the problem you are trying to address
45 |
46 | # Solution
47 | -----------
48 |
49 | What is your proposed solution?
50 |
51 | # Alternative Solutions
52 | -----------------------
53 |
54 | If there are alternative ways of doing this, what are the tradeoffs?
55 |
56 | # Arch Impact
57 | -----------------
58 |
59 | Are there any new architectural assumptions we are now making?
60 |
61 | # Security
62 | ----------
63 |
64 | Does this impact security?
65 | ```
66 |
-------------------------------------------------------------------------------- /specs/switch-driver-capabilities.md: --------------------------------------------------------------------------------
1 |
2 | # Expose network driver capabilities
3 | -----------
4 |
5 | [Issue 410](https://github.com/CCI-MOC/hil/issues/410)
6 |
7 | We want to expose switch driver capabilities. Capabilities could include:
8 |
9 | - Jumbo frames are per-switch on our Dell switches and per-port on our Ciscos (#755)
10 | - QoS (#384)
11 | - Disabling STP (#779)
12 | - DHCP/ARP Snooping (#471)
13 | - Enabling the tagging of DHCP packets with the port number (from @okrieg or @pjd-nu)
14 |
15 | # Solution
16 | -----------
17 |
18 | * All switch drivers would have a method called `get_capabilities()`.
19 |
20 | * This method would return a list with all capabilities supported by the switch.
21 | Its implementation could be flexible; it could simply return a hardcoded list, or generate it from some database (a rough sketch follows at the end of this section).
22 |
23 |
24 | * Any API that exposes a switch capability would do
25 | `assert capability in switch.get_capabilities()` and then proceed.
26 |
27 | * `show_switch()` will be updated to show switch capabilities.
28 |
29 | * `show_node()` can be updated to show the capabilities supported by a nic. This
30 | will be useful for end users to pick nodes based on capabilities. Admins would
31 | see all capabilities while non-admin users would only see certain limited capabilities.
32 | What to show and what not to show under this can be decided as we add support for
33 | a new switch capability.
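
A minimal sketch of how the pieces above could fit together is shown below. It is
illustrative only: the driver class, the capability names, and the
`configure_jumbo_frames()` call are made-up examples of the proposed interface, not
existing HIL code.

```
class SomeSwitchDriver(Switch):
    """A hypothetical switch driver implementing the proposed method."""

    def get_capabilities(self):
        # A driver could hard-code the list (as here) or build it from a
        # database; callers only rely on the returned list of names.
        return ['jumbo-frames', 'qos']


def configure_jumbo_frames(switch, port):
    """A hypothetical API call that exposes one of the capabilities."""
    # Guard on the capability before doing anything switch-specific:
    assert 'jumbo-frames' in switch.get_capabilities()
    # ... proceed with the switch-specific operation ...
```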
34 |
35 |
36 | # Alternative Solutions
37 | -----------------------
38 | We discussed having another method called `has_capability()` that would return whether a
39 | capability exists in the list returned by `get_capabilities()`. But at this point
40 | it seems redundant, because `ensure_legal_operations()` can just do the check by itself,
41 | instead of requiring a new method.
42 |
43 | # Arch Impact
44 | -----------------
45 |
46 | Doesn't affect the current architecture, but opens up the debate about how network
47 | topologies might look if a network spans switches with different capabilities.
48 |
49 | # Security
50 | ----------
51 |
52 | This doesn't seem to have any impact on security.
53 |
-------------------------------------------------------------------------------- /tests/custom_lint.py: --------------------------------------------------------------------------------
1 | """Tests performing linter-like checks"""
2 |
3 | import ast
4 | from os.path import dirname, join
5 | from subprocess import check_output
6 |
7 |
8 | # Root of the repository. Note that this is relative to this file,
9 | # so if this file is moved, this may need to be changed:
10 | source_root = dirname(dirname(__file__))
11 |
12 |
13 | def test_logger_format_strings():
14 |     """Scan for proper use of logger format strings
15 |
16 |     Per @zenhack's comment on issue #629:
17 |
18 |     > All over the codebase you can find statements like:
19 |     >
20 |     > logger.error('Foo: %r' % bar)
21 |     >
22 |     > The % operator being python's format-string splicing operator. The
23 |     > problem with this is that the logging functions do the format
24 |     > string splicing themselves, i.e. what you want in this case is:
25 |     >
26 |     > logger.error('Foo: %r', bar)
27 |     >
28 |     > This opens up the possibility of format-string injection
29 |     > vulnerabilities. Frankly, this is too easy to do, especially
30 |     > since in other contexts % is the correct thing. We ought to
31 |     > (a) make sure all instances of this mistake are fixed, and (b)
32 |     > come up with a way to catch this mistake automatically going
33 |     > forward; perhaps some kind of linter.
34 |
35 |     This is that linter; it scans the source tree looking for places
36 |     where the logging functions are called with any first argument
37 |     that isn't a string literal.
38 |     """
39 |
40 |     files = check_output([join(source_root, 'ci', 'list_tracked_pyfiles.sh')])\
41 |         .strip().split('\n')
42 |
43 |     for filename in files:
44 |         with open(join(source_root, filename)) as f:
45 |             tree = ast.parse(f.read(), filename=filename)
46 |         LogCallVisitor(filename).visit(tree)
47 |
48 |
49 | class LogCallVisitor(ast.NodeVisitor):
50 |     """Ast node visitor used by test_logger_format_strings."""
51 |
52 |     def __init__(self, filename):
53 |         self.filename = filename
54 |
55 |     def visit_Call(self, node):
56 |         """
57 |         This function is called on all "Call" nodes in the ast, i.e.
58 |         anything where an expression is being called:
59 |
60 |             foo(bar)
61 |             foo.baz(bar)
62 |             foo[quux](bar, baz)
63 |         """
64 |         # First, filter this out to the set of calls we care about:
65 |         #
66 |         # 1. Make sure this is a call to an attribute (method), e.g.
67 |         #    foo.bar(baz):
68 |         if not isinstance(node.func, ast.Attribute):
69 |             return
70 |         # 2. Make sure the name of the method is one of the recognized
71 |         #    logging method names. In theory this could give us
72 |         #    false positives if someone names another function after
73 |         #    one of these, or false negatives if we store one of these
74 |         #    in a variable (don't do that).
We could be smarter about 75 | # figuring out what function is being called, but this is 76 | # probably good enough: 77 | logfunc_names = { 78 | 'critical', 79 | 'error', 80 | 'warn', 81 | 'warning', 82 | 'info', 83 | 'debug', 84 | } 85 | if node.func.attr not in logfunc_names: 86 | return 87 | 88 | # We've decided this is a logging call; sanity check it: 89 | assert len(node.args) != 0, ( 90 | "Logging function called with zero arguments at %r " 91 | "line %d column %d." % (self.filename, 92 | node.lineno, 93 | node.col_offset) 94 | ) 95 | assert isinstance(node.args[0], ast.Str), ( 96 | "Logging function called with non-string literal format " 97 | "string at %r line %d column %d." % (self.filename, 98 | node.lineno, 99 | node.col_offset) 100 | ) 101 | -------------------------------------------------------------------------------- /tests/deployment/headnodes.py: -------------------------------------------------------------------------------- 1 | """Unit tests for headnodes. 2 | 3 | These require an actual libvirt daemon (and full HIL setup), and are 4 | somewhat particular to the MOC's development environment. They may be 5 | difficult to run in other contexts. 6 | """ 7 | 8 | import json 9 | 10 | from hil.test_common import config_testsuite, fail_on_log_warnings, \ 11 | fresh_database, with_request_context, headnode_cleanup, \ 12 | network_create_simple, server_init 13 | from hil.dev_support import have_dry_run 14 | from hil import config, api 15 | import pytest 16 | 17 | 18 | @pytest.fixture 19 | def configure(): 20 | """Configure HIL""" 21 | config_testsuite() 22 | config.load_extensions() 23 | 24 | 25 | fail_on_log_warnings = pytest.fixture(autouse=True)(fail_on_log_warnings) 26 | fresh_database = pytest.fixture(fresh_database) 27 | server_init = pytest.fixture(server_init) 28 | 29 | 30 | with_request_context = pytest.yield_fixture(with_request_context) 31 | 32 | 33 | headnode_cleanup = pytest.fixture(headnode_cleanup) 34 | pytestmark = pytest.mark.usefixtures('configure', 35 | 'server_init', 36 | 'fresh_database', 37 | 'with_request_context', 38 | 'headnode_cleanup') 39 | 40 | 41 | class TestHeadNode: 42 | """Headnode related deployment tests.""" 43 | 44 | def test_headnode(self): 45 | """Test each of the headnode related operations 46 | 47 | * create 48 | * connect_hnic, 49 | * connect_network 50 | * start 51 | * stop 52 | * delete 53 | """ 54 | api.project_create('anvil-nextgen') 55 | network_create_simple('spider-web', 'anvil-nextgen') 56 | api.headnode_create('hn-0', 'anvil-nextgen', 'base-headnode') 57 | api.headnode_create_hnic('hn-0', 'hnic-0') 58 | api.headnode_connect_network('hn-0', 'hnic-0', 'spider-web') 59 | if have_dry_run(): 60 | pytest.xfail("Running in dry-run mode; can't talk to libvirt.") 61 | assert json.loads(api.show_headnode('hn-0'))['vncport'] is None 62 | api.headnode_start('hn-0') 63 | assert json.loads(api.show_headnode('hn-0'))['vncport'] is not None 64 | api.headnode_stop('hn-0') 65 | api.headnode_delete('hn-0') 66 | 67 | def test_headnode_deletion_while_running(self): 68 | """Test deleting a headnode while it's running.""" 69 | api.project_create('anvil-nextgen') 70 | api.headnode_create('hn-0', 'anvil-nextgen', 'base-headnode-2') 71 | api.headnode_start('hn-0') 72 | api.headnode_delete('hn-0') 73 | -------------------------------------------------------------------------------- /tests/stress.py: -------------------------------------------------------------------------------- 1 | """Stress tests. 
2 | 3 | Tests here are catch problems like resource leaks, that only become apparent 4 | after a certain amount of use. 5 | """ 6 | 7 | from hil.test_common import config_testsuite, fresh_database, \ 8 | fail_on_log_warnings, server_init 9 | from hil import api, config, rest 10 | 11 | import json 12 | import pytest 13 | 14 | 15 | @pytest.fixture 16 | def configure(): 17 | """Configure HIL""" 18 | config_testsuite() 19 | config.load_extensions() 20 | 21 | 22 | fail_on_log_warnings = pytest.fixture(autouse=True)(fail_on_log_warnings) 23 | fresh_database = pytest.fixture(fresh_database) 24 | server_init = pytest.fixture(server_init) 25 | 26 | 27 | pytestmark = pytest.mark.usefixtures('configure', 28 | 'fresh_database', 29 | 'server_init') 30 | 31 | 32 | def test_many_http_queries(): 33 | """Put a few objects in the db, then bombard the api with queries. 34 | 35 | This is intended to shake out problems like the resource leak discussed 36 | in issue #454. 37 | """ 38 | # NOTE: Now that the session is managed by Flask-SQLAlchemy, failures here 39 | # are unlikely to be regressions of the issue that #454 fixed; we're no 40 | # longer managing the lifecycle of the session ourselves. It's not obvious 41 | # that this is more than clutter now, but let's not be too trigger happy 42 | # about deleting tests. 43 | with rest.app.test_request_context(): 44 | rest.init_auth() 45 | api.node_register( 46 | node='node-99', 47 | obmd={ 48 | 'uri': 'http://obmd.example.com/nodes/node-99', 49 | 'admin_token': 'secret', 50 | }, 51 | ) 52 | api.node_register( 53 | node='node-98', 54 | obmd={ 55 | 'uri': 'http://obmd.example.com/nodes/node-98', 56 | 'admin_token': 'secret', 57 | }, 58 | ) 59 | api.node_register( 60 | node='node-97', 61 | obmd={ 62 | 'uri': 'http://obmd.example.com/nodes/node-97', 63 | 'admin_token': 'secret', 64 | }, 65 | ) 66 | api.node_register_nic('node-99', 'eth0', 'DE:AD:BE:EF:20:14') 67 | api.node_register_nic('node-98', 'eth0', 'DE:AD:BE:EF:20:15') 68 | api.node_register_nic('node-97', 'eth0', 'DE:AD:BE:EF:20:16') 69 | api.project_create('anvil-nextgen') 70 | api.project_create('anvil-legacy') 71 | api.project_connect_node('anvil-nextgen', 'node-99') 72 | api.project_connect_node('anvil-legacy', 'node-98') 73 | 74 | client = rest.app.test_client() 75 | 76 | def _show_nodes(path): 77 | """Helper for the loop below. 78 | 79 | This does a GET on path, which must return a json list of names of 80 | nodes. It will then query the state of each node. If any request does 81 | not return 200 or has a body which is not valid json, the test will 82 | fail. 
83 | """ 84 | resp = client.get(path) 85 | assert resp.status_code == 200 86 | for node in json.loads(resp.get_data()): 87 | resp = client.get('/v0/nodes/%s' % node) 88 | assert resp.status_code == 200 89 | # At least make sure the body parses: 90 | json.loads(resp.get_data()) 91 | 92 | for _i in range(100): 93 | _show_nodes('/v0/nodes/free') 94 | resp = client.get('/v0/projects') 95 | assert resp.status_code == 200 96 | for project in json.loads(resp.get_data()): 97 | _show_nodes('/v0/project/%s/nodes' % project) 98 | -------------------------------------------------------------------------------- /tests/unit/api/maintenance-pool.py: -------------------------------------------------------------------------------- 1 | """Tests related to the maintenance pool.""" 2 | from hil import model, config, api 3 | from hil.test_common import config_testsuite, config_merge, fresh_database, \ 4 | fail_on_log_warnings, additional_db, with_request_context, \ 5 | server_init, LoggedWarningError 6 | from hil.auth import get_auth_backend 7 | import pytest 8 | 9 | OBM_TYPE_MOCK = 'http://schema.massopencloud.org/haas/v0/obm/mock' 10 | 11 | 12 | @pytest.fixture 13 | def configure(): 14 | """Configure HIL""" 15 | config_testsuite() 16 | config_merge({ 17 | 'auth': { 18 | 'require_authentication': 'False', 19 | }, 20 | 'extensions': { 21 | 'hil.ext.auth.null': None, 22 | 'hil.ext.auth.mock': '', 23 | 'hil.ext.switches.mock': '', 24 | 'hil.ext.network_allocators.null': None, 25 | 'hil.ext.network_allocators.vlan_pool': '', 26 | }, 27 | 'hil.ext.network_allocators.vlan_pool': { 28 | 'vlans': '40-80', 29 | }, 30 | 'maintenance': { 31 | 'maintenance_project': 'maintenance', 32 | # Keystone url acts as dummy for posting 33 | 'url': 'http://127.0.0.1:9999/test/' 34 | } 35 | }) 36 | config.load_extensions() 37 | 38 | 39 | fresh_database = pytest.fixture(fresh_database) 40 | additional_database = pytest.fixture(additional_db) 41 | fail_on_log_warnings = pytest.fixture(fail_on_log_warnings) 42 | server_init = pytest.fixture(server_init) 43 | 44 | 45 | with_request_context = pytest.yield_fixture(with_request_context) 46 | 47 | 48 | @pytest.fixture 49 | def set_admin_auth(): 50 | """Set admin auth for all calls""" 51 | get_auth_backend().set_admin(True) 52 | 53 | 54 | @pytest.fixture 55 | def maintenance_proj_init(): 56 | """Create maintenance project.""" 57 | api.project_create('maintenance') 58 | 59 | 60 | def new_node(name): 61 | """Create a mock node named ``name``""" 62 | api.node_register( 63 | node=name, 64 | obm={ 65 | "type": OBM_TYPE_MOCK, 66 | "host": "ipmihost", 67 | "user": "root", 68 | "password": "tapeworm", 69 | }, 70 | obmd={ 71 | 'uri': 'http://obmd.example.com/node/' + name, 72 | 'admin_token': 'secret', 73 | }, 74 | ) 75 | 76 | 77 | default_fixtures = ['fail_on_log_warnings', 78 | 'configure', 79 | 'fresh_database', 80 | 'server_init', 81 | 'with_request_context', 82 | 'set_admin_auth'] 83 | 84 | pytestmark = pytest.mark.usefixtures(*default_fixtures) 85 | 86 | 87 | class TestProjectDetachNodeMaintenance: 88 | """Test project_detach_node with maintenance pool enabled. 89 | The main point of this test is to ensure that the node goes 90 | into the maintenance pool if it is not already, and that the 91 | POST request is successfully detected with an intentional error.""" 92 | 93 | def test_project_detach_node_maintenance(self, maintenance_proj_init): 94 | """Test that project_detach_node removes the node from the project. 95 | Note that the maintenance server has a fake url. 
We expect it to 96 | fail during the connection.""" 97 | api.project_create('anvil-nextgen') 98 | new_node('node-99') 99 | api.project_connect_node('anvil-nextgen', 'node-99') 100 | # Should raise error due to arbitrary POST url: 101 | with pytest.raises(LoggedWarningError): 102 | api.project_detach_node('anvil-nextgen', 'node-99') 103 | maintenance_proj = api.get_or_404(model.Project, 'maintenance') 104 | node = api.get_or_404(model.Node, 'node-99') 105 | assert node.project == maintenance_proj 106 | -------------------------------------------------------------------------------- /tests/unit/class_resolver.py: -------------------------------------------------------------------------------- 1 | """Test the hil.class_resolver module.""" 2 | import hil 3 | from hil.class_resolver import concrete_class_for, build_class_map_for 4 | from hil import model 5 | from hil.test_common import fail_on_log_warnings 6 | import pytest 7 | 8 | mockapi_name = 'http://schema.massopencloud.org/haas/v0/' 9 | fail_on_log_warnings = pytest.fixture(autouse=True)(fail_on_log_warnings) 10 | 11 | 12 | @pytest.fixture(autouse=True) 13 | def mock_extensions(): 14 | """Import the mock drivers. 15 | 16 | Just used for the side-effect of registering the subclasses. 17 | """ 18 | # pylint: disable=unused-variable 19 | import hil.ext.switches.mock 20 | 21 | 22 | class Food(object): 23 | """A superclass to test with class_resolver""" 24 | 25 | 26 | class Apple(Food): 27 | """A subclass to test with class_resolver""" 28 | api_name = 'apple' 29 | 30 | 31 | class Orange(Food): 32 | """A subclass to test with class_resolver""" 33 | api_name = 'orange' 34 | 35 | 36 | class Drink(object): 37 | """A superclass to test with class_resolver""" 38 | 39 | 40 | class Juice(Drink): 41 | """A subclass to test with class_resolver""" 42 | 43 | 44 | class OrangeJuice(Juice): 45 | """A subclass to test with class_resolver""" 46 | api_name = 'orange' 47 | 48 | 49 | class _AppleJuice(Juice): 50 | """A subclass with no api_name, to test with class_resolver""" 51 | # _AppleJuice is an implementation detail; we don't give it 52 | # an ``api_name`` because we don't want to expose it to users... for some 53 | # reason. 54 | 55 | 56 | class GrapeJuice(Juice): 57 | """A subclass to test with class_resolver""" 58 | api_name = 'grape' 59 | 60 | 61 | def test_class_resolver(): 62 | """Test class_resolver with our test classes, above.""" 63 | build_class_map_for(Food) 64 | build_class_map_for(Drink) 65 | 66 | assert concrete_class_for(Food, 'apple') is Apple 67 | assert concrete_class_for(Food, 'orange') is Orange 68 | assert concrete_class_for(Food, 'grape') is None 69 | assert concrete_class_for(Drink, 'apple') is None 70 | assert concrete_class_for(Drink, 'orange') is OrangeJuice 71 | assert concrete_class_for(Drink, 'grape') is GrapeJuice 72 | 73 | 74 | def test_class_Switch(): 75 | """Test class_resolver with MockSwitch""" 76 | build_class_map_for(model.Switch) 77 | assert concrete_class_for(model.Switch, mockapi_name + "switches/mock") \ 78 | is hil.ext.switches.mock.MockSwitch 79 | -------------------------------------------------------------------------------- /tests/unit/cli.py: -------------------------------------------------------------------------------- 1 | """Tests for the command line tools. 2 | 3 | Note that this is not just `hil.cli`, but `hil.command` as well. 
4 | """ 5 | import pytest 6 | import tempfile 7 | import os 8 | import signal 9 | from subprocess import check_call, check_output, Popen, CalledProcessError, \ 10 | STDOUT 11 | from time import sleep 12 | from hil.test_common import fail_on_log_warnings 13 | 14 | 15 | fail_on_log_warnings = pytest.fixture(autouse=True)(fail_on_log_warnings) 16 | 17 | 18 | @pytest.fixture(autouse=True) 19 | def make_config(request): 20 | """Generate a temporary config file.""" 21 | tmpdir = tempfile.mkdtemp() 22 | cwd = os.getcwd() 23 | os.chdir(tmpdir) 24 | with open('hil.cfg', 'w') as f: 25 | # We need to make sure the database ends up in the tmpdir directory, 26 | # and Flask-SQLAlchemy doesn't seem to want to do relative paths, so 27 | # we can't just do a big string literal. 28 | config = '\n'.join([ 29 | '[client]', 30 | '[headnode]', 31 | 'trunk_nic = eth0', 32 | 'base_imgs = base-headnode, img1, img2, img3, img4', 33 | 'libvirt_endpoint = qemu:///system', 34 | '[database]', 35 | 'uri = sqlite:///%s/hil.db' % tmpdir, 36 | '[extensions]', 37 | 'hil.ext.auth.null =', 38 | 'hil.ext.network_allocators.null =', 39 | ]) 40 | f.write(config) 41 | os.chmod('hil.cfg', 0o600) 42 | 43 | def cleanup(): 44 | """Remove the config file, database, and temp dir.""" 45 | os.remove('hil.cfg') 46 | os.remove('hil.db') 47 | os.chdir(cwd) 48 | os.rmdir(tmpdir) 49 | 50 | request.addfinalizer(cleanup) 51 | 52 | 53 | def test_db_create(): 54 | """Create the database via the cli.""" 55 | check_call(['hil-admin', 'db', 'create']) 56 | 57 | 58 | def runs_for_seconds(cmd, seconds=1): 59 | """Test if the command ``cmd`` runs for at least ``seconds`` seconds. 60 | 61 | ``cmd`` is a list containing the name of a command and its arguments. 62 | 63 | ``seconds`` is a number of seconds (by default 1). 64 | 65 | ``run_for_seconds`` will execute ``cmd``, wait for ``seconds`` seconds, 66 | send SIGTERM to the process, and then wait() for it. If the exit status 67 | indicates that it stopped for any reason other than SIGTERM, 68 | ``run_for_seconds`` returns False, otherwise it returns True. 69 | 70 | This is useful to check that a server process does not immediately die on 71 | startup, though it's a bit of a hack --- checking rigorously would require 72 | extra knowledge of the workings of that process (hooray for the halting 73 | problem). 74 | """ 75 | proc = Popen(cmd) 76 | sleep(seconds) 77 | proc.terminate() 78 | status = proc.wait() 79 | return status == -signal.SIGTERM 80 | 81 | 82 | def test_run_dev_server(): 83 | """Check that hil-admin run_dev_server doesn't immediately die.""" 84 | check_call(['hil-admin', 'db', 'create']) 85 | assert runs_for_seconds( 86 | ['hil-admin', 'run-dev-server', '--port', '5000'], seconds=1) 87 | 88 | 89 | def test_serve_networks(): 90 | """Check that hil-admin serve-networks doesn't immediately die.""" 91 | check_call(['hil-admin', 'db', 'create']) 92 | assert runs_for_seconds(['hil-admin', 'serve-networks'], seconds=1) 93 | 94 | 95 | @pytest.mark.parametrize('command', [ 96 | ['hil-admin', 'run-dev-server', '--port', '5000'], 97 | ['hil-admin', 'serve-networks'], 98 | ]) 99 | def test_db_init_error(command): 100 | """Test that a command fails if the database has not been created.""" 101 | try: 102 | check_output(command, stderr=STDOUT) 103 | assert False, 'Should have failed, but exited successfully.' 
104 | except CalledProcessError as e: 105 | assert 'Database schema is not initialized' in e.output, ( 106 | 'Should have printed an error re: database initialization, ' 107 | 'but printed %r' % e.output 108 | ) 109 | -------------------------------------------------------------------------------- /tests/unit/client_unit.py: -------------------------------------------------------------------------------- 1 | """Unit tests for the client library.""" 2 | 3 | import flask 4 | import pytest 5 | from schema import Schema 6 | 7 | from hil import config, rest 8 | from hil.client.base import FailedAPICallException 9 | from hil.client.client import Client 10 | from hil.test_common import HybridHTTPClient, fail_on_log_warnings, \ 11 | server_init, config_testsuite 12 | 13 | fail_on_log_warnings = pytest.fixture(fail_on_log_warnings) 14 | server_init = pytest.fixture(server_init) 15 | 16 | 17 | @pytest.fixture() 18 | def configure(): 19 | """Fixture to load the HIL config.""" 20 | config_testsuite() 21 | config.load_extensions() 22 | 23 | 24 | pytestmark = pytest.mark.usefixtures('fail_on_log_warnings', 25 | 'configure', 26 | 'server_init') 27 | 28 | 29 | def test_non_json_response(): 30 | """The client library should raise an error when the response body is 31 | unexpectedly not JSON. 32 | """ 33 | # Endpoint is arbitrary: 34 | endpoint = 'http:/127.0.0.1:9933' 35 | client = Client(endpoint, HybridHTTPClient(endpoint)) 36 | 37 | # Override one of the API calls with a different implementation: 38 | # pylint: disable=unused-variable 39 | @rest.rest_call('GET', '/nodes/free', Schema({})) 40 | def list_free_nodes(): 41 | """Mock API call for testing; always raises an error.""" 42 | flask.abort(500) 43 | 44 | try: 45 | client.node.list('free') 46 | assert False, 'Client library did not report an error!' 47 | except FailedAPICallException as e: 48 | # Make sure it's the right error: 49 | assert e.error_type == 'unknown', 'Wrong error type.' 50 | -------------------------------------------------------------------------------- /tests/unit/dev_support.py: -------------------------------------------------------------------------------- 1 | """Test the hil.dev_support module.""" 2 | 3 | from hil.dev_support import no_dry_run 4 | import pytest 5 | from hil.test_common import fail_on_log_warnings, config_merge 6 | 7 | fail_on_log_warnings = pytest.fixture(autouse=True)(fail_on_log_warnings) 8 | 9 | 10 | # We test two ways of using the decorator: applying it to a freestanding 11 | # function, and applying it to an instance method. 12 | 13 | 14 | def _function(): 15 | """Helper which uses no_dry_run on a plain function.""" 16 | @no_dry_run 17 | def func(): 18 | """Assert false, so we can check if the function is called.""" 19 | assert False 20 | func() 21 | 22 | 23 | def _method(): 24 | """Helper which uses no_dry_run on a method.""" 25 | class Cls: 26 | """Test class to carry the method.""" 27 | 28 | @no_dry_run 29 | def method(self): 30 | """Assert false, so we can check if the method is called.""" 31 | assert False 32 | 33 | obj = Cls() 34 | obj.method() 35 | 36 | 37 | # We test the decorator both with the option enabled and with it disabled. 
38 | def _dry(func): 39 | """Call ``func`` with dry_run enabled.""" 40 | config_merge({'devel': {'dry_run': 'True'}}) 41 | func() 42 | 43 | 44 | def _wet(func): 45 | """Call ``func`` with dry_run disabled.""" 46 | config_merge({'devel': {'dry_run': None}}) 47 | with pytest.raises(AssertionError): 48 | func() 49 | 50 | 51 | # Actual test cases: 52 | def test_dry_function(): 53 | """Test dry_run enabled on a function.""" 54 | _dry(_function) 55 | 56 | 57 | def test_wet_function(): 58 | """Test dry_run disabled on a function.""" 59 | _wet(_function) 60 | 61 | 62 | def test_dry_method(): 63 | """Test dry_run enabled on a method.""" 64 | _dry(_method) 65 | 66 | 67 | def test_wet_method(): 68 | """Test dry_run disabled on a method.""" 69 | _wet(_method) 70 | -------------------------------------------------------------------------------- /tests/unit/ext/auth/mock.py: -------------------------------------------------------------------------------- 1 | """Test the mock auth backend""" 2 | from hil import config 3 | from hil.auth import get_auth_backend 4 | from hil.errors import AuthorizationError 5 | from hil.model import db, Project 6 | from hil.test_common import config_testsuite, config_merge, fresh_database, \ 7 | with_request_context, fail_on_log_warnings, server_init 8 | import pytest 9 | 10 | 11 | @pytest.fixture 12 | def configure(): 13 | """Configure HIL""" 14 | config_testsuite() 15 | config_merge({ 16 | 'extensions': { 17 | 'hil.ext.auth.mock': '', 18 | 'hil.ext.auth.null': None, 19 | }, 20 | }) 21 | config.load_extensions() 22 | 23 | 24 | fail_on_log_warnings = pytest.fixture(autouse=True)(fail_on_log_warnings) 25 | fresh_database = pytest.fixture(fresh_database) 26 | server_init = pytest.fixture(server_init) 27 | with_request_context = pytest.yield_fixture(with_request_context) 28 | 29 | 30 | @pytest.fixture 31 | def load_projects(): 32 | """Add a couple probjects to the database for us to work with""" 33 | db.session.add(Project("manhattan")) 34 | db.session.add(Project("runway")) 35 | 36 | 37 | @pytest.fixture 38 | def auth_backend(): 39 | """Fixture returning the auth backend""" 40 | return get_auth_backend() 41 | 42 | 43 | pytestmark = pytest.mark.usefixtures('configure', 44 | 'fresh_database', 45 | 'server_init', 46 | 'with_request_context', 47 | 'load_projects') 48 | 49 | 50 | def test_default_no_admin(auth_backend): 51 | """By default, admin access should be denied.""" 52 | with pytest.raises(AuthorizationError): 53 | auth_backend.require_admin() 54 | 55 | 56 | def test_default_no_project(auth_backend): 57 | """By default, access to an arbitrary project should be denied.""" 58 | with pytest.raises(AuthorizationError): 59 | auth_backend\ 60 | .require_project_access(Project.query.first()) 61 | 62 | 63 | def test_set_admin(auth_backend): 64 | """Setting the admin status should affect require_admin.""" 65 | auth_backend.set_admin(True) 66 | auth_backend.require_admin() 67 | auth_backend.set_admin(False) 68 | with pytest.raises(AuthorizationError): 69 | auth_backend.require_admin() 70 | 71 | 72 | def test_set_project_access(auth_backend): 73 | """Setting the project should affect require_project_access.""" 74 | runway = Project.query.filter_by(label="runway").one() 75 | manhattan = Project.query.filter_by(label="manhattan").one() 76 | auth_backend.set_project(runway) 77 | auth_backend.require_project_access(runway) 78 | auth_backend.set_project(manhattan) 79 | auth_backend.require_project_access(manhattan) 80 | with pytest.raises(AuthorizationError): 81 | 
auth_backend.require_project_access(runway) 82 | 83 | 84 | def test_admin_implies_project_access(auth_backend): 85 | """Admin access implies access to any project.""" 86 | runway = Project.query.filter_by(label="runway").one() 87 | manhattan = Project.query.filter_by(label="manhattan").one() 88 | auth_backend.set_admin(True) 89 | auth_backend.require_project_access(runway) 90 | auth_backend.require_project_access(manhattan) 91 | -------------------------------------------------------------------------------- /tests/unit/ext/switches/common.py: -------------------------------------------------------------------------------- 1 | """Unit tests for hil.ext.switches.common""" 2 | 3 | import pytest 4 | 5 | from hil import config 6 | from hil.test_common import config_testsuite, config_merge 7 | 8 | 9 | @pytest.fixture 10 | def configure(): 11 | """Configure HIL""" 12 | config_testsuite() 13 | config_merge({ 14 | 'auth': { 15 | 'require_authentication': 'True', 16 | }, 17 | 'extensions': { 18 | 'hil.ext.switches.brocade': '', 19 | 'hil.ext.switches.dell': '', 20 | }, 21 | 'hil.ext.switches.brocade': { 22 | 'save': 'True' 23 | }, 24 | 'hil.ext.switches.dell': { 25 | 'save': 'False' 26 | } 27 | }) 28 | config.load_extensions() 29 | 30 | 31 | def test_parse_vlans(): 32 | """Test parse_vlans""" 33 | # Have to import the method here, otherwise anything starting with 34 | # "hil.ext" pollutes the env. 35 | from hil.ext.switches.common import parse_vlans 36 | assert parse_vlans('12,14') == ['12', '14'] 37 | assert parse_vlans('20-22') == ['20', '21', '22'] 38 | assert parse_vlans('1512') == ['1512'] 39 | assert parse_vlans('12,21-24,250,511-514') == [ 40 | '12', '21', '22', '23', '24', '250', '511', '512', '513', '514'] 41 | 42 | 43 | def test_should_save(configure): 44 | """Test should save method""" 45 | from hil.ext.switches.brocade import Brocade 46 | from hil.ext.switches.dell import PowerConnect55xx 47 | from hil.ext.switches.common import should_save 48 | 49 | brocade = Brocade() 50 | dell = PowerConnect55xx() 51 | 52 | assert should_save(brocade) is True 53 | assert should_save(dell) is False 54 | -------------------------------------------------------------------------------- /tests/unit/hil_auth.py: -------------------------------------------------------------------------------- 1 | """Test general properties of the authentication framework. 2 | 3 | This module is called `hil_auth` because pytest is lousy about namespacing; 4 | if we call it `auth`, it chokes on the fact that there's a file `api/auth.py` 5 | as well. grr. 
6 | """ 7 | import pytest 8 | from hil import config 9 | from hil.auth import get_auth_backend 10 | from hil.rest import app 11 | from hil.test_common import config_testsuite, config_merge, fresh_database, \ 12 | fail_on_log_warnings, server_init 13 | 14 | fail_on_log_warnings = pytest.fixture(autouse=True)(fail_on_log_warnings) 15 | server_init = pytest.fixture(server_init) 16 | fresh_database = pytest.fixture(fresh_database) 17 | 18 | 19 | @pytest.fixture 20 | def configure(): 21 | """Configure HIL""" 22 | config_testsuite() 23 | config_merge({ 24 | 'extensions': { 25 | 'hil.ext.auth.mock': '', 26 | 27 | # This extension is enabled by default in the tests, so we need to 28 | # disable it explicitly: 29 | 'hil.ext.auth.null': None, 30 | 31 | 'hil.ext.switches.mock': '', 32 | }, 33 | }) 34 | config.load_extensions() 35 | 36 | 37 | pytestmark = pytest.mark.usefixtures('configure', 38 | 'fresh_database', 39 | 'server_init') 40 | 41 | 42 | def test_require_auth(): 43 | """require_authenticate=True should deny calls with "no special access." 44 | 45 | This is the default setting. We use list_nodes free as an example here. 46 | """ 47 | auth_backend = get_auth_backend() 48 | auth_backend.set_auth_success(False) 49 | client = app.test_client() 50 | resp = client.get('/v0/node/free') 51 | assert resp.status_code == 401 52 | -------------------------------------------------------------------------------- /tests/unit/model.py: -------------------------------------------------------------------------------- 1 | """Functional tests for model.py""" 2 | 3 | # Some Notes: 4 | # 5 | # * We don't really have any agreed-upon requirements about what __repr__ 6 | # should print, but I'm fairly certain I hit an argument mistmatch at 7 | # some point, which is definitely wrong. The test_repr methods are there just 8 | # to make sure it isn't throwing an exception. 9 | 10 | from hil.model import Node, Nic, Project, Headnode, Hnic, Network, \ 11 | NetworkingAction, Metadata 12 | from hil import config 13 | 14 | from hil.test_common import fresh_database, config_testsuite, ModelTest, \ 15 | fail_on_log_warnings 16 | import pytest 17 | 18 | fail_on_log_warnings = pytest.fixture(autouse=True)(fail_on_log_warnings) 19 | 20 | 21 | @pytest.fixture 22 | def configure(): 23 | """Configure HIL.""" 24 | config_testsuite() 25 | config.load_extensions() 26 | 27 | 28 | fresh_database = pytest.fixture(fresh_database) 29 | 30 | 31 | pytestmark = pytest.mark.usefixtures('configure', 'fresh_database') 32 | 33 | 34 | def _test_node(): 35 | """Generate a test node. 36 | 37 | In addition to `TestNode`, several of the other objects require a node, 38 | so we re-use the same one. 
39 | """ 40 | return Node(label='node-99', 41 | obmd_uri='https://obmd.example.com/node/node-99', 42 | # arbitrary: 43 | obmd_admin_token='b6a8a4e183fe26936efcd386e938cbfb') 44 | 45 | 46 | class TestNode(ModelTest): 47 | """ModelTest for Node objects.""" 48 | 49 | def sample_obj(self): 50 | return _test_node() 51 | 52 | 53 | class TestNic(ModelTest): 54 | """ModelTest for Nic objects.""" 55 | 56 | def sample_obj(self): 57 | return Nic(_test_node(), 'ipmi', '00:11:22:33:44:55') 58 | 59 | 60 | class TestProject(ModelTest): 61 | """ModelTest for Project objects.""" 62 | 63 | def sample_obj(self): 64 | return Project('manhattan') 65 | 66 | 67 | class TestHeadnode(ModelTest): 68 | """ModelTest for Headnode objects.""" 69 | 70 | def sample_obj(self): 71 | return Headnode(Project('anvil-nextgen'), 72 | 'hn-example', 'base-headnode') 73 | 74 | 75 | class TestHnic(ModelTest): 76 | """ModelTest for Hnic objects.""" 77 | 78 | def sample_obj(self): 79 | return Hnic(Headnode(Project('anvil-nextgen'), 80 | 'hn-0', 'base-headnode'), 81 | 'storage') 82 | 83 | 84 | class TestNetwork(ModelTest): 85 | """ModelTest for Network objects.""" 86 | 87 | def sample_obj(self): 88 | pj = Project('anvil-nextgen') 89 | return Network(pj, [pj], True, '102', 'hammernet') 90 | 91 | 92 | class TestMetadata(ModelTest): 93 | """ModelTest for Metadata objects.""" 94 | 95 | def sample_obj(self): 96 | return Metadata('EK', 'pk', _test_node()) 97 | 98 | 99 | class TestNetworkingAction(ModelTest): 100 | """ModelTest for NetworkingAction objects.""" 101 | 102 | def sample_obj(self): 103 | nic = Nic(_test_node(), 'ipmi', '00:11:22:33:44:55') 104 | project = Project('anvil-nextgen') 105 | network = Network(project, [project], True, '102', 'hammernet') 106 | return NetworkingAction(nic=nic, 107 | new_network=network, 108 | channel='null') 109 | -------------------------------------------------------------------------------- /tests/unit/test_common.py: -------------------------------------------------------------------------------- 1 | """Self-tests for the test_common module.""" 2 | 3 | 4 | import pytest 5 | import logging 6 | from hil.test_common import fail_on_log_warnings, LoggedWarningError 7 | 8 | fail_on_log_warnings = pytest.fixture(autouse=True)(fail_on_log_warnings) 9 | 10 | hil_logger_names = ['hil', 'hil.rest', 'hil.foobar'] 11 | non_hil_logger_names = ['foo', 'bar', 'quux', 'argparse'] 12 | 13 | 14 | @pytest.mark.parametrize('level,loggername', [ 15 | (level, loggername) 16 | for level in ['warn', 'error', 'critical'] 17 | for loggername in ['hil', 'hil.rest', 'hil.foobar'] 18 | ]) 19 | def test_should_raise(level, loggername): 20 | """Raise an exception if hil logs at or above warning level.""" 21 | logger = logging.getLogger(loggername) 22 | logfn = getattr(logger, level) 23 | with pytest.raises(LoggedWarningError): 24 | logfn('Something bad happened!') 25 | 26 | 27 | @pytest.mark.parametrize('level,loggername', [ 28 | (level, loggername) 29 | for level in ['debug', 'info'] 30 | for loggername in hil_logger_names 31 | ]) 32 | def test_no_raise_low_leve(level, loggername): 33 | """Don't raise an exception at info level or lower.""" 34 | logger = logging.getLogger(loggername) 35 | logfn = getattr(logger, level) 36 | logfn('Nothing to worry about') 37 | 38 | 39 | @pytest.mark.parametrize('level,loggername', [ 40 | (level, loggername) 41 | for level in ['debug', 'info', 'warn', 'error', 'critical'] 42 | for loggername in non_hil_logger_names 43 | ]) 44 | def test_no_raise_non_hil(level, loggername): 45 | """Don't raise an 
exception if some non-hil library logs warnings.""" 46 | logger = logging.getLogger(loggername) 47 | logfn = getattr(logger, level) 48 | logfn("Somebody else's bug.") 49 | 50 | 51 | def test_dont_pollute_other_tests_extensions(): 52 | """Regression test for #697.""" 53 | import sys 54 | for name in sys.modules.keys(): 55 | assert not name.startswith('hil.ext.'), ( 56 | "No extensions should have been loaded, but %r was. This may " 57 | "mean that tests are polluting each others' set of loaded " 58 | "extensions." % name 59 | ) 60 | --------------------------------------------------------------------------------