├── .gitignore ├── .gitreview ├── .zuul.yaml ├── LICENSE ├── README.rst ├── doc ├── requirements.txt └── source │ ├── conf.py │ ├── index.rst │ └── specs ├── requirements.txt ├── setup.cfg ├── setup.py ├── specs ├── antelope │ └── separated-haproxy-service-config.rst ├── kilo │ ├── add-plumgrid-neutron-plugin.rst │ ├── ceph-block-devices.rst │ ├── developer-docs.rst │ ├── implement-ceilometer.rst │ ├── keystone-federation.rst │ ├── keystone-sp-adfs-idp.rst │ ├── master-kilofication.rst │ ├── minimal-kilo.rst │ ├── modularize-neutron-plays.rst │ └── multi-region-swift.rst ├── liberty │ ├── compartementalize-rabbitmq.rst │ ├── enable-venv-support-within-the-roles.rst │ ├── liberty-release.rst │ ├── modularize-neutron-liberty.rst │ ├── named-veths.rst │ ├── plumgrid-support-liberty.rst │ ├── remove-upstream-repo-dependency.rst │ ├── role-haproxy-v2.rst │ ├── tunable-openstack-configuration.rst │ └── upgrade-mariadb-v10.rst ├── mitaka │ ├── build-facts-archive.rst │ ├── convert-aio-bootstrap-to-ansible.rst │ ├── independent-role-repositories.rst │ ├── install-guide.rst │ ├── irr-apt_package_pinning.rst │ ├── irr-galera.rst │ ├── irr-lxc_container_create.rst │ ├── irr-lxc_host.rst │ ├── irr-memcached_server.rst │ ├── irr-openstack_hosts.rst │ ├── irr-pip_install.rst │ ├── irr-pip_lock_down.rst │ ├── irr-rabbitmq.rst │ ├── irr-repo_server.rst │ ├── irr-rsyslog_client.rst │ ├── irr-rsyslog_server.rst │ ├── irr-utility.rst │ ├── lbaasv2.rst │ ├── limit-mysql-config-distribution.rst │ ├── modularize-config.rst │ ├── policy-files-distribution.rst │ ├── role-designate.rst │ ├── role-ironic.rst │ ├── role-zaqar.rst │ └── security-hardening.rst ├── newton │ ├── add-support-for-systemd.rst │ ├── gate-split.rst │ ├── ipv6-project-support.rst │ ├── monasca-ha-monasca-agent.rst │ ├── multi-arch-support.rst │ ├── only-install-venvs.rst │ ├── osa-install-guide-overhaul.rst │ ├── powervm-virt-driver.rst │ ├── role-gnocchi.rst │ ├── role-tacker.rst │ ├── security-rhel7-stig.rst │ ├── standalone-swift.rst │ ├── support-multiple-rabbitmq-clusters.rst │ └── xen-virt-driver.rst ├── ocata │ ├── .keep │ ├── create-ops-guide.rst │ └── octavia.rst ├── pike │ ├── centos-and-dnf.rst │ ├── inventory-pluggable-backends.rst │ ├── monitoring.rst │ ├── opendaylight.rst │ ├── ovs-nsh.rst │ └── replace-ip-generation.rst ├── queens │ ├── ansible-roles-reuse.rst │ ├── blazar.rst │ ├── congress.rst │ ├── deployment-stages.rst │ ├── docs-improvements.rst │ ├── elk-stack.rst │ ├── hybrid-messaging.rst │ ├── hyperconverged-containers.rst │ ├── opendaylight-bgpvpn.rst │ ├── python-build-install-simplification.rst │ └── restructure-repo-management-and-pip-install.yml ├── rocky │ ├── centralized-nginx.rst │ ├── masakari.rst │ ├── openstack-distribution-packages.rst │ └── refactor-inventory.rst ├── templates │ └── template.rst ├── wallaby │ └── ssl-root-ca.rst ├── xena │ └── protecting-plaintext-configs.rst └── zed │ └── internal-tls.rst └── tox.ini /.gitignore: -------------------------------------------------------------------------------- 1 | # Add patterns in here to exclude files created by tools integrated with this 2 | # repository, such as test frameworks from the project's recommended workflow, 3 | # rendered documentation and package builds. 4 | # 5 | # Don't add patterns to exclude files created by preferred personal tools 6 | # (editors, IDEs, your operating system itself even). 
These should instead be 7 | # maintained outside the repository, for example in a ~/.gitignore file added 8 | # with: 9 | # 10 | # git config --global core.excludesfile '~/.gitignore' 11 | 12 | # Compiled source # 13 | ################### 14 | *.com 15 | *.class 16 | *.dll 17 | *.exe 18 | *.o 19 | *.so 20 | *.pyc 21 | build/ 22 | dist/ 23 | doc/build/ 24 | 25 | # Packages # 26 | ############ 27 | # it's better to unpack these files and commit the raw source 28 | # git has its own built in compression methods 29 | *.7z 30 | *.dmg 31 | *.gz 32 | *.iso 33 | *.jar 34 | *.rar 35 | *.tar 36 | *.zip 37 | 38 | # Logs and databases # 39 | ###################### 40 | *.log 41 | *.sql 42 | *.sqlite 43 | logs/* 44 | 45 | # OS generated files # 46 | ###################### 47 | ._* 48 | .tox 49 | *.egg-info 50 | .eggs 51 | 52 | # Generated by pbr while building docs 53 | ###################################### 54 | AUTHORS 55 | ChangeLog 56 | 57 | # Files created by releasenotes build 58 | releasenotes/build 59 | 60 | # Test temp files 61 | tests/common 62 | tests/*.retry 63 | 64 | # Vagrant artifacts 65 | .vagrant 66 | 67 | # Git clones 68 | openstack-ansible-ops 69 | previous 70 | -------------------------------------------------------------------------------- /.gitreview: -------------------------------------------------------------------------------- 1 | [gerrit] 2 | host=review.opendev.org 3 | port=29418 4 | project=openstack/openstack-ansible-specs.git 5 | -------------------------------------------------------------------------------- /.zuul.yaml: -------------------------------------------------------------------------------- 1 | - project: 2 | templates: 3 | - openstack-specs-jobs 4 | - check-requirements 5 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | This work is licensed under a Creative Commons Attribution 3.0 Unported License. 2 | 3 | http://creativecommons.org/licenses/by/3.0/legalcode 4 | -------------------------------------------------------------------------------- /README.rst: -------------------------------------------------------------------------------- 1 | =========================================== 2 | Design Specifications for OpenStack-Ansible 3 | =========================================== 4 | 5 | This repository is used to hold approved design specifications for additions 6 | to the OpenStack-Ansible project. Reviews of the specs are done in gerrit, 7 | using a similar workflow to how we review and merge changes to the code itself. 8 | 9 | The layout of this repository is:: 10 | 11 | specs// 12 | 13 | You can find an example spec in `specs/templates/template.rst`. 14 | 15 | Specifications are proposed for a given release by adding them to the 16 | `specs/` directory and posting it for review. The implementation 17 | status of a blueprint for a given release can be found by looking at the 18 | blueprint in Launchpad. Not all approved blueprints will get fully implemented. 19 | 20 | Specifications have to be re-proposed for every release. The review may be 21 | quick, but even if something was previously approved, it should be re-reviewed 22 | to make sure it still makes sense as written. 23 | 24 | Prior to the Juno development cycle, this repository was not used for spec 25 | reviews. 
Reviews prior to Juno were completed entirely through Launchpad 26 | blueprints:: 27 | 28 | https://blueprints.launchpad.net/openstack-ansible 29 | 30 | Please note, Launchpad blueprints are still used for tracking the 31 | current status of blueprints. For more information, see:: 32 | 33 | https://wiki.openstack.org/wiki/Blueprints 34 | 35 | For more information about working with gerrit, see:: 36 | 37 | https://docs.opendev.org/opendev/infra-manual/latest/developers.html#development-workflow 38 | 39 | To validate that the specification is syntactically correct (i.e. get more 40 | confidence in the Jenkins result), please execute the following command:: 41 | 42 | $ tox 43 | 44 | After running ``tox``, the documentation will be available for viewing in HTML 45 | format in the ``doc/build/`` directory. 46 | -------------------------------------------------------------------------------- /doc/requirements.txt: -------------------------------------------------------------------------------- 1 | # The order of packages is significant, because pip processes them in the order 2 | # of appearance. Changing the order has an impact on the overall integration 3 | # process, which may cause wedges in the gate later. 4 | 5 | # WARNING: 6 | # This file is maintained in the openstack-ansible-tests repository. 7 | # https://opendev.org/openstack/openstack-ansible-tests/src/branch/master/sync/doc/requirements.txt 8 | # If you need to modify this file, update the one in the 9 | # openstack-ansible-tests repository. Once it merges there, the changes will 10 | # automatically be proposed to all the repositories which use it. 11 | 12 | sphinx>=2.0.0,!=2.1.0 # BSD 13 | sphinxcontrib-svg2pdfconverter>=0.1.0 # BSD 14 | openstackdocstheme>=2.2.1 # Apache-2.0 15 | reno>=3.1.0 # Apache-2.0 16 | doc8>=0.6.0 # Apache-2.0 17 | -------------------------------------------------------------------------------- /doc/source/index.rst: -------------------------------------------------------------------------------- 1 | .. OpenStack-Ansible documentation master file 2 | 3 | OpenStack-Ansible Project Specifications 4 | ======================================== 5 | 6 | Spec Templates 7 | -------------- 8 | 9 | .. toctree:: 10 | :glob: 11 | :maxdepth: 1 12 | 13 | specs/templates/* 14 | 15 | Antelope Specifications 16 | ----------------------- 17 | 18 | .. toctree:: 19 | :glob: 20 | :maxdepth: 1 21 | 22 | specs/antelope/* 23 | 24 | Zed Specifications 25 | ------------------ 26 | 27 | .. toctree:: 28 | :glob: 29 | :maxdepth: 1 30 | 31 | specs/zed/* 32 | 33 | Xena Specifications 34 | ------------------- 35 | 36 | .. toctree:: 37 | :glob: 38 | :maxdepth: 1 39 | 40 | specs/xena/* 41 | 42 | Wallaby Specifications 43 | ---------------------- 44 | 45 | .. toctree:: 46 | :glob: 47 | :maxdepth: 1 48 | 49 | specs/wallaby/* 50 | 51 | Rocky Specifications 52 | -------------------- 53 | 54 | .. toctree:: 55 | :glob: 56 | :maxdepth: 1 57 | 58 | specs/rocky/* 59 | 60 | Queens Specifications 61 | --------------------- 62 | 63 | .. toctree:: 64 | :glob: 65 | :maxdepth: 1 66 | 67 | specs/queens/* 68 | 69 | Pike Specifications 70 | ------------------- 71 | 72 | .. toctree:: 73 | :glob: 74 | :maxdepth: 1 75 | 76 | specs/pike/* 77 | 78 | Ocata Specifications 79 | --------------------- 80 | 81 | .. toctree:: 82 | :glob: 83 | :maxdepth: 1 84 | 85 | specs/ocata/* 86 | 87 | Newton Specifications 88 | --------------------- 89 | 90 | .. 
toctree:: 91 | :glob: 92 | :maxdepth: 1 93 | 94 | specs/newton/* 95 | 96 | Mitaka Specifications 97 | --------------------- 98 | 99 | .. toctree:: 100 | :glob: 101 | :maxdepth: 1 102 | 103 | specs/mitaka/* 104 | 105 | Liberty Specifications 106 | ---------------------- 107 | 108 | .. toctree:: 109 | :glob: 110 | :maxdepth: 1 111 | 112 | specs/liberty/* 113 | 114 | Kilo Specifications 115 | ------------------- 116 | 117 | .. toctree:: 118 | :glob: 119 | :maxdepth: 1 120 | 121 | specs/kilo/* 122 | 123 | * :ref:`search` 124 | -------------------------------------------------------------------------------- /doc/source/specs: -------------------------------------------------------------------------------- 1 | ../../specs/ -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | # The order of packages is significant, because pip processes them in the order 2 | # of appearance. Changing the order has an impact on the overall integration 3 | # process, which may cause wedges in the gate later. 4 | 5 | # this is required for the docs build jobs 6 | sphinx>=2.0.0,!=2.1.0 # BSD 7 | openstackdocstheme>=2.2.1 # Apache-2.0 8 | doc8>=0.6.0 # Apache-2.0 9 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | [metadata] 2 | name = openstack-ansible-specs 3 | summary = Design Specifications for OpenStack-Ansible 4 | description_file = 5 | README.rst 6 | author = OpenStack 7 | author_email = openstack-discuss@lists.openstack.org 8 | home_page = https://specs.openstack.org/openstack/openstack-ansible-specs/ 9 | classifier = 10 | Intended Audience :: Developers 11 | License :: OSI Approved :: Apache Software License 12 | Operating System :: POSIX :: Linux 13 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2013 Hewlett-Packard Development Company, L.P. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 12 | # implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | import setuptools 17 | 18 | setuptools.setup( 19 | setup_requires=['pbr>=2.0.0'], 20 | pbr=True) 21 | -------------------------------------------------------------------------------- /specs/kilo/add-plumgrid-neutron-plugin.rst: -------------------------------------------------------------------------------- 1 | Add PLUMgrid plugin to neutron playbooks 2 | ######################################## 3 | :date: 2015-06-2 14:30 4 | :tags: neutron, plugins, networking 5 | 6 | This spec is propsed to insert the capability of using the PLUMgrid 7 | OpenStack Neutron Plugin through the os-ansible neutron playbooks. 
8 | 9 | * https://blueprints.launchpad.net/openstack-ansible/+spec/add-plumgrid-neutron-plugin 10 | 11 | Problem Description 12 | =================== 13 | 14 | PLUMgrid is a core neutron networking plugin that has been a part of OpenStack 15 | neutron since Grizzly. It offers a Network Virtualization Platform that uses 16 | direct communication with the Hypervisor layer to provide all the networking 17 | functionality requested through Neutron APIs. The PLUMgrid Neutron Plugin 18 | implements Neutron v2 APIs and helps configure L2/L3 virtual networks 19 | created through the PLUMgrid Platform. It also implements External Networks 20 | and Port Binding Extensions. 21 | 22 | APIs supported by the PLUMgrid plugin: 23 | - Networks 24 | - Subnets 25 | - Ports 26 | - External Networks 27 | - Routers 28 | - Security Groups 29 | - Quotas 30 | - Port Binding 31 | - Provider Networks 32 | 33 | Proposed Change 34 | =============== 35 | 36 | This change is proposed to add the PLUMgrid plugin as a core plugin option 37 | alongside ml2, which will be the default. This configurability should already 38 | be achieved by the BP: modularize-neutron-plays. 39 | 40 | The rest of the installation for PLUMgrid that requires PLUMgrid Controller 41 | and Compute components, that enable management of the plugin, will be added 42 | externally through Ansible Galaxy roles. 43 | 44 | The changes described below assume the previously mentioned BP modularization 45 | changes in place. 46 | 47 | This feature is proposed for both kilo and juno branches, the juno change 48 | will be carried out first: 49 | 50 | 1. For juno, parameters relevant to the PLUMgrid plugin, namely the plumgrid 51 | core plugin and plugin config file, plumgrid.ini will be added to a new 52 | dictionary item in 'neutron_plugins' in 53 | inventory/group_vars/neutron_all.yml This will allow setting the 54 | 'neutron_plugin_type = plumgrid' if desired. 55 | 56 | 2. For kilo, parameters relevant to the PLUMgrid plugin will be added to a 57 | new dictionary item in 'neutron_plugins' in 58 | 'playbooks/roles/os_neutron/defaults/main.yml'. This will allow setting the 59 | 'neutron_plugin_type' to plumgrid if desired. 60 | 61 | Playbook Impact 62 | --------------- 63 | 64 | 1. In juno, the following files are expected to be modified: 65 | 66 | - rpc_deployment/playbooks/openstack/inventory/group_vars/neutron_all.yml 67 | 68 | The following templates will be created in neutron-common role: 69 | 70 | - rpc_deployment/roles/neutron_common/templates/plugins/plumgrid/plumgrid.ini 71 | - rpc_deployment/roles/neutron_common/templates/plugins/plumgrid/plumlib.ini 72 | - rpc_deployment/roles/neutron_common/templates/rootwrap.d/plumlib.filters 73 | 74 | 2. In kilo, these files are expected to be modified: 75 | 76 | - playbooks/roles/os_neutron/defaults/main.yml 77 | 78 | New templates will be added in the os_neutron role: 79 | 80 | - playbooks/roles/os_neutron/templates/plugins/plumgrid/plumgrid.ini 81 | - playbooks/roles/os_neutron/templates/plugins/plumgrid/plumlib.ini 82 | - playbooks/roles/os_neutron/files/rootwrap.d/plumlib.filters 83 | 84 | Upgrade impact 85 | -------------- 86 | 87 | None 88 | 89 | Alternatives 90 | ------------ 91 | 92 | To continue using the default ml2 and linuxbridge-agent neutron deployment 93 | with no possibility of other core neutron plugins. 94 | 95 | Security Impact 96 | --------------- 97 | 98 | N/A 99 | 100 | Performance Impact 101 | ------------------ 102 | 103 | This change is not expected to impact performance. 
A typical PLUMgrid plugin 104 | installation, will furthermore disable neutron agent installations. Hence the 105 | overall performance is expected to remain the same. 106 | 107 | End User Impact 108 | --------------- 109 | 110 | End users will be able to leverage the enhanced scale and operational capabilities 111 | provided by the PLUMgrid plugin when choosing to install this plugin. Further details 112 | can be found in the References section below. 113 | 114 | Deployer Impact 115 | --------------- 116 | 117 | This will provide Deployers with the option to use PLUMgrid as the neutron 118 | plugin. Upgrading from a previous release to use this new feature will only 119 | be possible through a re-run of the neutron playbooks as well. This change 120 | does not effect running instances within the cloud. 121 | 122 | Developer Impact 123 | ---------------- 124 | 125 | This change adds further installable options and as such does not 126 | effect the default flow of the playbooks. 127 | 128 | 129 | Dependencies 130 | ------------ 131 | 132 | None 133 | 134 | Implementation 135 | ============== 136 | 137 | Assignee(s) 138 | ----------- 139 | 140 | Primary assignee: 141 | https://launchpad.net/~javeria-ak 142 | 143 | Work items 144 | ---------- 145 | 146 | This change will use the modularized neutron playbooks to provide 147 | PLUMgrid as a plugin option. A set of three new template files will 148 | be added to the neutron plays to support plumgrid. 149 | 150 | Dependencies 151 | ------------ 152 | 153 | Dependent on: 154 | 155 | - https://review.openstack.org/184665 156 | - https://blueprints.launchpad.net/openstack-ansible/+spec/modularize-neutron-plays 157 | 158 | Testing 159 | ======= 160 | 161 | There are no additional changes required to test this in the current testing 162 | and or gating framework that also covers the neutron components. 163 | 164 | Documentation Impact 165 | ==================== 166 | 167 | Documentation describing how to modify the configuration parameters 168 | to install PLUMgrid will be required. This will be deployer documentation. 169 | 170 | References 171 | ========== 172 | 173 | * http://www.plumgrid.com/ 174 | 175 | * https://wiki.openstack.org/wiki/PLUMgrid-Neutron 176 | -------------------------------------------------------------------------------- /specs/kilo/ceph-block-devices.rst: -------------------------------------------------------------------------------- 1 | Ceph Block Devices 2 | #################### 3 | :date: 2015-07-23 12:00 4 | :tags: storage, ceph 5 | 6 | This spec is a proposal to add the ability to configure cinder, glance, and 7 | nova running in an openstack-ansible installation to use an existing Ceph 8 | cluster for the creation of volumes, images, and instances using Ceph block 9 | devices. 10 | 11 | * https://blueprints.launchpad.net/openstack-ansible/+spec/ceph-block-devices 12 | 13 | Problem description 14 | =================== 15 | 16 | This implementation should meet the following user requirements: 17 | 18 | * Cinder Volume Creation: As a User I want to be able to allocate block 19 | storage volumes from the Ceph Storage Cluster to individual virtual machines. 20 | * Cinder Boot from Volume: As a User I want to be able to create a virtual 21 | machine that boots from a block storage device hosted on the Ceph Storage 22 | Cluster. 23 | * Cinder Snapshots: As a User I want to be able to create a snapshot of one or 24 | more Cinder Volumes. 
25 | * Cinder Backups: As a User I want to be able to use the Ceph Storage Cluster 26 | as a target for cinder backups. 27 | * Glance Storage: As a User I want to be able to use the Ceph Storage Cluster 28 | as a backend for glance. 29 | * Nova Ephemeral Storage: As a User I want to be able to allocate nova instance 30 | storage from the Ceph Storage Cluster. 31 | * Live Migration Support: As an Admin I want to be able to live migrate virtual 32 | machines that depend upon (i.e. boots from/mounts) a block storage device 33 | hosted on the Ceph Storage Cluster. This is inclusive of both Boot from 34 | Volume and Nova ephemeral storage. 35 | 36 | Proposed change 37 | =============== 38 | 39 | 1. Create ceph_client role to handle installation of ceph packages and 40 | and configuration of ceph.conf file. 41 | 42 | 2. Update os_cinder role to conditionally allow cinder-volume to create volumes 43 | in Ceph by setting volume_driver to cinder.volume.drivers.rbd.RBDDriver. 44 | 45 | 3. Update os_nova role to conditionally allow nova to boot from cinder volumes 46 | stored in Ceph. 47 | 48 | 4. Update os_cinder role to conditionally allow cinder-backup to store backups 49 | in Ceph by setting backup_driver to cinder.backup.drivers.ceph. 50 | 51 | 5. Update os_glance role to conditionally allow glance to store images in Ceph 52 | by setting default_store to rbd. 53 | 54 | 6. Update os_nova role to conditionally allow nova to boot virtual machines 55 | directly into Ceph by setting libvirt_images_type to rbd. 56 | 57 | Alternatives 58 | ------------ 59 | 60 | None 61 | 62 | Playbook impact 63 | --------------- 64 | 65 | See Proposed change above. 66 | 67 | Upgrade impact 68 | -------------- 69 | 70 | No default configurations should be altered with the introduction of these 71 | changes, and therefore there should be no impact to the upgrade of an 72 | existing installation. 73 | 74 | Security impact 75 | --------------- 76 | 77 | OpenStack services require users and keys to interface with Ceph. This 78 | implementation should encourage the use of separate Ceph users for each 79 | OpenStack service and ensure that configuration files and keys can only be read 80 | by the intended users. 81 | 82 | Performance impact 83 | ------------------ 84 | 85 | Enabling this functionality may result in performance increases or decreases 86 | across the OpenStack installation. This will highly depend on the hardware and 87 | software configuration of the attached Ceph cluster. 88 | 89 | End user impact 90 | --------------- 91 | 92 | Using Ceph block devices may introduce new features visible to the end user, 93 | such as the ability to live migrate an instance from one hypervisor to another. 94 | Additionally, as stated above, there may be visible performance increases or 95 | decreases depending on several different facters. 96 | 97 | Deployer impact 98 | --------------- 99 | 100 | A deployer will need to explicitly update their inventory and set Ansible 101 | variable overrides to a) enable this functionality and b) correctly interface 102 | with an existing Ceph cluster. 
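To make the deployer-facing side of this more concrete, a rough sketch of the kind of overrides involved follows. The driver and store values are the ones named in this spec; the surrounding variable names are assumptions for illustration only, not a final interface, and would live in something like ``/etc/openstack_deploy/user_variables.yml``::

    # Illustrative only -- the variable names are assumptions; the driver
    # and store values are the ones referenced in this spec.
    glance_default_store: rbd
    nova_libvirt_images_type: rbd
    cinder_backup_driver: cinder.backup.drivers.ceph
    cinder_backends:
      ceph_volumes:
        volume_driver: cinder.volume.drivers.rbd.RBDDriver
        rbd_pool: volumes
        rbd_ceph_conf: /etc/ceph/ceph.conf
        rbd_user: cinder

The point of the sketch is only that enabling Ceph remains an explicit, per-service opt-in by the deployer; defaults stay untouched.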
103 | 104 | Developer impact 105 | ---------------- 106 | 107 | None 108 | 109 | Dependencies 110 | ------------ 111 | 112 | None known 113 | 114 | Implementation 115 | ============== 116 | 117 | Assignee(s) 118 | ----------- 119 | 120 | Primary assignee: 121 | https://launchpad.net/~mattt416 (mattt) 122 | 123 | Other contributors: 124 | https://launchpad.net/~git-harry (git-harry) 125 | https://launchpad.net/~david-wilde-rackspace (d34dh0r53) 126 | 127 | Work items 128 | ---------- 129 | 130 | 1. Create ceph_client role. 131 | 132 | 2. Update os_cinder role to conditionally allow cinder-volume to create volumes 133 | in Ceph. 134 | 135 | 3. Update os_nova role to conditionally allow nova to attach cinder volumes 136 | stored in Ceph. 137 | 138 | 4. Update os_cinder role to conditionally allow cinder-backup to store backups 139 | in Ceph. 140 | 141 | 5. Update os_glance role to conditionally allow glance to store images in Ceph. 142 | 143 | 6. Update os_nova role to conditionally allow nova to boot virtual machines 144 | directly into Ceph. 145 | 146 | Testing 147 | ======= 148 | 149 | No gate-related adjustments will be made to openstack-ansible to support 150 | this change, as no default configurations are being changed here. Additionally, 151 | there are strict limitations on what can run in the all-in-one (AIO) gate 152 | instance. 153 | 154 | Documentation impact 155 | ==================== 156 | 157 | Documentation will need updating to include: 158 | 159 | 1. How to enable Ceph block devices for each of the cinder, glance, and nova services 160 | and what each newly introduced Ansible variable does. 161 | 2. What additional steps are required to be executed on the existing Ceph 162 | cluster to allow the OpenStack installation to interface with the Ceph 163 | cluster. 164 | 165 | References 166 | ========== 167 | 168 | None 169 | -------------------------------------------------------------------------------- /specs/kilo/master-kilofication.rst: -------------------------------------------------------------------------------- 1 | Master Kilofication 2 | ################### 3 | :date: 2015-03-23 13:00 4 | :tags: kilo, update 5 | 6 | Update the various openstack-ansible playbooks and roles in the master 7 | branch with the changes necessary to implement a fully functional and updated 8 | kilo deployment. 9 | 10 | * https://blueprints.launchpad.net/openstack-ansible/+spec/master-kilofication 11 | 12 | Initial work will be based on the k3 tags in each of the openstack projects 13 | since kilo is not yet officially released. 14 | 15 | Problem description 16 | =================== 17 | 18 | Master is set up to deploy Juno; we want the master branch to begin 19 | tracking Kilo. 20 | 21 | 22 | Proposed change 23 | =============== 24 | 25 | As opposed to the minimal-kilo blueprint, which is focused on making the 26 | fewest possible changes necessary to point at kilo and have a deployment that 27 | passes gating, this specification is targeted more at updating all config files 28 | and code to bring in the kilo versions of the configs for each service, parsing 29 | each file for differences and making informed decisions about what values to 30 | take to ensure we have a production-grade deployment system. 31 | 32 | The approach to dealing with differences (e.g. changed defaults for a particular 33 | setting) will be to use the kilo value where possible, adding an option to 34 | make any changed setting tunable if it was not already.
This gives the option 35 | to users who are upgrading from juno to be able to reset a value back to the 36 | juno default if desired, but also means that greenfield deployments of kilo use 37 | the (hopefully better) kilo value. 38 | 39 | Examples of configs impacted (these will differ depending on the service being 40 | worked on):: 41 | 42 | /etc//.conf 43 | /etc//-api-paste.ini 44 | /etc//policy.json 45 | /etc//-.ini 46 | 47 | 48 | 49 | Alternatives 50 | ------------ 51 | 52 | We could, wherever needed, preserve juno settings rather than taking forward 53 | the kilo settings. This is potentially easier on users in an upgrade scenario, 54 | but does mean that new users deploying kilo would get an already out of date 55 | deployment. It also means that we miss an opportunity to implement best 56 | practices deployments, instead sticking on old, less relevant, values. 57 | 58 | 59 | Playbook impact 60 | --------------- 61 | 62 | There will be no impact on the playbooks. These changes are on the dependency 63 | and role level which only impact the configuration files and role options. 64 | 65 | 66 | Upgrade impact 67 | -------------- 68 | 69 | This change will impact upgrades, but upgrades are out of scope for this spec 70 | which will be addressed separately. Largely it addresses greenfield 71 | deployments of kilo. 72 | 73 | 74 | Security impact 75 | --------------- 76 | 77 | These changes will initially be based on BETA code (k3 and rc1 tags of kilo) 78 | which may have consequences regarding security, but work will be done to test 79 | against production kilo when it is released (and prior to the 11.0.0 release 80 | of openstack-ansible being tagged) 81 | 82 | 83 | Performance impact 84 | ------------------ 85 | 86 | Because the Kilo code base is not tested and released, the performance of the 87 | stack will not be in scope at this time. As future work develops to finalize 88 | the roles used in Kilo, work will be done on a per role basis to ensure 89 | performance. 90 | 91 | 92 | End user impact 93 | --------------- 94 | 95 | N/A 96 | 97 | 98 | Deployer impact 99 | --------------- 100 | 101 | As stated previously, this change will initially introduce new BETA code. 102 | Deployers shouldn't be using master at this time. 103 | 104 | 105 | Developer impact 106 | ---------------- 107 | 108 | This change is to allow development of a production grade kilo deployment 109 | 110 | 111 | Dependencies 112 | ------------ 113 | 114 | The spec will introduce a number of new dependencies. At this time not all are 115 | exactly known. However, we can safely say that all new clients will be used 116 | throughout the stack as well as various middlewares. 117 | 118 | 119 | Implementation 120 | ============== 121 | 122 | Assignee(s) 123 | ----------- 124 | 125 | Various 126 | 127 | Work items 128 | ---------- 129 | 130 | Unknown at this time 131 | 132 | Testing 133 | ======= 134 | 135 | No changes to the current testing and or gating framework will be made. Each 136 | change that is made to a service to bring forward new configs and settings will 137 | be required to pass the same gate tests as are required by our production 138 | systems. 139 | 140 | 141 | Documentation impact 142 | ==================== 143 | 144 | This change will likely have documentation impact. Specifically when 145 | documenting changed values or deprecated config items. 
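As a purely hypothetical illustration of the "keep the kilo value, but make it tunable" approach described in the proposed change, a deployer upgrading from juno could pin a changed default back to its old value through a user variable override; the option name and value below are invented for the example and are not part of this spec::

    # /etc/openstack_deploy/user_variables.yml -- hypothetical example only.
    # Keep the juno-era behaviour for a setting whose default changed in kilo.
    nova_example_changed_option: juno_default_value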
146 | 147 | 148 | References 149 | ========== 150 | 151 | N/A 152 | -------------------------------------------------------------------------------- /specs/kilo/minimal-kilo.rst: -------------------------------------------------------------------------------- 1 | Minimal Kilo 2 | ############ 3 | :date: 2015-03-17 21:34 4 | :tags: kilo, minimum, update 5 | 6 | Update master to point to the minimum configuration necessary for 7 | a functional kilo stack. 8 | 9 | * https://blueprints.launchpad.net/openstack-ansible/+spec/minimal-kilo 10 | 11 | This spec is being created to track the work required to get a minimum 12 | viable deployment of kilo. Because the Kilo release of OpenStack has not 13 | yet been released, the work done within this blueprint will pull from the 14 | head of master and stabilize on a given SHA for the time being. 15 | 16 | 17 | Problem description 18 | =================== 19 | 20 | Master is set up to deploy Juno; we want the master branch 21 | to begin tracking Kilo. 22 | 23 | 24 | Proposed change 25 | =============== 26 | 27 | In order to have a minimally functional Kilo stack, there are several issues 28 | raised within Launchpad that need to be resolved. Once 29 | the following issues are resolved, Kilo should be a functional deployment 30 | from the standpoint of gating. The point of this spec is to introduce the 31 | fewest changes into the stack in an effort to enable a Kilo code 32 | base. The changes should pass gating on a per-commit basis. Once this 33 | spec is complete, other work can follow to make Kilo a production-ready 34 | product. 35 | 36 | 37 | Alternatives 38 | ------------ 39 | 40 | There are no alternatives to this approach. Without a bulk commit to address 41 | the minimal changes to get Kilo functional, we will not be able to move forward 42 | with development. 43 | 44 | 45 | Playbook impact 46 | --------------- 47 | 48 | There will be no impact on the playbooks. These changes are on the dependency 49 | and role level, which only impacts the configuration files and role options. 50 | 51 | 52 | Upgrade impact 53 | -------------- 54 | 55 | This change will impact upgrades. The change will introduce new code which 56 | will allow the system to upgrade in place. That said, this is a transitional 57 | spec which will translate into future work to make Kilo a production-ready 58 | product. Upgrades are out of the scope of this spec and it is expected that 59 | Juno to Kilo upgrades will be broken at this point. 60 | 61 | 62 | Security impact 63 | --------------- 64 | 65 | These changes will introduce BETA code which will likely have consequences 66 | regarding security; however, the changes are not geared at production at this 67 | time and will be revised in a fast-follow effort. 68 | 69 | 70 | Performance impact 71 | ------------------ 72 | 73 | Because the Kilo code base is not yet tested and released, the performance of 74 | the stack will not be in scope at this time. As future work develops to 75 | finalize the roles used in Kilo, work will be done on a per-role basis to 76 | ensure performance. 77 | 78 | 79 | End user impact 80 | --------------- 81 | 82 | N/A 83 | 84 | 85 | Deployer impact 86 | --------------- 87 | 88 | As stated previously, this change will introduce new BETA code. Deployers 89 | shouldn't be using master at this time. 90 | 91 | 92 | Developer impact 93 | ---------------- 94 | 95 | This change is geared at enabling developers to begin working on Kilo.
96 | 97 | 98 | Dependencies 99 | ------------ 100 | 101 | The spec will introduce a number of new dependencies. At this time not all are 102 | exactly known. However, we can safely say that all new clients will be used 103 | throughout the stack as well as various middlewares. 104 | 105 | 106 | Implementation 107 | ============== 108 | 109 | Assignee(s) 110 | ----------- 111 | 112 | Primary assignee: 113 | https://launchpad.net/~kevin-carter 114 | 115 | Other contributors: 116 | https://launchpad.net/~nolan-brubaker 117 | 118 | IRC: cloudnull, palendae 119 | 120 | 121 | Work items 122 | ---------- 123 | 124 | In order to have a minimum viable installation of OpenStack Kilo, 125 | the following issues will need to be addressed: 126 | 127 | * `#1428421`_ Keystone.py needs to be updated for kilo 128 | * `#1428431`_ OpenStack Clients need to be updated for Kilo 129 | * `#1428437`_ Update/Removal of pinned Oslo Messaging and Middleware for kilo 130 | * `#1428445`_ Neutron needs plugin references removed for kilo 131 | * `#1428451`_ Heat policy.json file needs to be updated for Kilo 132 | * `#1428469`_ Neutron rootwrap(s) need to be updated for Kilo 133 | * `#1428639`_ Nova requires python-libguestfs in Kilo 134 | 135 | .. _#1428421: https://bugs.launchpad.net/openstack-ansible/+bug/1428421 136 | .. _#1428431: https://bugs.launchpad.net/openstack-ansible/+bug/1428431 137 | .. _#1428437: https://bugs.launchpad.net/openstack-ansible/+bug/1428437 138 | .. _#1428445: https://bugs.launchpad.net/openstack-ansible/+bug/1428445 139 | .. _#1428451: https://bugs.launchpad.net/openstack-ansible/+bug/1428451 140 | .. _#1428469: https://bugs.launchpad.net/openstack-ansible/+bug/1428469 141 | .. _#1428639: https://bugs.launchpad.net/openstack-ansible/+bug/1428639 142 | 143 | 144 | Testing 145 | ======= 146 | 147 | No changes to the current testing and/or gating framework will be made. The 148 | minimum viable Kilo deployment will be required to pass the same gate tests 149 | as are required by our production systems. 150 | 151 | 152 | Documentation impact 153 | ==================== 154 | 155 | This change specifically does not have any documentation impact. 156 | 157 | 158 | References 159 | ========== 160 | 161 | N/A 162 | -------------------------------------------------------------------------------- /specs/kilo/modularize-neutron-plays.rst: -------------------------------------------------------------------------------- 1 | Modularizing Neutron plays for agents and non ml2 plugin support 2 | ################################################################ 3 | :date: 2015-03-30 16:35 4 | :tags: neutron, plugins, agents 5 | 6 | This spec is proposed to enhance the current neutron playbooks, which take a 7 | static approach to plugin and agent insertion, where ml2 and a few agents 8 | are used by default. 9 | 10 | * https://blueprints.launchpad.net/openstack-ansible/+spec/modularize-neutron-plays 11 | 12 | Problem Description 13 | ==================== 14 | 15 | Presently a straightforward approach does not exist to add new plugins and add 16 | or remove agents in the neutron setup. A deployer either has to perform these 17 | changes after the whole setup is complete or make their own changes in the 18 | playbooks. 19 | 20 | Proposed Change 21 | ==================== 22 | 23 | This feature is proposed for both master and juno branches; the juno 24 | effort will be carried out first: 25 | 26 | 1.
For juno, the openstack/roles/neutron_common.yml will be modified to 27 | install a configurable list of plugins and agents through new variables 28 | defined in inventory/group_vars/neutron_all. The default values of these 29 | new variables will be the current set of installed agents and plugins. 30 | 31 | 2. For master, the playbooks/roles/os_neutron/tasks files will be modified, 32 | particularly neutron_post_install.yml. Addition of new parameters will be made 33 | to playbooks/roles/os_neutron/defaults/main.yml 34 | 35 | Playbook Impact 36 | --------------- 37 | 38 | 1. In juno, the following files are expected to be modified: 39 | 40 | - openstack/roles/neutron_common.yml 41 | - openstack/inventory/group_vars/neutron_all.yml 42 | 43 | 2. In master, these files will be modified: 44 | 45 | - playbooks/roles/os_neutron/tasks/neutron_post_install.yml 46 | - playbooks/roles/os_neutron/defaults/main.yml 47 | 48 | Upgrade Impact 49 | -------------- 50 | 51 | None 52 | 53 | Alternatives 54 | ------------ 55 | 56 | Using the current architecture, prospective new plugins which are not ml2 will 57 | have to insert their own changes by overwriting the default configuration 58 | after it has been applied. 59 | 60 | Security Impact 61 | --------------- 62 | 63 | None known at this time. 64 | 65 | Performance Impact 66 | ------------------ 67 | 68 | This change is not expected to impact performance. Installing the default set 69 | of agents and plugins, as done now, will take the same amount of effort. 70 | 71 | End User Impact 72 | --------------- 73 | 74 | This is not expected to impact end users as it deals with the deployment aspect 75 | only. 76 | 77 | Deployer Impact 78 | --------------- 79 | 80 | This will introduce a more modular architecture for deployers to select neutron 81 | plugins/agents from, allowing a wider use case for these playbooks. 82 | 83 | Developer Impact 84 | ---------------- 85 | 86 | Using the default values will require no new developer effort; only those 87 | interested in changing the neutron config will be affected. 88 | 89 | Dependencies 90 | ------------ 91 | 92 | N/A 93 | 94 | Implementation 95 | ============== 96 | 97 | Assignee(s) 98 | ----------- 99 | 100 | Primary assignee: 101 | https://launchpad.net/~javeria-ak 102 | 103 | 104 | Work items 105 | ---------- 106 | 107 | This change will include modifying the existing neutron_common role to pick 108 | up what plugin to install along with what agents. The names and configs for 109 | individual plugins will be created as new variables in 110 | inventory/group_vars/neutron_all.yml 111 | 112 | Dependencies 113 | ------------ 114 | 115 | N/A 116 | 117 | Testing 118 | ======= 119 | 120 | There are no additional changes required to test this in the current testing 121 | and/or gating framework. 122 | 123 | 124 | Documentation Impact 125 | ==================== 126 | 127 | A bit of additional documentation describing how to insert new plugins/agents 128 | will be required. This will be deployer documentation; an illustrative sketch of what such a variable could look like follows.
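For that deployer documentation, a minimal sketch of the new variables might look like the following; the dictionary keys shown are assumptions based on this spec rather than the final variable layout, and the second entry is a placeholder for any non-ml2 core plugin::

    # inventory/group_vars/neutron_all.yml (juno) or
    # playbooks/roles/os_neutron/defaults/main.yml (master) -- illustrative only.
    neutron_plugin_type: ml2
    neutron_plugins:
      ml2:
        plugin_core: neutron.plugins.ml2.plugin.Ml2Plugin
        plugin_ini: plugins/ml2/ml2_conf.ini
      exampleplugin:                      # hypothetical non-ml2 core plugin
        plugin_core: neutron.plugins.example.plugin.ExamplePlugin
        plugin_ini: plugins/example/example.ini

A deployer would then only need to switch ``neutron_plugin_type`` to select a different core plugin, with the defaults preserving today's behaviour.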
129 | 130 | References 131 | ========== 132 | 133 | N/A 134 | 135 | -------------------------------------------------------------------------------- /specs/liberty/compartementalize-rabbitmq.rst: -------------------------------------------------------------------------------- 1 | Compartmentalize RabbitMQ 2 | ######################### 3 | :date: 2015-07-14 4 | :tags: rabbitmq 5 | 6 | The purpose of this spec is to adjust our current RabbitMQ setup to better use 7 | the available system resources by creating a vhost and user per consumer 8 | service within RabbitMQ. 9 | 10 | Launchpad blueprint: 11 | * https://blueprints.launchpad.net/openstack-ansible/+spec/compartmentalize-rabbitmq 12 | 13 | 14 | Problem description 15 | =================== 16 | 17 | Presently all services use the single root virtual host within RabbitMQ. While this 18 | is acceptable for small to mid-sized deployments, it would be better to divide 19 | services into logical resource groups within RabbitMQ, which also brings 20 | additional security. 21 | 22 | 23 | Proposed change 24 | =============== 25 | 26 | All services that utilize RabbitMQ should have their own virtual host, user, and 27 | password. 28 | 29 | Overview: 30 | * Each role would use the upstream Ansible RabbitMQ user module to create a new 31 | user. The username will be customizable, with a default being the same as the 32 | name of the service. 33 | * Each role will use the upstream Ansible RabbitMQ vhost module to create a new 34 | virtual host per service. The vhost will be customizable, with a default being 35 | the same as the name of the service. 36 | * A password entry will be created within the ``user_secrets.yml`` file for 37 | each RabbitMQ service user. 38 | * The oslo config section of each service will be updated to use the new vhost 39 | name, username, and password. 40 | 41 | 42 | Alternatives 43 | ------------ 44 | 45 | Leave RabbitMQ the way it is. 46 | 47 | 48 | Playbook impact 49 | --------------- 50 | 51 | There will be no impact on the playbooks. The changes being proposed are being done 52 | within roles. Ideally this would be a simple default addition, two new tasks, 53 | and a simple change within the oslo_messaging section in the service 54 | configuration files. 55 | 56 | 57 | Upgrade impact 58 | -------------- 59 | 60 | There will be an upgrade impact, as the user will need to add the new secret 61 | entries to the ``user_secrets.yml`` file. If this were to be accepted as a 62 | backport to kilo, it would have to be targeted at a major version. 63 | 64 | 65 | Security impact 66 | --------------- 67 | 68 | Separating the services into different vhosts with different users and passwords 69 | should improve security and brings the project more in line with what is described 70 | in the OpenStack Messaging Security documentation. 71 | 72 | * http://docs.openstack.org/security-guide/content/messaging-security.html 73 | 74 | 75 | Performance impact 76 | ------------------ 77 | 78 | The separation of services into logical vhosts has not been reported to have 79 | any noticeable performance impact.
80 | 81 | * http://stackoverflow.com/questions/12518685/ 82 | performance-penalty-of-multiple-vhosts-in-rabbitmq 83 | * http://lists.rabbitmq.com/pipermail/rabbitmq-discuss/2012-September/ 84 | 022618.html 85 | 86 | 87 | End user impact 88 | --------------- 89 | 90 | n/a 91 | 92 | 93 | Deployer impact 94 | --------------- 95 | 96 | The deployer will need to ensure they have passwords entries set within the 97 | ``user_secrets.yml`` file. This should not impact greenfield deployments however 98 | it will need to be something covered in an upgrade. 99 | 100 | 101 | Developer impact 102 | ---------------- 103 | 104 | n/a 105 | 106 | 107 | Dependencies 108 | ------------ 109 | 110 | n/a 111 | 112 | Implementation 113 | ============== 114 | 115 | Assignee(s) 116 | ----------- 117 | 118 | Primary assignee: 119 | https://launchpad.net/~kevin-carter ``cloudnull`` 120 | 121 | 122 | Work items 123 | ---------- 124 | 125 | * Add new RabbitMQ users for all services. 126 | * Add new RabbitMQ vhosts for all services. 127 | * Update all service configuration files to use the new vhost, user, 128 | and password. 129 | 130 | 131 | Testing 132 | ======= 133 | 134 | The testing of this change is a convergence test. The gate job will utilize the 135 | the changes on every commit. 136 | 137 | 138 | Documentation impact 139 | ==================== 140 | 141 | Docs will need to be updated in terms of upgrades to add the new variables. 142 | 143 | 144 | References 145 | ========== 146 | 147 | n/a 148 | -------------------------------------------------------------------------------- /specs/liberty/modularize-neutron-liberty.rst: -------------------------------------------------------------------------------- 1 | Modularizing Neutron plays for agents and non ml2 plugin support 2 | ################################################################ 3 | :date: 2015-09-09 18:00 4 | :tags: neutron, plugins, agents 5 | 6 | This spec is propsed to enhance the current neutron playbooks that take a 7 | static approach to plugin and agent insertion. Where ml2 and a few agents 8 | are used by default. 9 | 10 | * https://blueprints.launchpad.net/openstack-ansible/+spec/modularize-neutron-liberty 11 | 12 | Problem Description 13 | ==================== 14 | 15 | Presently a straightforward approach does not exist to add new plugins and add 16 | / remove agents to the neutron setup. A deployer either has to perform these 17 | changes after the whole setup is complete or make their own changes in the 18 | playbooks locally. This feature has already been implemented in juno branch. 19 | 20 | Proposed Change 21 | ==================== 22 | 23 | The files in playbooks/roles/os_neutron/tasks will be modified, particularly 24 | neutron_pre_install.yml and neutron_post_install.yml. 
Addition of new 25 | parameters will be made to playbooks/roles/os_neutron/defaults/main.yml 26 | 27 | Playbook Impact 28 | --------------- 29 | 30 | The following playbooks are expected to be modified to support this feature: 31 | 32 | - playbooks/roles/os_neutron/defaults/main.yml 33 | - playbooks/roles/os_neutron/tasks/main.yml 34 | - playbooks/roles/os_neutron/tasks/neutron_pre_install.yml 35 | - playbooks/roles/os_neutron/tasks/neutron_post_install.yml 36 | 37 | Upgrade Impact 38 | -------------- 39 | 40 | None 41 | 42 | Alternatives 43 | ------------ 44 | 45 | Using the current architecture, prospective new core plugins which are not 46 | ml2 will have to take an overwriting the default configuration, after its 47 | done, approach to insert their own changes. 48 | 49 | Security Impact 50 | --------------- 51 | 52 | None known at this time. 53 | 54 | Performance Impact 55 | ------------------ 56 | 57 | This change is not expected to impact performance. Installing the default set 58 | of agents and plugins as done now, will take the same amount of effort. 59 | 60 | End User Impact 61 | --------------- 62 | 63 | This is not expected to impact end users as it deals with the deployment aspect 64 | only. 65 | 66 | Deployer Impact 67 | --------------- 68 | 69 | This will introduce a more modular architecture for deployers to select neutron 70 | plugins/agents from, allowing a wider use case for the OSAD playbooks. 71 | 72 | Developer Impact 73 | ---------------- 74 | 75 | Using the default values will require no new developer effort, only those 76 | interested in changing the neutron config will be effected. 77 | 78 | Dependencies 79 | ------------ 80 | 81 | N/A 82 | 83 | Implementation 84 | ============== 85 | 86 | Assignee(s) 87 | ----------- 88 | 89 | Primary assignee: 90 | https://launchpad.net/~javeria-ak ``javeriak`` 91 | 92 | 93 | Work items 94 | ---------- 95 | 96 | This change will include modifying the existing os_neutron role to pick 97 | up what plugin to install along with what agents. The names and configs for 98 | individual plugins will be created as new variables in 99 | playbooks/roles/os_neutron/defaults/main.yml 100 | 101 | Dependencies 102 | ------------ 103 | 104 | N/A 105 | 106 | Testing 107 | ======= 108 | 109 | There are no additional changes required to test this in the current testing 110 | and or gating framework. 111 | 112 | 113 | Documentation Impact 114 | ==================== 115 | 116 | A bit of additional documentation describing how to insert new plugins/agents 117 | will be required. This will be deployer documentation. 118 | 119 | References 120 | ========== 121 | 122 | * https://blueprints.launchpad.net/openstack-ansible/+spec/modularize-neutron-plays 123 | 124 | -------------------------------------------------------------------------------- /specs/liberty/named-veths.rst: -------------------------------------------------------------------------------- 1 | Named veths 2 | ########### 3 | :date: 2015-08-31 22:00 4 | :tags: lxc, veth, troubleshooting 5 | 6 | This spec aims to make troubleshooting openstack-ansible issues a more 7 | efficient process by using container names to build names for veth interfaces. 8 | 9 | Link to blueprint: 10 | 11 | * https://blueprints.launchpad.net/openstack-ansible/+spec/named-veths 12 | 13 | Problem description 14 | =================== 15 | 16 | All veth interfaces on the host are named using randomly generated names, such 17 | as `vethK070G4`. 
This can make troubleshooting container networking issues 18 | more challenging since it's difficult to trace a veth name to a particular 19 | network interface within the container. 20 | 21 | Proposed change 22 | =============== 23 | 24 | Names of veth interfaces should be unique and easily correlated to their 25 | containers. However, names of network interfaces have restrictions which must 26 | be handled carefully: 27 | 28 | * 16 characters maximum 29 | * Certain characters, like dashes (-) aren't allowed 30 | 31 | The random characters on the end of the container hostname could be used along 32 | with the interface name to form a veth name. As an example, a container 33 | called `aio1_utility_container-a9ef9551` could have two named veth interfaces: 34 | 35 | * a9ef9551_eth0 36 | * a9ef9551_eth1 37 | 38 | Alternatives 39 | ------------ 40 | 41 | Leave veth names as randomly generated by LXC. 42 | 43 | 44 | Playbook/Role impact 45 | -------------------- 46 | 47 | The veth names will only be adjusted on the host within the LXC configuration 48 | files themselves. Containers won't be affected. The playbooks don't use the 49 | veth names on the host for any actions. 50 | 51 | If veths are not cleaned up properly when a container stops (this is sometimes 52 | called 'dangling veths'), there's a chance that the container won't start 53 | until the dangling veth is manually removed with `ip link del `. 54 | 55 | 56 | Upgrade impact 57 | -------------- 58 | 59 | Upgrades should be unaffected. This change only adjusts the LXC container 60 | configuration files and doesn't change the running configuration of any of the 61 | containers. 62 | 63 | If a container is running and its LXC configuration file is adjusted to use 64 | named veths, it will only utilize those adjustments when it is restarted. If 65 | an upgrade happens to restart only a subset of the containers on the host, 66 | then only those containers will use named veths after they restart. 67 | 68 | 69 | Security impact 70 | --------------- 71 | 72 | This change shouldn't affect security. 73 | 74 | 75 | Performance impact 76 | ------------------ 77 | 78 | This change shouldn't affect performance. 79 | 80 | 81 | End user impact 82 | --------------- 83 | 84 | This change shouldn't affect end users. 85 | 86 | 87 | Deployer impact 88 | --------------- 89 | 90 | Users who deploy OpenStack should be able to troubleshoot network issues more 91 | efficiently. 92 | 93 | For example, if a user was having trouble reaching the nova API container, 94 | they could quickly see which veths were associated with the container. This 95 | would allow users to diagnose network problems with various tools, like 96 | ethtool and tcpdump, without digging into interface indexes or writing scripts. 97 | 98 | If a deployer wants to begin using named veth pairs immediately, then all 99 | containers must be restarted. This is because the LXC configuration files are 100 | adjusted on disk but running containers aren't adjusted. 101 | 102 | 103 | Developer impact 104 | ---------------- 105 | 106 | Much like the deployer impact above, this change could help developers 107 | diagnose issues within different containers more efficiently. 108 | 109 | 110 | Dependencies 111 | ------------ 112 | 113 | This spec has no known dependencies. 
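A minimal sketch of how the proposed naming could be written into the container configuration is shown here, assuming the playbooks derive the suffix from the container name; the task layout, the suffix-derivation expression, and the ``physical_host`` delegation are illustrative assumptions rather than the final implementation::

    # Illustrative Ansible task only -- names and expressions are assumptions.
    - name: Give the container veth a predictable name
      lineinfile:
        dest: "/var/lib/lxc/{{ inventory_hostname }}/config"
        regexp: "^lxc.network.veth.pair"
        line: "lxc.network.veth.pair = {{ inventory_hostname[-8:] }}_eth0"
      delegate_to: "{{ physical_host }}"

For the example container ``aio1_utility_container-a9ef9551`` this would yield ``a9ef9551_eth0``, matching the naming proposed above and staying within the 16-character interface name limit.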
114 | 115 | 116 | Implementation 117 | ============== 118 | 119 | Assignee(s) 120 | ----------- 121 | 122 | Primary assignee: 123 | https://launchpad.net/~rackerhacker ``mhayden`` 124 | 125 | Work items 126 | ---------- 127 | 128 | * Update ansible playbooks to specify `lxc.network.veth.pair` in the main LXC 129 | configuration files as well as the interface .ini files 130 | 131 | 132 | Testing 133 | ======= 134 | 135 | * Do greenfield deployment and verify named veths 136 | * Do an upgrade between releases and verify named veths 137 | * Verify that both tests have no impact on running containers 138 | 139 | Documentation impact 140 | ==================== 141 | 142 | Documentation would be beneficial, especially around how this helps with 143 | troubleshooting issues. 144 | 145 | 146 | References 147 | ========== 148 | 149 | N/A 150 | 151 | -------------------------------------------------------------------------------- /specs/liberty/plumgrid-support-liberty.rst: -------------------------------------------------------------------------------- 1 | Add PLUMgrid plugin to neutron playbooks 2 | ######################################## 3 | :date: 2015-09-09 19:30 4 | :tags: neutron, plugins, networking 5 | 6 | This spec is propsed to insert the capability of using the PLUMgrid 7 | OpenStack Neutron Plugin through the OSAD neutron playbooks. 8 | 9 | * https://blueprints.launchpad.net/openstack-ansible/+spec/plumgrid-support-liberty 10 | 11 | Problem Description 12 | =================== 13 | 14 | PLUMgrid is a core neutron networking plugin that has been a part of OpenStack 15 | neutron since Grizzly. It offers a Network Virtualization Platform that uses 16 | direct communication with the Hypervisor layer to provide all the networking 17 | functionality requested through Neutron APIs. The PLUMgrid Neutron Plugin 18 | implements Neutron v2 APIs and helps configure L2/L3 virtual networks 19 | created through the PLUMgrid Platform. It also implements External Networks 20 | and Port Binding Extensions. 21 | 22 | APIs supported by the PLUMgrid plugin: 23 | - Networks 24 | - Subnets 25 | - Ports 26 | - External Networks 27 | - Routers 28 | - Security Groups 29 | - Quotas 30 | - Port Binding 31 | - Provider Networks 32 | 33 | Proposed Change 34 | =============== 35 | 36 | This change is proposed to add the PLUMgrid plugin as a core plugin option 37 | alongside ml2, which will be the default. This configurability should already 38 | be achieved by the BP: modularize-neutron-liberty. 39 | 40 | The rest of the installation for PLUMgrid that requires PLUMgrid Controller 41 | and Compute components, that enable management of the plugin, is maintained 42 | in a public plumgrid-ansible repository. 43 | 44 | The changes described below assume the previously mentioned BP modularization 45 | changes in place. 46 | 47 | This feature is proposed for the master branch leading to liberty. Once 48 | implemented it will be backported to kilo. 49 | 50 | The parameters relevant to the PLUMgrid plugin installation will be added to a 51 | new dictionary item in 'neutron_plugins' in 52 | 'playbooks/roles/os_neutron/defaults/main.yml'. This will allow setting the 53 | 'neutron_plugin_type' to plumgrid if desired. 
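As an illustration of how a deployer might opt in once this lands, an override could look like the sketch below; the dictionary keys are assumptions based on this spec, the template path mirrors the files listed under Playbook Impact, and the core plugin path is the one commonly documented for the PLUMgrid plugin::

    # /etc/openstack_deploy/user_variables.yml -- illustrative only; key names
    # are assumptions, not the final interface.
    neutron_plugin_type: plumgrid
    neutron_plugins:
      plumgrid:
        plugin_core: neutron.plugins.plumgrid.plumgrid_plugin.plumgrid_plugin.NeutronPluginPLUMgridV2
        plugin_ini: plugins/plumgrid/plumgrid.ini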
54 | 55 | Playbook Impact 56 | --------------- 57 | 58 | These files are expected to be modified: 59 | 60 | - playbooks/roles/os_neutron/defaults/main.yml 61 | 62 | New templates will be added in the os_neutron role: 63 | 64 | - playbooks/roles/os_neutron/templates/plugins/plumgrid/plumgrid.ini 65 | - playbooks/roles/os_neutron/templates/plugins/plumgrid/plumlib.ini 66 | - playbooks/roles/os_neutron/files/rootwrap.d/plumlib.filters 67 | 68 | Upgrade impact 69 | -------------- 70 | 71 | None 72 | 73 | Alternatives 74 | ------------ 75 | 76 | To continue using the default ml2 and linuxbridge-agent neutron deployment 77 | with no possibility of other core neutron plugins. 78 | 79 | Security Impact 80 | --------------- 81 | 82 | N/A 83 | 84 | Performance Impact 85 | ------------------ 86 | 87 | This change is not expected to impact performance. A typical PLUMgrid plugin 88 | installation, will furthermore disable neutron agent installations. Hence the 89 | overall performance is expected to remain the same. 90 | 91 | End User Impact 92 | --------------- 93 | 94 | End users will be able to leverage the enhanced scale and operational 95 | capabilities provided by the PLUMgrid plugin when choosing to install this 96 | plugin. Further details can be found in the References section below. 97 | 98 | Deployer Impact 99 | --------------- 100 | 101 | This will provide Deployers with the option to use PLUMgrid as the neutron 102 | plugin. Upgrading from a previous release to use this new feature will only 103 | be possible through a re-run of the neutron playbooks as well. This change 104 | does not effect running instances within the cloud. 105 | 106 | Developer Impact 107 | ---------------- 108 | 109 | This change adds further installable options and as such does not 110 | effect the default flow of the playbooks. 111 | 112 | 113 | Dependencies 114 | ------------ 115 | 116 | None 117 | 118 | Implementation 119 | ============== 120 | 121 | Assignee(s) 122 | ----------- 123 | 124 | Primary assignee: 125 | https://launchpad.net/~javeria-ak ``javeriak`` 126 | 127 | Work items 128 | ---------- 129 | 130 | This change will use the modularized neutron playbooks to provide 131 | PLUMgrid as a plugin option. A set of three new template files will 132 | be added to the neutron plays to support plumgrid. 133 | 134 | Dependencies 135 | ------------ 136 | 137 | Dependent on: 138 | 139 | - https://blueprints.launchpad.net/openstack-ansible/+spec/modularize-neutron-liberty 140 | 141 | Testing 142 | ======= 143 | 144 | There are no additional changes required to test this in the current testing 145 | and or gating framework that also covers the neutron components. 146 | 147 | Documentation Impact 148 | ==================== 149 | 150 | Documentation describing how to modify the configuration parameters 151 | to install PLUMgrid will be required. This will be deployer documentation. 
152 | 153 | References 154 | ========== 155 | 156 | * https://www.vmware.com/products/nsx.html 157 | 158 | * https://wiki.openstack.org/wiki/PLUMgrid-Neutron 159 | 160 | * https://github.com/plumgrid/plumgrid-ansible 161 | -------------------------------------------------------------------------------- /specs/liberty/remove-upstream-repo-dependency.rst: -------------------------------------------------------------------------------- 1 | Remove upstream repo dependency 2 | ############################### 3 | :date: 2015-07-19 4 | :tags: repo, repo-servers, repo-clone, pip-wheel 5 | 6 | The purpose of this spec is to remove the repo-clone play from OSAD. 7 | 8 | * https://blueprints.launchpad.net/openstack-ansible/+spec/Remove-upstream-repo-dependency 9 | 10 | 11 | Problem description 12 | =================== 13 | 14 | Presently the repo-clone-mirror play is responsible for cloning the upstream 15 | repository that Rackspace maintains into the repo containers. While this process 16 | is simple enough it does bring with it a reliance on an upstream deployer/vendor. 17 | OSAD already has the ability to build its own python packages which is the process 18 | used to do all gate check testing so it should also be the default means to deploy 19 | an OSAD environment. 20 | 21 | 22 | Proposed change 23 | =============== 24 | 25 | * Remove the repo-clone-mirror.yml play 26 | * Change repo-install.yml to use repo-build.yml as it's included method. 27 | * Modify the pip install role to remove the install requirement using the upstream 28 | mirror. 29 | 30 | 31 | Alternatives 32 | ------------ 33 | 34 | Leave everything the way it is. 35 | 36 | 37 | Playbook impact 38 | --------------- 39 | 40 | Changes the repo create process to always build. This will only impact deployers 41 | that are using the repo-servers and will ensure that the system is always building 42 | the correct packages. 43 | 44 | When bootstrapping a new environment the pip install role is used throughout the stack. 45 | This would modify that role to always pull upstream pip unless otherwise instructed, 46 | through the use of *user_vars*, to go elsewhere. 47 | 48 | 49 | Upgrade impact 50 | -------------- 51 | 52 | n/a 53 | 54 | 55 | Security impact 56 | --------------- 57 | 58 | n/a 59 | 60 | 61 | Performance impact 62 | ------------------ 63 | 64 | Repo clone was intended to be a faster means of delivering packages to the deployment 65 | infrastructure however in testing repo clone and repo build operate at roughly the 66 | same speed. 67 | 68 | 69 | End user impact 70 | --------------- 71 | 72 | n/a 73 | 74 | 75 | Deployer impact 76 | --------------- 77 | 78 | This change will be unnoticeable to the deployer. 79 | 80 | 81 | Developer impact 82 | ---------------- 83 | 84 | n/a 85 | 86 | 87 | Dependencies 88 | ------------ 89 | 90 | n/a 91 | 92 | Implementation 93 | ============== 94 | 95 | Assignee(s) 96 | ----------- 97 | 98 | Primary assignee: 99 | https://launchpad.net/~kevin-carter ``cloudnull`` 100 | 101 | 102 | Work items 103 | ---------- 104 | 105 | * Delete the repo-clone-mirror.yml play 106 | * Change the include in repo-install.yml 's/repo-clone-mirror.yml/repo-build.yml/' 107 | 108 | 109 | Testing 110 | ======= 111 | 112 | This is already being tested on every build within upstream OSAD. 
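As a rough sketch of the ``repo-install.yml`` work item above (the surrounding
content of that playbook is assumed rather than verified against the tree),
the include swap amounts to:

.. code-block:: yaml

   # repo-install.yml (illustrative excerpt) - build packages instead of
   # cloning the upstream mirror.
   # - include: repo-clone-mirror.yml    # removed by this spec
   - include: repo-build.yml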
113 | 114 | 115 | Documentation impact 116 | ==================== 117 | 118 | n/a 119 | 120 | 121 | References 122 | ========== 123 | 124 | n/a 125 | -------------------------------------------------------------------------------- /specs/liberty/role-haproxy-v2.rst: -------------------------------------------------------------------------------- 1 | HAProxy improvements 2 | ################################# 3 | :date: 2015-09-04 14:00 4 | :tags: haproxy, production use 5 | 6 | HA Proxy can be improved by adding a few changes: 7 | 8 | * Making it really HA 9 | * Allowing configuration interface to easily adapt load 10 | * Deploying only the configuration for the services 11 | deployed within the inventory. 12 | * Improving backends configuration, for example galera or 13 | adapting the timer values to be more efficient 14 | 15 | https://blueprints.launchpad.net/openstack-ansible/+spec/role-haproxy-v2 16 | 17 | Problem description 18 | =================== 19 | 20 | There are a few features already asked by the community: 21 | 22 | * HA for haproxy 23 | * Enable statistics and improve manageability of haproxy 24 | * Limiting the unnecessary checks of haproxy 25 | 26 | 27 | Proposed change 28 | =============== 29 | 30 | * Implement keepalived for haproxy 31 | * Change the standard haproxy role to add 32 | administrative tools (admin level on socket and stats) 33 | * Remove the large haproxy variable in vars/ folder 34 | * Give this information component by component 35 | (in the group_vars), and make it possible to have 36 | user overrides (user_variables or component by component). 37 | Then ``delegate`` the configuration to haproxy hosts. 38 | * Introduce a skip variable, if you want to deploy 39 | haproxy on some components but not some others 40 | 41 | Alternatives 42 | ------------ 43 | 44 | Wait for ansible2 to have variable merging/cleanup for dicts 45 | on a per task/playbook basis. 46 | 47 | Playbook/Role impact 48 | -------------------- 49 | 50 | The playbook ``haproxy-install.yml`` will be completely 51 | overwritten. 52 | 53 | haproxy playbook run will be longer, due to the ``delegate to``. 54 | 55 | 56 | Upgrade impact 57 | -------------- 58 | 59 | None. 60 | 61 | Security impact 62 | --------------- 63 | 64 | No change 65 | 66 | Performance impact 67 | ------------------ 68 | 69 | Improved performance by: 70 | 71 | * Doing less unnecessary checks to backends 72 | * Adding an easy way to set customer values for the 73 | backend's timers. 74 | 75 | End user impact 76 | --------------- 77 | 78 | No change 79 | 80 | Deployer impact 81 | --------------- 82 | 83 | * No change in default configuration 84 | * The deployer can overwrite the 85 | ``haproxy_service_configs`` per component 86 | 87 | Developer impact 88 | ---------------- 89 | 90 | No impact at first sight. 
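To make the per-component override described above concrete, a hedged sketch
follows; the keys mirror the existing ``haproxy_service_configs`` structure,
but the exact placement in ``group_vars`` and the name of the skip flag are
assumptions, not the final interface.

.. code-block:: yaml

   # Illustrative group_vars entry for one component; key names follow the
   # current haproxy role but may change during implementation.
   haproxy_glance_api_settings:
     haproxy_service_name: glance_api
     haproxy_backend_nodes: "{{ groups['glance_api'] | default([]) }}"
     haproxy_port: 9292
     haproxy_balance_type: http
     haproxy_backend_options:
       - "httpchk /healthcheck"

   # Hypothetical skip flag per the proposal, set in user_variables to avoid
   # configuring haproxy for this component at all.
   haproxy_glance_api_skip: true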
91 | 
92 | Dependencies
93 | ------------
94 | 
95 | None
96 | 
97 | Implementation
98 | ==============
99 | 
100 | Assignee(s)
101 | -----------
102 | 
103 | None
104 | 
105 | Work items
106 | ----------
107 | 
108 | * Keepalived:
109 |   https://review.openstack.org/#/c/217517/
110 | * Easy administration:
111 |   https://review.openstack.org/#/c/215019/ and https://review.openstack.org/#/c/214110/
112 | * Default configuration less static:
113 | 
114 |   * rewrite haproxy-install with the "delegate_to" and
115 |     with a "when" haproxy_component_skip (if you want to deploy
116 |     haproxy on some components but not some others)
117 |   * create a file per component with default variables under group_vars
118 | 
119 | * Default timer value changes.
120 | 
121 | Testing
122 | =======
123 | 
124 | * Does this change impact how gating is done?
125 | 
126 |   There will be a change to the haproxy-install playbook if merged.
127 | 
128 | * Can this change be tested on a **per-commit** basis?
129 | 
130 |   Yes
131 | 
132 | * Given the instance size restrictions, as found in OpenStack Infra
133 |   (8GB Ram, vCPUs <= 8), can the test be run in a resource constrained
134 |   environment?
135 | 
136 |   No change
137 | 
138 | * Is this untestable given current limitations (specific hardware /
139 |   software configurations available)? If so, are there mitigation plans
140 |   for this change to be tested within 3rd party testing, gate enhancements,
141 |   etc...?
142 | 
143 |   No
144 | 
145 | * If the service is not OpenStack specific how can we test the change?
146 | 
147 |   Running the new playbooks
148 | 
149 | 
150 | Documentation impact
151 | ====================
152 | 
153 | For those who change the default configuration of haproxy (currently not
154 | documented), this change would modify their current configuration, so
155 | it needs to be documented. An explanation of the skip variable and of the
156 | component-by-component override should also be added to the documentation.
157 | 
158 | References
159 | ==========
160 | 
161 | None
162 | 
--------------------------------------------------------------------------------
/specs/liberty/upgrade-mariadb-v10.rst:
--------------------------------------------------------------------------------
1 | MariaDB upgrade to v10
2 | ######################
3 | :date: 2015-07-19
4 | :tags: mysql, galera
5 | 
6 | The purpose of this spec is to upgrade MariaDB from v5.5 to v10.0.
7 | 
8 | https://blueprints.launchpad.net/openstack-ansible/+spec/MariaDB-upgrade-to-v10
9 | 
10 | 
11 | Problem description
12 | ===================
13 | 
14 | MariaDB + Galera is presently using v5.5, which is old and should be upgraded.
15 | Additionally, we are using xtrabackup v1, which was deprecated in favor of
16 | xtrabackup v2, so it should be changed as we upgrade to v10 in order to take
17 | advantage of the performance and security enhancements available in the new releases.
18 | 
19 | 
20 | Proposed change
21 | ===============
22 | 
23 | * Upgrade MariaDB - this is a package change as well as an upstream mariadb repo
24 |   change
25 | * Change xtrabackup to xtrabackup-v2 - This will add a configuration section in
26 |   the default ``my.cnf`` for the xtrabackup client(s).
27 | 
28 | 
29 | Alternatives
30 | ------------
31 | 
32 | Leave everything the way it is.
33 | 
34 | 
35 | Playbook/Role impact
36 | --------------------
37 | 
38 | There will be no playbook impact; however, the galera_server and galera_client
39 | roles will change to support the new packages for xtrabackup-v2 and mariadb+galera
40 | v10.
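As a hedged illustration of that role-level change (the variable names below
are assumptions about the galera_server defaults rather than the final
implementation), the switch could surface roughly as:

.. code-block:: yaml

   # Illustrative galera_server defaults for the upgrade; names are assumptions.
   galera_repo_version: "10.0"
   galera_wsrep_sst_method: xtrabackup-v2

   # Which would render into the generated my.cnf approximately as:
   # [mysqld]
   # wsrep_sst_method = xtrabackup-v2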
41 | 
42 | 
43 | Upgrade impact
44 | --------------
45 | 
46 | n/a
47 | 
48 | 
49 | Security impact
50 | ---------------
51 | 
52 | Upgrading to MariaDB v10 w/ xtrabackup v2 will result in OSAD being able to take
53 | advantage of better security options in the future if we so choose.
54 | 
55 | 
56 | Performance impact
57 | ------------------
58 | 
59 | Upgrading to MariaDB v10 w/ xtrabackup v2 will result in greater performance.
60 | 
61 | 
62 | End user impact
63 | ---------------
64 | 
65 | n/a
66 | 
67 | 
68 | Deployer impact
69 | ---------------
70 | 
71 | The deployer will need to be aware that mariadb v5.5 is being upgraded; however,
72 | all of the post-upgrade processes should be handled automatically.
73 | 
74 | 
75 | Developer impact
76 | ----------------
77 | 
78 | n/a
79 | 
80 | 
81 | Dependencies
82 | ------------
83 | 
84 | * SPEC/Limit the distribution of .my.cnf - https://review.openstack.org/#/c/203754/
85 | 
86 | 
87 | Implementation
88 | ==============
89 | 
90 | Assignee(s)
91 | -----------
92 | 
93 | Primary assignee: (unassigned)
94 | 
95 | 
96 | Work items
97 | ----------
98 | 
99 | * Change the package for MariaDB10 w/ Galera
100 | * Add repo for new versions of XtraBackup
101 | * Update the my.cnf for use with MariaDB10 (revise it for anything that may
102 |   need to be removed)
103 | * Update the cluster.cnf for use with MariaDB10 (revise it for anything that
104 |   may need to be removed)
105 | 
106 | 
107 | Testing
108 | =======
109 | 
110 | The testing for this change will be automatic in upstream as every build will
111 | change to using this by default.
112 | 
113 | 
114 | Documentation impact
115 | ====================
116 | 
117 | n/a
118 | 
119 | 
120 | References
121 | ==========
122 | 
123 | n/a
124 | 
--------------------------------------------------------------------------------
/specs/mitaka/build-facts-archive.rst:
--------------------------------------------------------------------------------
1 | Build Facts Archive
2 | ###################
3 | :date: 2015-04-23
4 | :tags: archive, deployment, information
5 | 
6 | Create a script to archive all valuable information about a deployment.
7 | This information includes but is not limited to the following: kernel version
8 | of all physical hosts, version of OSAD that is currently installed, all
9 | installed packages and their versions, all running containers and their
10 | installed packages, latest tempest test run, all relevant OSAD configuration
11 | files (openstack_user_config, etc), host networking configuration,
12 | host disk configuration.
13 | 
14 | * https://blueprints.launchpad.net/openstack-ansible/+spec/build-facts-archive
15 | 
16 | Problem description
17 | ===================
18 | 
19 | Currently there is no simple way to get information about a deployment. The
20 | current process requires logging into the deployment host and then knowledge of
21 | ansible and the openstack_inventory.json file and its groups to correctly
22 | structure an ansible query to gather information.
23 | 
24 | It is also challenging to create a tool outside of OSAD to do some automation
25 | around aggregation of deployment information, as you need to parse the
26 | inventory file or know exactly which host or container you need to access to
27 | get information.
28 | 
29 | Proposed change
30 | ===============
31 | 
32 | Provide a simple script that the user can run to gather all predetermined
33 | important information and give a solid top-down view of a deployed cluster.
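A minimal, hedged sketch of the kind of gathering the script (or a supporting
playbook) could perform is shown below; the paths, file names, and the small
set of facts collected are illustrative only.

.. code-block:: yaml

   # Illustrative play - collect a few of the proposed facts from every host
   # and pull them back to an archive directory on the deployment host.
   - name: Gather basic deployment facts
     hosts: all
     gather_facts: true
     tasks:
       - name: Record kernel and package information per host
         shell: >
           uname -r > /tmp/facts-{{ inventory_hostname }}.txt &&
           dpkg -l >> /tmp/facts-{{ inventory_hostname }}.txt
         args:
           creates: "/tmp/facts-{{ inventory_hostname }}.txt"

       - name: Pull the per-host fact files back to the deployment host
         fetch:
           src: "/tmp/facts-{{ inventory_hostname }}.txt"
           dest: "/openstack/facts-archive/"
           flat: true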
34 | 
35 | 
36 | Alternatives
37 | ------------
38 | 
39 | This script could live in the rpc-extras repository instead of OSAD. This
40 | would not be ideal as it would only help the users who are using rpc-extras.
41 | If it resides in OSAD then all users get a simple way of getting a quick
42 | top-down view of what their current cluster has.
43 | 
44 | Playbook impact
45 | ---------------
46 | 
47 | A playbook will most likely need to be added to accomplish the task of
48 | gathering valuable information from each host / container based on their role
49 | / group. This playbook will not be deployment impacting.
50 | 
51 | Upgrade impact
52 | --------------
53 | 
54 | None
55 | 
56 | Security impact
57 | ---------------
58 | 
59 | This could potentially touch all containers to gather sensitive information such
60 | as configuration files which may contain passwords, and information about
61 | keystone users (names, roles, etc). This is a minimal risk as the user would
62 | have to export the output off the host. If someone is running this script they
63 | already have access to this information as they are logged onto the deployment
64 | host.
65 | 
66 | Performance impact
67 | ------------------
68 | 
69 | None
70 | 
71 | End user impact
72 | ---------------
73 | 
74 | None
75 | 
76 | Deployer impact
77 | ---------------
78 | 
79 | This change will give the deployer an easy way to gather current information
80 | about their cloud. This could help troubleshoot config problems as well as
81 | allow them a quick insight into their latest test results. This will even
82 | allow them to see package discrepancies and help them prepare for an upgrade.
83 | 
84 | 
85 | Developer impact
86 | ----------------
87 | 
88 | None
89 | 
90 | Dependencies
91 | ------------
92 | 
93 | None
94 | 
95 | Implementation
96 | ==============
97 | 
98 | Assignee(s)
99 | -----------
100 | 
101 | Open to all
102 | 
103 | Primary assignee:
104 |   None
105 | 
106 | Other contributors:
107 |   None
108 | 
109 | Work items
110 | ----------
111 | 
112 | Create a script to:
113 |   - create the archive directory
114 |   - gather all relevant deployment information
115 |   - tarball the archive directory
116 |   - move the tarball to a well known location
117 |   - remove the archive directory
118 | 
119 | It would be up to the end user / deployer what they do with the tarball, but
120 | it should be placed in a reasonable spot on the deployment host that would be
121 | easy to find / access for the deployer.
122 | 
123 | Testing
124 | =======
125 | 
126 | This should add a task to gating/commit/nightly to run this script and
127 | return the captured archive tarball as a Jenkins artifact. Tempest results
128 | could also be sent to Jenkins so that the results.xml can be displayed.
129 | This should help developers / QE see testing trends and allow the users
130 | of Jenkins to more accurately find bugs in a more timely manner.
131 | 
132 | 
133 | Documentation impact
134 | ====================
135 | 
136 | A simple reference to this script in the user guide would be all that is
137 | needed if it is determined that it warrants it.
138 | 
139 | 
140 | References
141 | ==========
142 | 
143 | If you look at the current scripts located in the scripts directory
144 | 
145 | * https://github.com/openstack/openstack-ansible/blob/master/scripts/scripts-library.sh#L226-L279
146 | 
147 | a lot of this information is already gathered about the host that the script
148 | is run on.
This proposal should use the information that is gathered as a 149 | blueprint to some of the information that should be gathered about all hosts. 150 | -------------------------------------------------------------------------------- /specs/mitaka/convert-aio-bootstrap-to-ansible.rst: -------------------------------------------------------------------------------- 1 | Convert AIO bootstrap to Ansible 2 | ################################ 3 | :date: 2015-10-16 00:00 4 | :tags: aio, bootstrap, ansible 5 | 6 | The process for an AIO installation of openstack-ansible involves a bash script 7 | to do the initial bootstrapping of the AIO host. This script works well, but it 8 | becomes difficult to update over time and a conversion to Ansible would make 9 | future updates, such as `multi-platform-host blueprint`_, a little easier. 10 | 11 | .. _multi-platform-host blueprint: https://blueprints.launchpad.net/openstack-ansible/+spec/multi-platform-host 12 | 13 | Blueprint - Convert AIO bootstrap to Ansible: 14 | 15 | * https://blueprints.launchpad.net/openstack-ansible/+spec/convert-aio-bootstrap-to-ansible 16 | 17 | Problem description 18 | =================== 19 | 20 | The ``bootstrap-aio.sh`` script works well, but it can be difficult to read in 21 | a few places. Deployers who are familiar with Ansible, but not bash, may have 22 | challenges with updating the script as well. 23 | 24 | 25 | Proposed change 26 | =============== 27 | 28 | At this time, the AIO installation has four steps: 29 | 30 | * Configuration `(optional)` 31 | * Bootstrap the AIO build 32 | * Bootstrap Ansible 33 | * Run the openstack-ansible playbooks 34 | 35 | This spec proposes the following steps to replace the existing ones: 36 | 37 | * Configuration `(optional)` 38 | * Bootstrap Ansible 39 | * Run AIO playbook `(if an AIO deployment is desired)` 40 | * Run the openstack-ansible playbooks 41 | 42 | The current AIO boostrap script is **heavily** used by various deployers as 43 | well as other downstream projects, so changes must be made carefully. The 44 | proposed work for this spec would proceed as follows: 45 | 46 | * Build out the Ansible role for bootstrapping an AIO build 47 | * Update documentation to allow for early testing 48 | * Change the ``bootstrap-aio.sh`` script to call the new AIO bootstrap playbook 49 | * Update the documentation to reflect the new bootstrap script changes 50 | * Remove the ``bootstrap-aio.sh`` script at a later date (if needed) 51 | 52 | Alternatives 53 | ------------ 54 | 55 | The current ``bootstrap-aio.sh`` script could remain as it is now, or it could 56 | be simplified to make it easier to read and update. 57 | 58 | Playbook/Role impact 59 | -------------------- 60 | 61 | The openstack-ansible playbooks themselves shouldn't change as a result of this 62 | update. The AIO bootstrap is a prerequisite step in the deployment right now 63 | and that won't change after the AIO Ansible playbook is available for use. 64 | 65 | 66 | Upgrade impact 67 | -------------- 68 | 69 | This change would only affect greenfield deployments of AIO builds. If a 70 | deployer has an existing AIO build deployed, they would not need to run the 71 | AIO bootstrap playbook again, even with upgrades. 72 | 73 | Security impact 74 | --------------- 75 | 76 | There are no known security impacts of this change. 77 | 78 | Performance impact 79 | ------------------ 80 | 81 | There are no known performance impacts of this change. 
The Ansible AIO 82 | playbook may be slightly slower than the bash script, but the difference should 83 | be negligible. 84 | 85 | End user impact 86 | --------------- 87 | 88 | An end user would not notice this change since it would only affect deployers. 89 | 90 | 91 | Deployer impact 92 | --------------- 93 | 94 | If deployers are doing greenfield AIO deployments, they will need to follow new 95 | steps and ensure they bootstrap Ansible prior to running the new AIO Ansible 96 | playbook. Documentation for AIO builds will require updates. 97 | 98 | If deployers are doing deployments to multiple servers (non-AIO), their steps 99 | for deploying openstack-ansible will not change. 100 | 101 | Developer impact 102 | ---------------- 103 | 104 | Developers will need to make any future AIO bootstrap changes within the 105 | Ansible playbook instead of the bash script. 106 | 107 | Dependencies 108 | ------------ 109 | 110 | This spec doesn't depend on any other blueprint or spec at this time. 111 | 112 | Implementation 113 | ============== 114 | 115 | Assignee(s) 116 | ----------- 117 | 118 | Primary assignee: 119 | 120 | * Major Hayden (Launchpad: `rackerhacker`_, IRC: mhayden) 121 | 122 | .. _rackerhacker: https://launchpad.net/~rackerhacker 123 | 124 | Work items 125 | ---------- 126 | 127 | The last bulleted list in `Proposed Changes` above details out the work items. 128 | 129 | 130 | Testing 131 | ======= 132 | 133 | These changes will impact gating since the gating jobs run an AIO build. 134 | However, if the bootstrap-aio.sh script is changed to call the AIO bootstrap 135 | Ansible playbook, the gating job itself will not need to be changed. 136 | 137 | No additional resources should be required during gating to run the Ansible AIO 138 | playbook. 139 | 140 | Documentation impact 141 | ==================== 142 | 143 | The documentation for AIO deployments would need to be updated with the new 144 | steps for bootstrapping an AIO build. The changes in the steps are in the 145 | `Proposed Changes` section at the top of this spec. 146 | 147 | Also, deployers would need to note which environment variables and/or Ansible 148 | variables to set to control various parts of the deployment, such as whether or 149 | not to deploy certain OpenStack services in their environment. 150 | 151 | References 152 | ========== 153 | 154 | No references at this time. 155 | 156 | -------------------------------------------------------------------------------- /specs/mitaka/install-guide.rst: -------------------------------------------------------------------------------- 1 | Installation Guide 2 | ################## 3 | :date: 2015-11-02 22:00 4 | :tags: install, config, architecture 5 | 6 | https://blueprints.launchpad.net/openstack-ansible/+spec/install-guide 7 | 8 | Improve the installation guide to appeal to more potential deployers. 9 | 10 | 11 | Problem description 12 | =================== 13 | 14 | The current installation guide mainly supports only one rather complex 15 | deployment architecture that limits the apparent flexibility and appeal 16 | of the project to potential deployers. 17 | 18 | 19 | Proposed change 20 | =============== 21 | 22 | Improve the installation guide to offer several useful deployment 23 | architectures ranging from simple to complex. 24 | 25 | Alternatives 26 | ------------ 27 | 28 | Continue using the existing content that contains significant technical 29 | debt from decisions made prior to entry into the Stackforge and later 30 | OpenStack namespaces. 
31 | 32 | Playbook/Role impact 33 | -------------------- 34 | 35 | None. 36 | 37 | Upgrade impact 38 | -------------- 39 | 40 | None, although a separate specification should address development of 41 | upgrade documentation referencing the deployment architectures in the 42 | installation guide as necessary. 43 | 44 | Security impact 45 | --------------- 46 | 47 | None, although the deployment architectures should implement security 48 | measures as necessary. 49 | 50 | Performance impact 51 | ------------------ 52 | 53 | None, although more complex deployment architectures could perform poorly 54 | on hardware that disregards minimum requirements. 55 | 56 | End user impact 57 | --------------- 58 | 59 | None. 60 | 61 | Deployer impact 62 | --------------- 63 | 64 | A variety of different deployment architectures ranging from simple to 65 | complex highlight the flexibility of this project and increase appeal to 66 | potential deployers. 67 | 68 | Developer impact 69 | ---------------- 70 | 71 | Developers should understand these deployment architectures and adjust them 72 | as necessary to account for new services, changes to existing services, 73 | changes to infrastructure requirements, etc. 74 | 75 | Dependencies 76 | ------------ 77 | 78 | None. 79 | 80 | 81 | Implementation 82 | ============== 83 | 84 | Assignee(s) 85 | ----------- 86 | 87 | Primary assignee: 88 | None 89 | 90 | Other contributors: 91 | None 92 | 93 | Work items 94 | ---------- 95 | 96 | * Develop several deployment architectures that range from simple to 97 | complex and attempt to minimize opinions regarding OpenStack service 98 | configuration and operation. For example: 99 | 100 | * A simple architecture may include a minimum of two infrastructure 101 | nodes and one compute node using three networks with minimal physical 102 | network redundancy and deploy only core OpenStack services. 103 | 104 | * A complex architecture may include a minimum of three infrastructure 105 | nodes, one compute node, and three storage nodes using four networks 106 | with reasonable network redundancy and deploy all OpenStack services. 107 | 108 | * Potentially restructure the installation guide to implement these 109 | deployment architectures in the most useful fashion. 110 | 111 | 112 | Testing 113 | ======= 114 | 115 | * Verify operation of each deployment architecture prior to each major 116 | release. 117 | 118 | 119 | Documentation impact 120 | ==================== 121 | 122 | * Renovating the installation guide. 123 | 124 | 125 | References 126 | ========== 127 | 128 | None. 129 | -------------------------------------------------------------------------------- /specs/mitaka/irr-apt_package_pinning.rst: -------------------------------------------------------------------------------- 1 | IRR - APT package Pinning 2 | ######################### 3 | :date: 2015-11-01 4 | :tags: independent-role-repositories, apt_package_pinning 5 | 6 | Split out the apt package pinning role into it's own repository. 7 | 8 | 9 | Problem description 10 | =================== 11 | 12 | Roles are all contained within a single monolithic repository making it 13 | impossible/difficult to consume the OSA roles outside of deploying the 14 | entire stack. 
15 | 16 | 17 | Proposed change 18 | =============== 19 | 20 | To ensure that the OSA project is consumable by other stacks using different 21 | architectures, deployment methods, and capabilities the role 22 | "apt_package_pinning" need to be moved from the monolithic stack and 23 | into the it's own role repository. 24 | 25 | 26 | Alternatives 27 | ------------ 28 | 29 | Leave everything the way it is. However doing that will hurt general OSA 30 | adoption. 31 | 32 | 33 | Playbook/Role impact 34 | -------------------- 35 | 36 | * No impact to the playbooks. 37 | * The role will be removed from the main stack. The plugins, filters, and 38 | libraries may need to be locally updated. 39 | 40 | 41 | Upgrade impact 42 | -------------- 43 | 44 | While the change will impact the placement of the role it will not impact 45 | upgrade-ability of the stack. The general workflow will need to be updated 46 | to ensure that users are updating roles on upgrade using the Ansible 47 | galaxy interface however generally speaking this is already being done for 48 | the deployer when running the ``bootstrap-ansible.sh`` script. 49 | 50 | 51 | Security impact 52 | --------------- 53 | 54 | n/a 55 | 56 | 57 | Performance impact 58 | ------------------ 59 | 60 | Moving the role to an external repository will cause an impact in time to 61 | role resolution however that impact should be minimal. 62 | 63 | 64 | End user impact 65 | --------------- 66 | 67 | n/a 68 | 69 | 70 | Deployer impact 71 | --------------- 72 | 73 | Deployers will need to be aware of the new role locations and how to update 74 | existing roles however this should be minimal considering the tooling for 75 | updating existing roles already exists 76 | 77 | 78 | Developer impact 79 | ---------------- 80 | 81 | Developers will need focus work within the roles which will exist within 82 | separate repositories. 83 | 84 | 85 | Dependencies 86 | ------------ 87 | 88 | n/a 89 | 90 | 91 | Implementation 92 | ============== 93 | 94 | Assignee(s) 95 | ----------- 96 | 97 | Primary assignee: 98 | https://launchpad.net/~kevin-carter (IRC: cloudnull) 99 | 100 | 101 | Work items 102 | ---------- 103 | 104 | * With the role moved, tests will be created within the role via OpenStack CI 105 | to ensure that the role is performing the actions that its supposed to. 106 | * Updated documentation via the "README.rst" will be created to show how the 107 | role can be used standalone. 108 | * Example local inventory will be created to show how the role can be used. 109 | The local only inventory will also be used for testing the role. 110 | 111 | 112 | Testing 113 | ======= 114 | 115 | * The test cases will deploy the role into a regular DSVM image 116 | * The role will execute itself locally 117 | * Once the role has completed an Ansible test play will run through several 118 | assert tasks to ensure the role functioned as intended. 119 | 120 | 121 | Documentation impact 122 | ==================== 123 | 124 | The base README.rst file will be updated to explain how the role can be used 125 | as a standalone role. 126 | 127 | 128 | References 129 | ========== 130 | 131 | n/a 132 | -------------------------------------------------------------------------------- /specs/mitaka/irr-galera.rst: -------------------------------------------------------------------------------- 1 | IRR - Galera 2 | ############ 3 | :date: 2015-11-01 4 | :tags: independent-role-repositories, galera 5 | 6 | Split out the galera_server and galera_client roles into it's own repository. 
7 | 8 | 9 | Problem description 10 | =================== 11 | 12 | Roles are all contained within a single monolithic repository making it 13 | impossible/difficult to consume the OSA roles outside of deploying the 14 | entire stack. 15 | 16 | 17 | Proposed change 18 | =============== 19 | 20 | To ensure that the OSA project is consumable by other stacks using different 21 | architectures, deployment methods, and capabilities the role 22 | "galera_server" and "galera_client" need to be moved from the monolithic stack 23 | and into the it's own role repository. 24 | 25 | 26 | Alternatives 27 | ------------ 28 | 29 | Leave everything the way it is. However doing that will hurt general OSA 30 | adoption. 31 | 32 | 33 | Playbook/Role impact 34 | -------------------- 35 | 36 | * No impact to the playbooks. 37 | * The role will be removed from the main stack. The plugins, filters, and 38 | libraries may need to be locally updated. 39 | 40 | 41 | Upgrade impact 42 | -------------- 43 | 44 | While the change will impact the placement of the role it will not impact 45 | upgrade-ability of the stack. The general workflow will need to be updated 46 | to ensure that users are updating roles on upgrade using the Ansible 47 | galaxy interface however generally speaking this is already being done for 48 | the deployer when running the ``bootstrap-ansible.sh`` script. 49 | 50 | 51 | Security impact 52 | --------------- 53 | 54 | n/a 55 | 56 | 57 | Performance impact 58 | ------------------ 59 | 60 | Moving the role to an external repository will cause an impact in time to 61 | role resolution however that impact should be minimal. 62 | 63 | 64 | End user impact 65 | --------------- 66 | 67 | n/a 68 | 69 | 70 | Deployer impact 71 | --------------- 72 | 73 | Deployers will need to be aware of the new role locations and how to update 74 | existing roles however this should be minimal considering the tooling for 75 | updating existing roles already exists 76 | 77 | 78 | Developer impact 79 | ---------------- 80 | 81 | Developers will need focus work within the roles which will exist within 82 | separate repositories. 83 | 84 | 85 | Dependencies 86 | ------------ 87 | 88 | n/a 89 | 90 | 91 | Implementation 92 | ============== 93 | 94 | Assignee(s) 95 | ----------- 96 | 97 | Primary assignee: 98 | https://launchpad.net/~kevin-carter (IRC: cloudnull) 99 | 100 | 101 | Work items 102 | ---------- 103 | 104 | * With the role moved, tests will be created within the role via OpenStack CI 105 | to ensure that the role is performing the actions that its supposed to. 106 | * Updated documentation via the "README.rst" will be created to show how the 107 | role can be used standalone. 108 | * Example local inventory will be created to show how the role can be used. 109 | The local only inventory will also be used for testing the role. 110 | 111 | 112 | Testing 113 | ======= 114 | 115 | * The test cases will deploy the role into a regular DSVM image 116 | * The role will execute itself locally 117 | * Once the role has completed an Ansible test play will run through several 118 | assert tasks to ensure the role functioned as intended. 119 | 120 | 121 | Documentation impact 122 | ==================== 123 | 124 | The base README.rst file will be updated to explain how the role can be used 125 | as a standalone role. 
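As a hedged example of the standalone usage that the README would describe
(the repository URL pattern and the inventory group name below are
assumptions about where and how the split-out role would be consumed; the two
files are shown together in one block for brevity):

.. code-block:: yaml

   # ansible-role-requirements.yml (illustrative)
   - name: galera_server
     scm: git
     src: https://git.openstack.org/openstack/openstack-ansible-galera_server
     version: master

.. code-block:: yaml

   # playbook.yml (illustrative) - apply the role against a small inventory
   # that defines a galera_all group.
   - name: Deploy a standalone Galera server
     hosts: galera_all
     roles:
       - role: galera_server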
126 | 127 | 128 | References 129 | ========== 130 | 131 | n/a 132 | -------------------------------------------------------------------------------- /specs/mitaka/irr-lxc_container_create.rst: -------------------------------------------------------------------------------- 1 | IRR - LXC Container Create 2 | ########################## 3 | :date: 2015-11-01 4 | :tags: independent-role-repositories, lxc_container_create 5 | 6 | Split out the lxc container create role into it's own repository. 7 | 8 | 9 | Problem description 10 | =================== 11 | 12 | Roles are all contained within a single monolithic repository making it 13 | impossible/difficult to consume the OSA roles outside of deploying the 14 | entire stack. 15 | 16 | 17 | Proposed change 18 | =============== 19 | 20 | To ensure that the OSA project is consumable by other stacks using different 21 | architectures, deployment methods, and capabilities the role 22 | "lxc_container_create" need to be moved from the monolithic stack and 23 | into the it's own role repository. 24 | 25 | 26 | Alternatives 27 | ------------ 28 | 29 | Leave everything the way it is. However doing that will hurt general OSA 30 | adoption. 31 | 32 | 33 | Playbook/Role impact 34 | -------------------- 35 | 36 | * No impact to the playbooks. 37 | * The role will be removed from the main stack. The plugins, filters, and 38 | libraries may need to be locally updated. 39 | 40 | 41 | Upgrade impact 42 | -------------- 43 | 44 | While the change will impact the placement of the role it will not impact 45 | upgrade-ability of the stack. The general workflow will need to be updated 46 | to ensure that users are updating roles on upgrade using the Ansible 47 | galaxy interface however generally speaking this is already being done for 48 | the deployer when running the ``bootstrap-ansible.sh`` script. 49 | 50 | 51 | Security impact 52 | --------------- 53 | 54 | n/a 55 | 56 | 57 | Performance impact 58 | ------------------ 59 | 60 | Moving the role to an external repository will cause an impact in time to 61 | role resolution however that impact should be minimal. 62 | 63 | 64 | End user impact 65 | --------------- 66 | 67 | n/a 68 | 69 | 70 | Deployer impact 71 | --------------- 72 | 73 | Deployers will need to be aware of the new role locations and how to update 74 | existing roles however this should be minimal considering the tooling for 75 | updating existing roles already exists 76 | 77 | 78 | Developer impact 79 | ---------------- 80 | 81 | Developers will need focus work within the roles which will exist within 82 | separate repositories. 83 | 84 | 85 | Dependencies 86 | ------------ 87 | 88 | The implementation of this spec relies on the following spec(s): 89 | * https://review.openstack.org/#/c/240965 90 | * https://review.openstack.org/#/c/241159 91 | 92 | 93 | Implementation 94 | ============== 95 | 96 | Assignee(s) 97 | ----------- 98 | 99 | Primary assignee: 100 | https://launchpad.net/~kevin-carter (IRC: cloudnull) 101 | 102 | 103 | Work items 104 | ---------- 105 | 106 | * With the role moved, tests will be created within the role via OpenStack CI 107 | to ensure that the role is performing the actions that its supposed to. 108 | * Updated documentation via the "README.rst" will be created to show how the 109 | role can be used standalone. 110 | * Example local inventory will be created to show how the role can be used. 111 | The local only inventory will also be used for testing the role. 
112 | 113 | 114 | Testing 115 | ======= 116 | 117 | * The test cases will deploy the role into a regular DSVM image 118 | * The role will execute itself locally 119 | * Once the role has completed an Ansible test play will run through several 120 | assert tasks to ensure the role functioned as intended. 121 | 122 | 123 | Documentation impact 124 | ==================== 125 | 126 | The base README.rst file will be updated to explain how the role can be used 127 | as a standalone role. 128 | 129 | 130 | References 131 | ========== 132 | 133 | n/a 134 | -------------------------------------------------------------------------------- /specs/mitaka/irr-lxc_host.rst: -------------------------------------------------------------------------------- 1 | IRR - LXC Host 2 | ############## 3 | :date: 2015-11-01 4 | :tags: independent-role-repositories, lxc_host 5 | 6 | Split out the lxc host role into it's own repository. 7 | 8 | 9 | Problem description 10 | =================== 11 | 12 | Roles are all contained within a single monolithic repository making it 13 | impossible/difficult to consume the OSA roles outside of deploying the 14 | entire stack. 15 | 16 | 17 | Proposed change 18 | =============== 19 | 20 | To ensure that the OSA project is consumable by other stacks using different 21 | architectures, deployment methods, and capabilities the role 22 | "lxc_host" need to be moved from the monolithic stack and 23 | into the it's own role repository. 24 | 25 | 26 | Alternatives 27 | ------------ 28 | 29 | Leave everything the way it is. However doing that will hurt general OSA 30 | adoption. 31 | 32 | 33 | Playbook/Role impact 34 | -------------------- 35 | 36 | * No impact to the playbooks. 37 | * The role will be removed from the main stack. The plugins, filters, and 38 | libraries may need to be locally updated. 39 | 40 | 41 | Upgrade impact 42 | -------------- 43 | 44 | While the change will impact the placement of the role it will not impact 45 | upgrade-ability of the stack. The general workflow will need to be updated 46 | to ensure that users are updating roles on upgrade using the Ansible 47 | galaxy interface however generally speaking this is already being done for 48 | the deployer when running the ``bootstrap-ansible.sh`` script. 49 | 50 | 51 | Security impact 52 | --------------- 53 | 54 | n/a 55 | 56 | 57 | Performance impact 58 | ------------------ 59 | 60 | Moving the role to an external repository will cause an impact in time to 61 | role resolution however that impact should be minimal. 62 | 63 | 64 | End user impact 65 | --------------- 66 | 67 | n/a 68 | 69 | 70 | Deployer impact 71 | --------------- 72 | 73 | Deployers will need to be aware of the new role locations and how to update 74 | existing roles however this should be minimal considering the tooling for 75 | updating existing roles already exists 76 | 77 | 78 | Developer impact 79 | ---------------- 80 | 81 | Developers will need focus work within the roles which will exist within 82 | separate repositories. 
83 | 84 | 85 | Dependencies 86 | ------------ 87 | 88 | The implementation of this spec relies on the following spec(s): 89 | * https://review.openstack.org/#/c/240965 90 | 91 | 92 | Implementation 93 | ============== 94 | 95 | Assignee(s) 96 | ----------- 97 | 98 | Primary assignee: 99 | https://launchpad.net/~kevin-carter (IRC: cloudnull) 100 | 101 | 102 | Work items 103 | ---------- 104 | 105 | * With the role moved, tests will be created within the role via OpenStack CI 106 | to ensure that the role is performing the actions that its supposed to. 107 | * Updated documentation via the "README.rst" will be created to show how the 108 | role can be used standalone. 109 | * Example local inventory will be created to show how the role can be used. 110 | The local only inventory will also be used for testing the role. 111 | 112 | 113 | Testing 114 | ======= 115 | 116 | * The test cases will deploy the role into a regular DSVM image 117 | * The role will execute itself locally 118 | * Once the role has completed an Ansible test play will run through several 119 | assert tasks to ensure the role functioned as intended. 120 | 121 | 122 | Documentation impact 123 | ==================== 124 | 125 | The base README.rst file will be updated to explain how the role can be used 126 | as a standalone role. 127 | 128 | 129 | References 130 | ========== 131 | 132 | n/a 133 | -------------------------------------------------------------------------------- /specs/mitaka/irr-memcached_server.rst: -------------------------------------------------------------------------------- 1 | IRR - Memcached Server 2 | ###################### 3 | :date: 2015-11-01 4 | :tags: independent-role-repositories, memcached_server 5 | 6 | Split out the memcached_server role into it's own repository. 7 | 8 | 9 | Problem description 10 | =================== 11 | 12 | Roles are all contained within a single monolithic repository making it 13 | impossible/difficult to consume the OSA roles outside of deploying the 14 | entire stack. 15 | 16 | 17 | Proposed change 18 | =============== 19 | 20 | To ensure that the OSA project is consumable by other stacks using different 21 | architectures, deployment methods, and capabilities the role 22 | "memcached_server" need to be moved from the monolithic stack and 23 | into the it's own role repository. 24 | 25 | 26 | Alternatives 27 | ------------ 28 | 29 | Leave everything the way it is. However doing that will hurt general OSA 30 | adoption. 31 | 32 | 33 | Playbook/Role impact 34 | -------------------- 35 | 36 | * No impact to the playbooks. 37 | * The role will be removed from the main stack. The plugins, filters, and 38 | libraries may need to be locally updated. 39 | 40 | 41 | Upgrade impact 42 | -------------- 43 | 44 | While the change will impact the placement of the role it will not impact 45 | upgrade-ability of the stack. The general workflow will need to be updated 46 | to ensure that users are updating roles on upgrade using the Ansible 47 | galaxy interface however generally speaking this is already being done for 48 | the deployer when running the ``bootstrap-ansible.sh`` script. 49 | 50 | 51 | Security impact 52 | --------------- 53 | 54 | n/a 55 | 56 | 57 | Performance impact 58 | ------------------ 59 | 60 | Moving the role to an external repository will cause an impact in time to 61 | role resolution however that impact should be minimal. 
62 | 63 | 64 | End user impact 65 | --------------- 66 | 67 | n/a 68 | 69 | 70 | Deployer impact 71 | --------------- 72 | 73 | Deployers will need to be aware of the new role locations and how to update 74 | existing roles however this should be minimal considering the tooling for 75 | updating existing roles already exists 76 | 77 | 78 | Developer impact 79 | ---------------- 80 | 81 | Developers will need focus work within the roles which will exist within 82 | separate repositories. 83 | 84 | 85 | Dependencies 86 | ------------ 87 | 88 | n/a 89 | 90 | 91 | Implementation 92 | ============== 93 | 94 | Assignee(s) 95 | ----------- 96 | 97 | Primary assignee: 98 | https://launchpad.net/~kevin-carter (IRC: cloudnull) 99 | 100 | 101 | Work items 102 | ---------- 103 | 104 | * With the role moved, tests will be created within the role via OpenStack CI 105 | to ensure that the role is performing the actions that its supposed to. 106 | * Updated documentation via the "README.rst" will be created to show how the 107 | role can be used standalone. 108 | * Example local inventory will be created to show how the role can be used. 109 | The local only inventory will also be used for testing the role. 110 | 111 | 112 | Testing 113 | ======= 114 | 115 | * The test cases will deploy the role into a regular DSVM image 116 | * The role will execute itself locally 117 | * Once the role has completed an Ansible test play will run through several 118 | assert tasks to ensure the role functioned as intended. 119 | 120 | 121 | Documentation impact 122 | ==================== 123 | 124 | The base README.rst file will be updated to explain how the role can be used 125 | as a standalone role. 126 | 127 | 128 | References 129 | ========== 130 | 131 | n/a 132 | -------------------------------------------------------------------------------- /specs/mitaka/irr-openstack_hosts.rst: -------------------------------------------------------------------------------- 1 | IRR - OpenStack Hosts 2 | ##################### 3 | :date: 2015-11-01 4 | :tags: independent-role-repositories, openstack_hosts 5 | 6 | Split out the OpenStack hosts role into it's own repository. 7 | 8 | 9 | Problem description 10 | =================== 11 | 12 | Roles are all contained within a single monolithic repository making it 13 | impossible/difficult to consume the OSA roles outside of deploying the 14 | entire stack. 15 | 16 | 17 | Proposed change 18 | =============== 19 | 20 | To ensure that the OSA project is consumable by other stacks using different 21 | architectures, deployment methods, and capabilities the role 22 | "openstack_hosts" need to be moved from the monolithic stack and 23 | into the it's own role repository. 24 | 25 | 26 | Alternatives 27 | ------------ 28 | 29 | Leave everything the way it is. However doing that will hurt general OSA 30 | adoption. 31 | 32 | 33 | Playbook/Role impact 34 | -------------------- 35 | 36 | * No impact to the playbooks. 37 | * The role will be removed from the main stack. The plugins, filters, and 38 | libraries may need to be locally updated. 39 | 40 | 41 | Upgrade impact 42 | -------------- 43 | 44 | While the change will impact the placement of the role it will not impact 45 | upgrade-ability of the stack. The general workflow will need to be updated 46 | to ensure that users are updating roles on upgrade using the Ansible 47 | galaxy interface however generally speaking this is already being done for 48 | the deployer when running the ``bootstrap-ansible.sh`` script. 
49 | 50 | 51 | Security impact 52 | --------------- 53 | 54 | n/a 55 | 56 | 57 | Performance impact 58 | ------------------ 59 | 60 | Moving the role to an external repository will cause an impact in time to 61 | role resolution however that impact should be minimal. 62 | 63 | 64 | End user impact 65 | --------------- 66 | 67 | n/a 68 | 69 | 70 | Deployer impact 71 | --------------- 72 | 73 | Deployers will need to be aware of the new role locations and how to update 74 | existing roles however this should be minimal considering the tooling for 75 | updating existing roles already exists 76 | 77 | 78 | Developer impact 79 | ---------------- 80 | 81 | Developers will need focus work within the roles which will exist within 82 | separate repositories. 83 | 84 | 85 | Dependencies 86 | ------------ 87 | 88 | The implementation of this spec relies on the following spec(s): 89 | * https://review.openstack.org/#/c/240965 90 | 91 | 92 | Implementation 93 | ============== 94 | 95 | Assignee(s) 96 | ----------- 97 | 98 | Primary assignee: 99 | https://launchpad.net/~kevin-carter (IRC: cloudnull) 100 | 101 | 102 | Work items 103 | ---------- 104 | 105 | * With the role moved, tests will be created within the role via OpenStack CI 106 | to ensure that the role is performing the actions that its supposed to. 107 | * Updated documentation via the "README.rst" will be created to show how the 108 | role can be used standalone. 109 | * Example local inventory will be created to show how the role can be used. 110 | The local only inventory will also be used for testing the role. 111 | 112 | 113 | Testing 114 | ======= 115 | 116 | * The test cases will deploy the role into a regular DSVM image 117 | * The role will execute itself locally 118 | * Once the role has completed an Ansible test play will run through several 119 | assert tasks to ensure the role functioned as intended. 120 | 121 | 122 | Documentation impact 123 | ==================== 124 | 125 | The base README.rst file will be updated to explain how the role can be used 126 | as a standalone role. 127 | 128 | 129 | References 130 | ========== 131 | 132 | n/a 133 | -------------------------------------------------------------------------------- /specs/mitaka/irr-pip_install.rst: -------------------------------------------------------------------------------- 1 | IRR - pip install 2 | ################# 3 | :date: 2015-11-01 4 | :tags: independent-role-repositories, pip_install 5 | 6 | Split out the pip install role into it's own repository. 7 | 8 | 9 | Problem description 10 | =================== 11 | 12 | Roles are all contained within a single monolithic repository making it 13 | impossible/difficult to consume the OSA roles outside of deploying the 14 | entire stack. 15 | 16 | 17 | Proposed change 18 | =============== 19 | 20 | To ensure that the OSA project is consumable by other stacks using different 21 | architectures, deployment methods, and capabilities the role 22 | "pip_install" need to be moved from the monolithic stack and 23 | into the it's own role repository. 24 | 25 | 26 | Alternatives 27 | ------------ 28 | 29 | Leave everything the way it is. However doing that will hurt general OSA 30 | adoption. 31 | 32 | 33 | Playbook/Role impact 34 | -------------------- 35 | 36 | * No impact to the playbooks. 37 | * The role will be removed from the main stack. The plugins, filters, and 38 | libraries may need to be locally updated. 
39 | 40 | 41 | Upgrade impact 42 | -------------- 43 | 44 | While the change will impact the placement of the role it will not impact 45 | upgrade-ability of the stack. The general workflow will need to be updated 46 | to ensure that users are updating roles on upgrade using the Ansible 47 | galaxy interface however generally speaking this is already being done for 48 | the deployer when running the ``bootstrap-ansible.sh`` script. 49 | 50 | 51 | Security impact 52 | --------------- 53 | 54 | n/a 55 | 56 | 57 | Performance impact 58 | ------------------ 59 | 60 | Moving the role to an external repository will cause an impact in time to 61 | role resolution however that impact should be minimal. 62 | 63 | 64 | End user impact 65 | --------------- 66 | 67 | n/a 68 | 69 | 70 | Deployer impact 71 | --------------- 72 | 73 | Deployers will need to be aware of the new role locations and how to update 74 | existing roles however this should be minimal considering the tooling for 75 | updating existing roles already exists 76 | 77 | 78 | Developer impact 79 | ---------------- 80 | 81 | Developers will need focus work within the roles which will exist within 82 | separate repositories. 83 | 84 | 85 | Dependencies 86 | ------------ 87 | 88 | n/a 89 | 90 | 91 | Implementation 92 | ============== 93 | 94 | Assignee(s) 95 | ----------- 96 | 97 | Primary assignee: 98 | https://launchpad.net/~kevin-carter (IRC: cloudnull) 99 | 100 | 101 | Work items 102 | ---------- 103 | 104 | * With the role moved, tests will be created within the role via OpenStack CI 105 | to ensure that the role is performing the actions that its supposed to. 106 | * Updated documentation via the "README.rst" will be created to show how the 107 | role can be used standalone. 108 | * Example local inventory will be created to show how the role can be used. 109 | The local only inventory will also be used for testing the role. 110 | 111 | 112 | Testing 113 | ======= 114 | 115 | * The test cases will deploy the role into a regular DSVM image 116 | * The role will execute itself locally 117 | * Once the role has completed an Ansible test play will run through several 118 | assert tasks to ensure the role functioned as intended. 119 | 120 | 121 | Documentation impact 122 | ==================== 123 | 124 | The base README.rst file will be updated to explain how the role can be used 125 | as a standalone role. 126 | 127 | 128 | References 129 | ========== 130 | 131 | n/a 132 | -------------------------------------------------------------------------------- /specs/mitaka/irr-pip_lock_down.rst: -------------------------------------------------------------------------------- 1 | IRR - pip_lock_down 2 | ################### 3 | :date: 2015-11-01 4 | :tags: independent-role-repositories, pip_lock_down 5 | 6 | Split out the pip_lock_down role into it's own repository. 7 | 8 | 9 | Problem description 10 | =================== 11 | 12 | Roles are all contained within a single monolithic repository making it 13 | impossible/difficult to consume the OSA roles outside of deploying the 14 | entire stack. 15 | 16 | 17 | Proposed change 18 | =============== 19 | 20 | To ensure that the OSA project is consumable by other stacks using different 21 | architectures, deployment methods, and capabilities the role 22 | "pip_lock_down" need to be moved from the monolithic stack and 23 | into the it's own role repository. 24 | 25 | 26 | Alternatives 27 | ------------ 28 | 29 | Leave everything the way it is. However doing that will hurt general OSA 30 | adoption. 
31 | 32 | 33 | Playbook/Role impact 34 | -------------------- 35 | 36 | * No impact to the playbooks. 37 | * The role will be removed from the main stack. The plugins, filters, and 38 | libraries may need to be locally updated. 39 | 40 | 41 | Upgrade impact 42 | -------------- 43 | 44 | While the change will impact the placement of the role it will not impact 45 | upgrade-ability of the stack. The general workflow will need to be updated 46 | to ensure that users are updating roles on upgrade using the Ansible 47 | galaxy interface however generally speaking this is already being done for 48 | the deployer when running the ``bootstrap-ansible.sh`` script. 49 | 50 | 51 | Security impact 52 | --------------- 53 | 54 | n/a 55 | 56 | 57 | Performance impact 58 | ------------------ 59 | 60 | Moving the role to an external repository will cause an impact in time to 61 | role resolution however that impact should be minimal. 62 | 63 | 64 | End user impact 65 | --------------- 66 | 67 | n/a 68 | 69 | 70 | Deployer impact 71 | --------------- 72 | 73 | Deployers will need to be aware of the new role locations and how to update 74 | existing roles however this should be minimal considering the tooling for 75 | updating existing roles already exists 76 | 77 | 78 | Developer impact 79 | ---------------- 80 | 81 | Developers will need focus work within the roles which will exist within 82 | separate repositories. 83 | 84 | 85 | Dependencies 86 | ------------ 87 | 88 | n/a 89 | 90 | 91 | Implementation 92 | ============== 93 | 94 | Assignee(s) 95 | ----------- 96 | 97 | Primary assignee: 98 | https://launchpad.net/~kevin-carter (IRC: cloudnull) 99 | 100 | 101 | Work items 102 | ---------- 103 | 104 | * With the role moved, tests will be created within the role via OpenStack CI 105 | to ensure that the role is performing the actions that its supposed to. 106 | * Updated documentation via the "README.rst" will be created to show how the 107 | role can be used standalone. 108 | * Example local inventory will be created to show how the role can be used. 109 | The local only inventory will also be used for testing the role. 110 | 111 | 112 | Testing 113 | ======= 114 | 115 | * The test cases will deploy the role into a regular DSVM image 116 | * The role will execute itself locally 117 | * Once the role has completed an Ansible test play will run through several 118 | assert tasks to ensure the role functioned as intended. 119 | 120 | 121 | Documentation impact 122 | ==================== 123 | 124 | The base README.rst file will be updated to explain how the role can be used 125 | as a standalone role. 126 | 127 | 128 | References 129 | ========== 130 | 131 | n/a 132 | -------------------------------------------------------------------------------- /specs/mitaka/irr-rabbitmq.rst: -------------------------------------------------------------------------------- 1 | IRR - RabbitMQ server 2 | ##################### 3 | :date: 2015-11-01 4 | :tags: independent-role-repositories, rabbitmq_server 5 | 6 | Split out the rabbitmq_server role into it's own repository. 7 | 8 | 9 | Problem description 10 | =================== 11 | 12 | Roles are all contained within a single monolithic repository making it 13 | impossible/difficult to consume the OSA roles outside of deploying the 14 | entire stack. 
15 | 16 | 17 | Proposed change 18 | =============== 19 | 20 | To ensure that the OSA project is consumable by other stacks using different 21 | architectures, deployment methods, and capabilities the role 22 | "rabbitmq_server" need to be moved from the monolithic stack and 23 | into the it's own role repository. 24 | 25 | 26 | Alternatives 27 | ------------ 28 | 29 | Leave everything the way it is. However doing that will hurt general OSA 30 | adoption. 31 | 32 | 33 | Playbook/Role impact 34 | -------------------- 35 | 36 | * No impact to the playbooks. 37 | * The role will be removed from the main stack. The plugins, filters, and 38 | libraries may need to be locally updated. 39 | 40 | 41 | Upgrade impact 42 | -------------- 43 | 44 | While the change will impact the placement of the role it will not impact 45 | upgrade-ability of the stack. The general workflow will need to be updated 46 | to ensure that users are updating roles on upgrade using the Ansible 47 | galaxy interface however generally speaking this is already being done for 48 | the deployer when running the ``bootstrap-ansible.sh`` script. 49 | 50 | 51 | Security impact 52 | --------------- 53 | 54 | n/a 55 | 56 | 57 | Performance impact 58 | ------------------ 59 | 60 | Moving the role to an external repository will cause an impact in time to 61 | role resolution however that impact should be minimal. 62 | 63 | 64 | End user impact 65 | --------------- 66 | 67 | n/a 68 | 69 | 70 | Deployer impact 71 | --------------- 72 | 73 | Deployers will need to be aware of the new role locations and how to update 74 | existing roles however this should be minimal considering the tooling for 75 | updating existing roles already exists 76 | 77 | 78 | Developer impact 79 | ---------------- 80 | 81 | Developers will need focus work within the roles which will exist within 82 | separate repositories. 83 | 84 | 85 | Dependencies 86 | ------------ 87 | 88 | n/a 89 | 90 | 91 | Implementation 92 | ============== 93 | 94 | Assignee(s) 95 | ----------- 96 | 97 | Primary assignee: 98 | https://launchpad.net/~kevin-carter (IRC: cloudnull) 99 | 100 | 101 | Work items 102 | ---------- 103 | 104 | * With the role moved, tests will be created within the role via OpenStack CI 105 | to ensure that the role is performing the actions that its supposed to. 106 | * Updated documentation via the "README.rst" will be created to show how the 107 | role can be used standalone. 108 | * Example local inventory will be created to show how the role can be used. 109 | The local only inventory will also be used for testing the role. 110 | 111 | 112 | Testing 113 | ======= 114 | 115 | * The test cases will deploy the role into a regular DSVM image 116 | * The role will execute itself locally 117 | * Once the role has completed an Ansible test play will run through several 118 | assert tasks to ensure the role functioned as intended. 119 | 120 | 121 | Documentation impact 122 | ==================== 123 | 124 | The base README.rst file will be updated to explain how the role can be used 125 | as a standalone role. 126 | 127 | 128 | References 129 | ========== 130 | 131 | n/a 132 | -------------------------------------------------------------------------------- /specs/mitaka/irr-repo_server.rst: -------------------------------------------------------------------------------- 1 | IRR - Repo Server 2 | ################# 3 | :date: 2015-11-01 4 | :tags: independent-role-repositories, repo_server 5 | 6 | Split out the repo server role into it's own repository. 
7 | 8 | 9 | Problem description 10 | =================== 11 | 12 | Roles are all contained within a single monolithic repository making it 13 | impossible/difficult to consume the OSA roles outside of deploying the 14 | entire stack. 15 | 16 | 17 | Proposed change 18 | =============== 19 | 20 | To ensure that the OSA project is consumable by other stacks using different 21 | architectures, deployment methods, and capabilities the role 22 | "repo_server" need to be moved from the monolithic stack and 23 | into the it's own role repository. 24 | 25 | 26 | Alternatives 27 | ------------ 28 | 29 | Leave everything the way it is. However doing that will hurt general OSA 30 | adoption. 31 | 32 | 33 | Playbook/Role impact 34 | -------------------- 35 | 36 | * No impact to the playbooks. 37 | * The role will be removed from the main stack. The plugins, filters, and 38 | libraries may need to be locally updated. 39 | 40 | 41 | Upgrade impact 42 | -------------- 43 | 44 | While the change will impact the placement of the role it will not impact 45 | upgrade-ability of the stack. The general workflow will need to be updated 46 | to ensure that users are updating roles on upgrade using the Ansible 47 | galaxy interface however generally speaking this is already being done for 48 | the deployer when running the ``bootstrap-ansible.sh`` script. 49 | 50 | 51 | Security impact 52 | --------------- 53 | 54 | n/a 55 | 56 | 57 | Performance impact 58 | ------------------ 59 | 60 | Moving the role to an external repository will cause an impact in time to 61 | role resolution however that impact should be minimal. 62 | 63 | 64 | End user impact 65 | --------------- 66 | 67 | n/a 68 | 69 | 70 | Deployer impact 71 | --------------- 72 | 73 | Deployers will need to be aware of the new role locations and how to update 74 | existing roles however this should be minimal considering the tooling for 75 | updating existing roles already exists 76 | 77 | 78 | Developer impact 79 | ---------------- 80 | 81 | Developers will need focus work within the roles which will exist within 82 | separate repositories. 83 | 84 | 85 | Dependencies 86 | ------------ 87 | 88 | n/a 89 | 90 | 91 | Implementation 92 | ============== 93 | 94 | Assignee(s) 95 | ----------- 96 | 97 | Primary assignee: 98 | https://launchpad.net/~kevin-carter (IRC: cloudnull) 99 | 100 | 101 | Work items 102 | ---------- 103 | 104 | * With the role moved, tests will be created within the role via OpenStack CI 105 | to ensure that the role is performing the actions that its supposed to. 106 | * Updated documentation via the "README.rst" will be created to show how the 107 | role can be used standalone. 108 | * Example local inventory will be created to show how the role can be used. 109 | The local only inventory will also be used for testing the role. 110 | 111 | 112 | Testing 113 | ======= 114 | 115 | * The test cases will deploy the role into a regular DSVM image 116 | * The role will execute itself locally 117 | * Once the role has completed an Ansible test play will run through several 118 | assert tasks to ensure the role functioned as intended. 119 | 120 | 121 | Documentation impact 122 | ==================== 123 | 124 | The base README.rst file will be updated to explain how the role can be used 125 | as a standalone role. 
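The sort of standalone usage the README.rst could describe might look similar to the following sketch; the role name, group name and connection details are illustrative only, not the documented interface::

    # Hypothetical standalone play applying the split-out role
    - name: Deploy a repo server
      hosts: repo_all
      user: root
      roles:
        - role: "repo_server"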
126 | 127 | 128 | References 129 | ========== 130 | 131 | n/a 132 | -------------------------------------------------------------------------------- /specs/mitaka/irr-rsyslog_client.rst: -------------------------------------------------------------------------------- 1 | IRR - rsyslog client 2 | #################### 3 | :date: 2015-11-01 4 | :tags: independent-role-repositories, rsyslog_client 5 | 6 | Split out the rsyslog client role into it's own repository. 7 | 8 | 9 | Problem description 10 | =================== 11 | 12 | Roles are all contained within a single monolithic repository making it 13 | impossible/difficult to consume the OSA roles outside of deploying the 14 | entire stack. 15 | 16 | 17 | Proposed change 18 | =============== 19 | 20 | To ensure that the OSA project is consumable by other stacks using different 21 | architectures, deployment methods, and capabilities the role 22 | "rsyslog_client" need to be moved from the monolithic stack and 23 | into the it's own role repository. 24 | 25 | 26 | Alternatives 27 | ------------ 28 | 29 | Leave everything the way it is. However doing that will hurt general OSA 30 | adoption. 31 | 32 | 33 | Playbook/Role impact 34 | -------------------- 35 | 36 | * No impact to the playbooks. 37 | * The role will be removed from the main stack. The plugins, filters, and 38 | libraries may need to be locally updated. 39 | 40 | 41 | Upgrade impact 42 | -------------- 43 | 44 | While the change will impact the placement of the role it will not impact 45 | upgrade-ability of the stack. The general workflow will need to be updated 46 | to ensure that users are updating roles on upgrade using the Ansible 47 | galaxy interface however generally speaking this is already being done for 48 | the deployer when running the ``bootstrap-ansible.sh`` script. 49 | 50 | 51 | Security impact 52 | --------------- 53 | 54 | n/a 55 | 56 | 57 | Performance impact 58 | ------------------ 59 | 60 | Moving the role to an external repository will cause an impact in time to 61 | role resolution however that impact should be minimal. 62 | 63 | 64 | End user impact 65 | --------------- 66 | 67 | n/a 68 | 69 | 70 | Deployer impact 71 | --------------- 72 | 73 | Deployers will need to be aware of the new role locations and how to update 74 | existing roles however this should be minimal considering the tooling for 75 | updating existing roles already exists 76 | 77 | 78 | Developer impact 79 | ---------------- 80 | 81 | Developers will need focus work within the roles which will exist within 82 | separate repositories. 83 | 84 | 85 | Dependencies 86 | ------------ 87 | 88 | n/a 89 | 90 | 91 | Implementation 92 | ============== 93 | 94 | Assignee(s) 95 | ----------- 96 | 97 | Primary assignee: 98 | https://launchpad.net/~kevin-carter (IRC: cloudnull) 99 | 100 | 101 | Work items 102 | ---------- 103 | 104 | * With the role moved, tests will be created within the role via OpenStack CI 105 | to ensure that the role is performing the actions that its supposed to. 106 | * Updated documentation via the "README.rst" will be created to show how the 107 | role can be used standalone. 108 | * Example local inventory will be created to show how the role can be used. 109 | The local only inventory will also be used for testing the role. 
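A local-only test play of the kind described in the work items above might look roughly like the sketch below; the role name, file path and assert condition are placeholders rather than the final test content::

    # Hypothetical local test play run against a localhost-only inventory
    - name: Apply the rsyslog_client role locally
      hosts: localhost
      connection: local
      roles:
        - role: "rsyslog_client"

    - name: Verify the role results
      hosts: localhost
      connection: local
      tasks:
        - name: Check that the rsyslog client configuration was rendered
          stat:
            path: "/etc/rsyslog.d/99-rsyslog-client.conf"
          register: rsyslog_conf

        - name: Assert the configuration file exists
          assert:
            that:
              - rsyslog_conf.stat.exists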
110 | 111 | 112 | Testing 113 | ======= 114 | 115 | * The test cases will deploy the role into a regular DSVM image 116 | * The role will execute itself locally 117 | * Once the role has completed an Ansible test play will run through several 118 | assert tasks to ensure the role functioned as intended. 119 | 120 | 121 | Documentation impact 122 | ==================== 123 | 124 | The base README.rst file will be updated to explain how the role can be used 125 | as a standalone role. 126 | 127 | 128 | References 129 | ========== 130 | 131 | n/a 132 | 133 | -------------------------------------------------------------------------------- /specs/mitaka/irr-rsyslog_server.rst: -------------------------------------------------------------------------------- 1 | IRR - rsyslog_server 2 | #################### 3 | :date: 2015-11-01 4 | :tags: independent-role-repositories, rsyslog_server 5 | 6 | Split out the rsyslog_server role into it's own repository. 7 | 8 | 9 | Problem description 10 | =================== 11 | 12 | Roles are all contained within a single monolithic repository making it 13 | impossible/difficult to consume the OSA roles outside of deploying the 14 | entire stack. 15 | 16 | 17 | Proposed change 18 | =============== 19 | 20 | To ensure that the OSA project is consumable by other stacks using different 21 | architectures, deployment methods, and capabilities the role 22 | "rsyslog_server" need to be moved from the monolithic stack and 23 | into the it's own role repository. 24 | 25 | 26 | Alternatives 27 | ------------ 28 | 29 | Leave everything the way it is. However doing that will hurt general OSA 30 | adoption. 31 | 32 | 33 | Playbook/Role impact 34 | -------------------- 35 | 36 | * No impact to the playbooks. 37 | * The role will be removed from the main stack. The plugins, filters, and 38 | libraries may need to be locally updated. 39 | 40 | 41 | Upgrade impact 42 | -------------- 43 | 44 | While the change will impact the placement of the role it will not impact 45 | upgrade-ability of the stack. The general workflow will need to be updated 46 | to ensure that users are updating roles on upgrade using the Ansible 47 | galaxy interface however generally speaking this is already being done for 48 | the deployer when running the ``bootstrap-ansible.sh`` script. 49 | 50 | 51 | Security impact 52 | --------------- 53 | 54 | n/a 55 | 56 | 57 | Performance impact 58 | ------------------ 59 | 60 | Moving the role to an external repository will cause an impact in time to 61 | role resolution however that impact should be minimal. 62 | 63 | 64 | End user impact 65 | --------------- 66 | 67 | n/a 68 | 69 | 70 | Deployer impact 71 | --------------- 72 | 73 | Deployers will need to be aware of the new role locations and how to update 74 | existing roles however this should be minimal considering the tooling for 75 | updating existing roles already exists 76 | 77 | 78 | Developer impact 79 | ---------------- 80 | 81 | Developers will need focus work within the roles which will exist within 82 | separate repositories. 83 | 84 | 85 | Dependencies 86 | ------------ 87 | 88 | n/a 89 | 90 | 91 | Implementation 92 | ============== 93 | 94 | Assignee(s) 95 | ----------- 96 | 97 | Primary assignee: 98 | https://launchpad.net/~kevin-carter (IRC: cloudnull) 99 | 100 | 101 | Work items 102 | ---------- 103 | 104 | * With the role moved, tests will be created within the role via OpenStack CI 105 | to ensure that the role is performing the actions that its supposed to. 
106 | * Updated documentation via the "README.rst" will be created to show how the 107 | role can be used standalone. 108 | * Example local inventory will be created to show how the role can be used. 109 | The local only inventory will also be used for testing the role. 110 | 111 | 112 | Testing 113 | ======= 114 | 115 | * The test cases will deploy the role into a regular DSVM image 116 | * The role will execute itself locally 117 | * Once the role has completed an Ansible test play will run through several 118 | assert tasks to ensure the role functioned as intended. 119 | 120 | 121 | Documentation impact 122 | ==================== 123 | 124 | The base README.rst file will be updated to explain how the role can be used 125 | as a standalone role. 126 | 127 | 128 | References 129 | ========== 130 | 131 | n/a 132 | -------------------------------------------------------------------------------- /specs/mitaka/irr-utility.rst: -------------------------------------------------------------------------------- 1 | IRR - Utility 2 | ############# 3 | :date: 2015-11-01 4 | :tags: independent-role-repositories, utility 5 | 6 | Split out the utility role into it's own repository. 7 | 8 | 9 | Problem description 10 | =================== 11 | 12 | Roles are all contained within a single monolithic repository making it 13 | impossible/difficult to consume the OSA roles outside of deploying the 14 | entire stack. 15 | 16 | 17 | Proposed change 18 | =============== 19 | 20 | To ensure that the OSA project is consumable by other stacks using different 21 | architectures, deployment methods, and capabilities the role 22 | "utility" need to be moved from the monolithic stack and 23 | into the it's own role repository. 24 | 25 | 26 | Alternatives 27 | ------------ 28 | 29 | Leave everything the way it is. However doing that will hurt general OSA 30 | adoption. 31 | 32 | 33 | Playbook/Role impact 34 | -------------------- 35 | 36 | * No impact to the playbooks. 37 | * The role will be removed from the main stack. The plugins, filters, and 38 | libraries may need to be locally updated. 39 | 40 | 41 | Upgrade impact 42 | -------------- 43 | 44 | While the change will impact the placement of the role it will not impact 45 | upgrade-ability of the stack. The general workflow will need to be updated 46 | to ensure that users are updating roles on upgrade using the Ansible 47 | galaxy interface however generally speaking this is already being done for 48 | the deployer when running the ``bootstrap-ansible.sh`` script. 49 | 50 | 51 | Security impact 52 | --------------- 53 | 54 | n/a 55 | 56 | 57 | Performance impact 58 | ------------------ 59 | 60 | Moving the role to an external repository will cause an impact in time to 61 | role resolution however that impact should be minimal. 62 | 63 | 64 | End user impact 65 | --------------- 66 | 67 | n/a 68 | 69 | 70 | Deployer impact 71 | --------------- 72 | 73 | Deployers will need to be aware of the new role locations and how to update 74 | existing roles however this should be minimal considering the tooling for 75 | updating existing roles already exists 76 | 77 | 78 | Developer impact 79 | ---------------- 80 | 81 | Developers will need focus work within the roles which will exist within 82 | separate repositories. 
83 | 84 | 85 | Dependencies 86 | ------------ 87 | 88 | n/a 89 | 90 | 91 | Implementation 92 | ============== 93 | 94 | Assignee(s) 95 | ----------- 96 | 97 | Primary assignee: 98 | https://launchpad.net/~kevin-carter (IRC: cloudnull) 99 | 100 | 101 | Work items 102 | ---------- 103 | 104 | * With the role moved, tests will be created within the role via OpenStack CI 105 | to ensure that the role is performing the actions that its supposed to. 106 | * Updated documentation via the "README.rst" will be created to show how the 107 | role can be used standalone. 108 | * Example local inventory will be created to show how the role can be used. 109 | The local only inventory will also be used for testing the role. 110 | 111 | 112 | Testing 113 | ======= 114 | 115 | * The test cases will deploy the role into a regular DSVM image 116 | * The role will execute itself locally 117 | * Once the role has completed an Ansible test play will run through several 118 | assert tasks to ensure the role functioned as intended. 119 | 120 | 121 | Documentation impact 122 | ==================== 123 | 124 | The base README.rst file will be updated to explain how the role can be used 125 | as a standalone role. 126 | 127 | 128 | References 129 | ========== 130 | 131 | n/a 132 | -------------------------------------------------------------------------------- /specs/mitaka/limit-mysql-config-distribution.rst: -------------------------------------------------------------------------------- 1 | Limit Mysql Config Distribution 2 | ############################### 3 | :date: 2015-07-20 4 | :tags: mysql, galera 5 | 6 | 7 | * https://blueprints.launchpad.net/openstack-ansible/+spec/limit-mysql-config-distribution 8 | 9 | 10 | Problem description 11 | =================== 12 | 13 | The distribution of the ``.my.cnf`` file should be limited to API nodes and the 14 | utility container. 15 | 16 | 17 | Proposed change 18 | =============== 19 | 20 | * Add a variable to the galera_client role to limit the distribution of the ``.my.cnf`` 21 | file. 22 | 23 | 24 | Alternatives 25 | ------------ 26 | 27 | Leave everything the way it is. 28 | 29 | 30 | Playbook/Role impact 31 | -------------------- 32 | 33 | This will change the galera_client and "os_*" roles to ensure that the ``.my.cnf`` 34 | files are only distributed to a limited set of hosts. 35 | 36 | 37 | Upgrade impact 38 | -------------- 39 | 40 | n/a 41 | 42 | 43 | Security impact 44 | --------------- 45 | 46 | By limiting the distribution of the ``.my.cnf`` file we should be able to improve general 47 | system security. 48 | 49 | 50 | Performance impact 51 | ------------------ 52 | 53 | n/a 54 | 55 | 56 | End user impact 57 | --------------- 58 | 59 | n/a 60 | 61 | 62 | Deployer impact 63 | --------------- 64 | 65 | n/a 66 | 67 | 68 | Developer impact 69 | ---------------- 70 | 71 | n/a 72 | 73 | 74 | Dependencies 75 | ------------ 76 | 77 | n/a 78 | 79 | Implementation 80 | ============== 81 | 82 | Assignee(s) 83 | ----------- 84 | 85 | Primary assignee: (unassigned) 86 | 87 | 88 | Work items 89 | ---------- 90 | 91 | * Add a variable to the galera_client role to disable the task "Drop local .my.cnf file" 92 | * Change the meta entries where the **galera_client** roles is used use the new variable 93 | where appropriate. 94 | 95 | 96 | Testing 97 | ======= 98 | 99 | This will be tested within every gate check for functionality. 
100 | 101 | 102 | Documentation impact 103 | ==================== 104 | 105 | n/a 106 | 107 | 108 | References 109 | ========== 110 | 111 | Bug reference for the change: 112 | * https://bugs.launchpad.net/openstack-ansible/trunk/+bug/1412393 113 | -------------------------------------------------------------------------------- /specs/mitaka/modularize-config.rst: -------------------------------------------------------------------------------- 1 | Modularize configuration files 2 | ############################## 3 | :date: 2015-05-08 00:00 4 | :tags: config, configuration, modularize, modular 5 | 6 | Modularize deployment configuration files to simplify the configuration 7 | process. 8 | 9 | Blueprint: 10 | 11 | https://blueprints.launchpad.net/openstack-ansible/+spec/modularize-config 12 | 13 | Problem description 14 | =================== 15 | 16 | Deployment configuration primarily occurs in a rather monolithic 17 | ``openstack_user_config.yml`` file. Although adding documentation 18 | to this file eases understanding of the various levels and options, 19 | it also increases the size and apparant complexity, especially for 20 | larger deployments. With the addition of swift, the configuration 21 | structure already supports configuration files in the conf.d directory. 22 | Splitting the main monolithic configuration file into smaller files 23 | containing similar components helps overall organization, especially 24 | for larger deployments. 25 | 26 | 27 | Proposed change 28 | =============== 29 | 30 | Similar to swift, modularize similar sections of configuration files, 31 | particularly ``openstack_user_config.yml``, into the following separate 32 | files in the conf.d directory. 33 | 34 | * hosts.yml 35 | 36 | * Includes configuration for target hosts with simple options. For 37 | example, ``repo_hosts`` typically contains only a list of hosts. 38 | In comparison, ``storage_hosts`` requires significantly more options 39 | and should therefore use a separate file. 40 | * Contains the following levels: 41 | 42 | * ``shared-infra_hosts`` 43 | * ``repo_hosts`` 44 | * ``os-infra_hosts`` 45 | * ``identity_hosts`` 46 | * ``network_hosts`` 47 | * ``compute_hosts`` 48 | * ``storage-infra_hosts`` 49 | * ``swift_proxy-hosts`` 50 | * ``log_hosts`` 51 | 52 | .. note:: 53 | For consistency, consider changing ``swift_proxy-hosts`` to 54 | ``swift-proxy_hosts`` and ``swift_hosts`` to ``swift-storage_hosts``. 55 | 56 | * networking.yml 57 | 58 | * Includes host networks, IP address blacklist for inventory generator, 59 | load balancer options, and provider networks. 60 | * Contains the following levels: 61 | 62 | * ``cidr_networks`` 63 | * ``used_ips`` 64 | * ``provider_networks`` (from ``global_overrides``) 65 | * ``internal_lb_vip_address``, ``external_lb_vip_address``, 66 | ``management_bridge``, and ``tunnel_bridge`` (from ``global_overrides``) 67 | 68 | * cinder_storage_hosts.yml 69 | 70 | * Includes configuration for cinder storage target hosts with complex 71 | options for backends. 72 | * Contains the following level: 73 | * ``storage_hosts`` 74 | 75 | * swift_storage_hosts.yml 76 | 77 | * Includes configuration for swift storage target hosts with complex 78 | options. 79 | * Contains the following levels: 80 | * ``swift`` (from ``global_overrides``) 81 | * ``swift_hosts`` 82 | 83 | 84 | Alternatives 85 | ------------ 86 | 87 | Use a different strategy to modularize the configuration files or keep the 88 | existing monolithic structure. 
89 | 90 | 91 | Playbook impact 92 | --------------- 93 | 94 | None. 95 | 96 | 97 | Upgrade impact 98 | -------------- 99 | 100 | Optionally, modularize configuration files according to this specification 101 | before or after upgrading to a version that supports it. 102 | 103 | 104 | Security impact 105 | --------------- 106 | 107 | None. 108 | 109 | 110 | Performance impact 111 | ------------------ 112 | 113 | None. 114 | 115 | 116 | End user impact 117 | --------------- 118 | 119 | None. 120 | 121 | 122 | Deployer impact 123 | --------------- 124 | 125 | Simplify the configuration process. 126 | 127 | 128 | Developer impact 129 | ---------------- 130 | 131 | Developers should consider the modular configuration when adding or changing 132 | configuration items. 133 | 134 | 135 | Dependencies 136 | ------------ 137 | 138 | None. 139 | 140 | 141 | Implementation 142 | ============== 143 | 144 | Assignee(s) 145 | ----------- 146 | 147 | Primary assignee: 148 | **Sam-I-Am** 149 | 150 | 151 | Work items 152 | ---------- 153 | 154 | * As necessary, break existing monolithic configuration files into smaller 155 | files that contain groups of similar items and reside in a ``.d`` 156 | directory within the configuration file structure. 157 | 158 | 159 | Testing 160 | ======= 161 | 162 | * Verify changes do not break gating process. The AIO script for gating can 163 | continue to use a monolithic file or modular files with the ``.aio`` 164 | extension. 165 | 166 | 167 | Documentation impact 168 | ==================== 169 | 170 | * Change documentation that references monolithic configuration files to 171 | reference modular configuration files. 172 | 173 | 174 | References 175 | ========== 176 | 177 | None. 178 | -------------------------------------------------------------------------------- /specs/mitaka/role-designate.rst: -------------------------------------------------------------------------------- 1 | Additional Role for Designate Deployment 2 | ######################################## 3 | :date: 2015-12-08 12:00 4 | 5 | :tags: designate, openstack-ansible 6 | 7 | The purpose of this spec is to add support for the OpenStack Designate program 8 | to OpenStack-Ansible. This would allow the deployment of Designate along with 9 | the core OpenStack components using OpenStack-Ansible. 10 | 11 | Blueprint - Designate deployment on OpenStack-Ansible: 12 | 13 | https://blueprints.launchpad.net/openstack-ansible/+spec/role-designate 14 | 15 | 16 | Problem description 17 | =================== 18 | 19 | Presently, while deploying OpenStack using OpenStack-Ansible only the core 20 | OpenStack components get deployed. The deployment of other components 21 | (eg: Designate, Trove) on playbooks is not supported yet and to use other 22 | component's services, they need to be deployed manually. 23 | 24 | 25 | Proposed change 26 | =============== 27 | 28 | The Designate program encompasses a number of projects, but this spec and this 29 | proposed series of changes covers the initial implementation of support for 30 | Designate. This will involve adding support for the Designate server[1] and 31 | Designate client[2]. 32 | 33 | The proposed changes include: 34 | 35 | * Creation of an openstack-ansible-designate repository and Ansible role 36 | to support the deployment of Designate. 37 | * Tests to verify the new Ansible role. 
38 | 39 | 40 | Alternatives 41 | ------------ 42 | 43 | None 44 | 45 | 46 | Playbook/Role impact 47 | -------------------- 48 | 49 | Test playbooks will be placed in the openstack-ansible-designate repository 50 | for functional testing purposes, with no initially proposed changes to 51 | OpenStack-Ansible playbooks. 52 | 53 | In the future, once the Designate role is found to be useful and acceptable, a 54 | future spec will address the integration of the Designate role with the main 55 | OpenStack-Ansible repository. 56 | 57 | 58 | Upgrade impact 59 | -------------- 60 | 61 | None 62 | 63 | 64 | Security impact 65 | --------------- 66 | 67 | None. 68 | 69 | 70 | Performance impact 71 | ------------------ 72 | 73 | None. 74 | 75 | 76 | End user impact 77 | --------------- 78 | 79 | Deployers will be able to deploy Designate and use DNSaaS through 80 | OpenStack-Ansible. 81 | 82 | 83 | Deployer impact 84 | --------------- 85 | 86 | When support for the new Designate role is added to the parent repository, new 87 | Designate specific configuration options will be made available. This will 88 | provide an optional role for use in the OpenStack-Ansible toolbox for the 89 | deployers. 90 | 91 | 92 | Developer impact 93 | ---------------- 94 | 95 | As this change is self-contained initially, no impact on other developers is 96 | expected. 97 | 98 | 99 | Dependencies 100 | ------------ 101 | 102 | None 103 | 104 | 105 | Implementation 106 | ============== 107 | 108 | Assignee(s) 109 | ----------- 110 | 111 | Primary assignee: 112 | Swati Sharma ( IRC: Swati) 113 | 114 | Other contributors: 115 | None 116 | 117 | 118 | Work items 119 | ---------- 120 | 121 | #. Ask for the new repository, openstack-ansible-designate, to be created 122 | #. Create the role for Designate support 123 | 124 | * Add support for running designate-api, designate-central, 125 | designate-pool_manager, designate-sink, designate-mdns 126 | * Add support for including python-designateclient, which is the operator 127 | tool for supporting Designate. 128 | 129 | 130 | Testing 131 | ======= 132 | 133 | The usual gate checks can be used for these changes. Also, each individual 134 | commit can be functionally tested individually. 135 | 136 | 137 | Documentation impact 138 | ==================== 139 | 140 | Adding support to the user guide on how to enable Designate support will be 141 | required. 142 | 143 | References 144 | ========== 145 | 146 | * [1] The Designate server: http://git.openstack.org/cgit/openstack/designate/ 147 | * [2] The Designate client: 148 | http://git.openstack.org/cgit/openstack/python-designateclient/ 149 | -------------------------------------------------------------------------------- /specs/mitaka/role-ironic.rst: -------------------------------------------------------------------------------- 1 | Role Ironic 2 | ########### 3 | :date: 2015-10-12 16:30 4 | :tags: ansible, ironic 5 | 6 | The purpose of this spec is to add support for the OpenStack Ironic program 7 | to OpenStack Ansible, allowing the provisioning of compute nodes to bare metal 8 | machines. 9 | 10 | https://blueprints.launchpad.net/openstack-ansible/+spec/role-ironic 11 | 12 | 13 | Problem description 14 | =================== 15 | 16 | Openstack Ansible currently does not support the provisioning of bare metal 17 | compute hosts, but this is functionality that operators and users are likely 18 | to want. 
19 | 20 | 21 | Proposed change 22 | =============== 23 | 24 | The Ironic program encompasses a number of projects, but this spec and this 25 | proposed series of changes covers the initial implementation of support for 26 | Ironic. This will involve adding support for the Ironic server[1] and Ironic 27 | client[2]. 28 | 29 | Future specs may be raised to cover the addition of ironic-inspector, or to 30 | support alternate deployment mechanisms, or to support different deployment 31 | drivers. The specific detail for these will be added in future specs. 32 | 33 | This work will build upon the experiences learnt in developing bifrost[3] 34 | (which is a set of ansible playbooks for deploying Ironic standalone, without 35 | other OpenStack components). 36 | 37 | The changes that are proposed as part of this spec are: 38 | 39 | * Creation of an openstack-ansible-ironic repository and ansible role to 40 | support the initial implementation of Ironic. This will allow 41 | openstack-ansible to deploy compute nodes to bare metal hosts, via the nova 42 | API. Initially, support will be limited to bare metal hosts that support 43 | IPMI for power control, and PXE for boot. 44 | 45 | * Tests to verify the new ansible role 46 | 47 | 48 | Alternatives 49 | ------------ 50 | 51 | None, really. Supporting bare metal hosts in OpenStack is done via using 52 | Ironic. 53 | 54 | 55 | Playbook/Role impact 56 | -------------------- 57 | 58 | Test playbooks will be placed in the openstack-ansible-ironic repository 59 | for functional testing purposes, with no initially proposed changes to 60 | openstack-ansible playbooks. 61 | 62 | In the future, once the ironic role is deemed useful and acceptible, a future 63 | spec will address the integration of the ironic role with the main 64 | openstack-ansible repository. 65 | 66 | 67 | Upgrade impact 68 | -------------- 69 | 70 | None 71 | 72 | 73 | Security impact 74 | --------------- 75 | 76 | None. 77 | 78 | 79 | Performance impact 80 | ------------------ 81 | 82 | None. 83 | 84 | 85 | End user impact 86 | --------------- 87 | 88 | Deployers will be able to deploy compute nodes to bare metal hosts. 89 | 90 | 91 | Deployer impact 92 | --------------- 93 | 94 | Ironic specific configuration options will be added to the new repository. 95 | When support for the new Ironic role is added to the parent repository new 96 | config options will be made available, however it is expected that Ironic 97 | support will initially be disabled, requiring that deployers explicitly 98 | enable Ironic support, and to enrol hosts for openstack-ansible to use. 99 | 100 | 101 | Developer impact 102 | ---------------- 103 | 104 | As this change is self-contained initially, no impact on other developers 105 | is expected. 106 | 107 | 108 | Dependencies 109 | ------------ 110 | 111 | None 112 | 113 | 114 | Implementation 115 | ============== 116 | 117 | Assignee(s) 118 | ----------- 119 | 120 | Primary assignee: 121 | Michael Davies - mrda on Launchpad and on IRC 122 | 123 | Other contributors: 124 | None 125 | 126 | 127 | Work items 128 | ---------- 129 | 130 | #. Ask for the new repository, openstack-ansible-ironic, to be created 131 | #. Create the role for ironic support 132 | 133 | * Add support for running ironic-api 134 | * Add support for running ironic-conductor 135 | * Add support for including python-ironicclient, which is the operator 136 | tool for supporting Ironic. 137 | * Add configuration to make configuring bare metal deployment easy 138 | #. 
Add support for enrolling bare metal nodes 139 | #. Add support for configuring Nova to use Ironic. Initially this will be in 140 | the form of documentation until the parent openstack-ansible repository is 141 | updated to use openstack-ansible-ironic 142 | 143 | 144 | Testing 145 | ======= 146 | 147 | As this is testing deploying to hardware, this is challenging :) 148 | 149 | Develop a test playbook to deploy to hardware that can exercise the new 150 | role. Develop tests that verify the role's behaviour independent of 151 | actually requiring hardware to test the role's functionality. 152 | 153 | 154 | Documentation impact 155 | ==================== 156 | 157 | Adding support to the user guide on how to enable Ironic support will be 158 | required. 159 | 160 | 161 | References 162 | ========== 163 | 164 | * [1] The Ironic server: http://git.openstack.org/cgit/openstack/ironic/ 165 | * [2] The Ironic client: 166 | http://git.openstack.org/cgit/openstack/python-ironicclient/ 167 | * [3] The Bifrost project, standalone Ironic installation: 168 | http://git.openstack.org/cgit/openstack/bifrost 169 | -------------------------------------------------------------------------------- /specs/mitaka/role-zaqar.rst: -------------------------------------------------------------------------------- 1 | Additional Role for Zaqar Deployment 2 | ######################################## 3 | :date: 2016-01-20 11:20 4 | 5 | :tags: zaqar, openstack-ansible 6 | 7 | The purpose of this spec is to add support for the OpenStack Zaqar program 8 | to OpenStack-Ansible. This would allow the deployment of Zaqar along with 9 | the core OpenStack components using OpenStack-Ansible. 10 | 11 | Blueprint - Zaqar deployment on OpenStack-Ansible: 12 | 13 | https://blueprints.launchpad.net/openstack-ansible/+spec/role-zaqar 14 | 15 | 16 | Problem description 17 | =================== 18 | 19 | Presently, while deploying OpenStack using OpenStack-Ansible only the core 20 | OpenStack components get deployed. The deployment of other components 21 | (eg: Zaqar) on playbooks is not supported yet and to use other 22 | component's services, they need to be deployed manually. 23 | 24 | 25 | Proposed change 26 | =============== 27 | 28 | The Zaqar program encompasses a number of projects, but this spec and this 29 | proposed series of changes covers the initial implementation of support for 30 | Zaqar. This will involve adding support for the Zaqar server[1] and 31 | Zaqar client[2]. 32 | 33 | The proposed changes include: 34 | 35 | * Creation of an openstack-ansible-zaqar repository and Ansible role 36 | to support the deployment of Zaqar. 37 | * Tests to verify the new Ansible role. 38 | 39 | 40 | Alternatives 41 | ------------ 42 | 43 | None 44 | 45 | 46 | Playbook/Role impact 47 | -------------------- 48 | 49 | Test playbooks will be placed in the openstack-ansible-zaqar repository 50 | for functional testing purposes, with no initially proposed changes to 51 | OpenStack-Ansible playbooks. 52 | 53 | In the future, once the Zaqar role is found to be useful and acceptable, a 54 | future spec will address the integration of the Zaqar role with the main 55 | OpenStack-Ansible repository. 56 | 57 | 58 | Upgrade impact 59 | -------------- 60 | 61 | None 62 | 63 | 64 | Security impact 65 | --------------- 66 | 67 | None. 68 | 69 | 70 | Performance impact 71 | ------------------ 72 | 73 | None. 
74 | 75 | 76 | End user impact 77 | --------------- 78 | 79 | Deployers will be able to deploy Zaqar and use the messaging service through 80 | OpenStack-Ansible. 81 | 82 | 83 | Deployer impact 84 | --------------- 85 | 86 | When support for the new Zaqar role is added to the parent repository, new 87 | Zaqar specific configuration options will be made available. This will 88 | provide an optional role for use in the OpenStack-Ansible toolbox for the 89 | deployers. 90 | 91 | 92 | Developer impact 93 | ---------------- 94 | 95 | As this change is self-contained initially, no impact on other developers is 96 | expected. 97 | 98 | 99 | Dependencies 100 | ------------ 101 | 102 | None 103 | 104 | 105 | Implementation 106 | ============== 107 | 108 | Assignee(s) 109 | ----------- 110 | 111 | Primary assignee: 112 | Fei Long Wang ( IRC: flwang) 113 | 114 | Other contributors: 115 | None 116 | 117 | 118 | Work items 119 | ---------- 120 | 121 | #. Ask for the new repository, openstack-ansible-zaqar, to be created 122 | #. Create the role for Zaqar support 123 | 124 | * Add support for running zaqar-server 125 | * Add support for including python-zaqarclient, which is the operator 126 | tool for supporting Zaqar. 127 | 128 | 129 | Testing 130 | ======= 131 | 132 | The usual gate checks can be used for these changes. Also, each individual 133 | commit can be functionally tested individually. 134 | 135 | 136 | Documentation impact 137 | ==================== 138 | 139 | Adding support to the user guide on how to enable Zaqar support will be 140 | required. 141 | 142 | References 143 | ========== 144 | 145 | * [1] The Zaqar server: http://git.openstack.org/cgit/openstack/zaqar/ 146 | * [2] The Zaqar client: 147 | http://git.openstack.org/cgit/openstack/python-zaqarclient/ 148 | -------------------------------------------------------------------------------- /specs/newton/add-support-for-systemd.rst: -------------------------------------------------------------------------------- 1 | Add support for SystemD 2 | ####################### 3 | :date: 2015-07-14 4 | :tags: systemd 5 | 6 | The purpose of this spec is to adjust our current upstart-only init process to allow 7 | us to leverage SystemD. While SystemD is not present within the Ubuntu 14.04 LTS 8 | OS that we use today, it is something that is coming within the next LTS release and 9 | something that we should begin implementing as an alternative to upstart. 10 | 11 | https://blueprints.launchpad.net/openstack-ansible/+spec/add-support-for-systemd 12 | 13 | 14 | Problem description 15 | =================== 16 | 17 | OSAD presently only supports Ubuntu 14.04 LTS using upstart. In the next LTS, upstart 18 | will no longer be an option. For this reason I believe it is time to begin implementing 19 | SystemD support within the OpenStack roles. 20 | 21 | 22 | Proposed change 23 | =============== 24 | 25 | The basic change is more of a structural one. Essentially, adding SystemD support will 26 | be a new template and will follow much of the same pattern found within our current 27 | upstart process. 28 | 29 | 30 | Alternatives 31 | ------------ 32 | 33 | n/a - SystemD is coming and the sooner we have an opinion on it the better off we will 34 | be. 35 | 36 | 37 | Playbook impact 38 | --------------- 39 | 40 | The playbooks will not be impacted; however, the roles will have a new SystemD template and 41 | set of tasks that enable the system to use SystemD.
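A task of the kind referred to above could look roughly like the sketch below; the template and unit file names are illustrative only, not the agreed implementation::

    # Hypothetical task placing a SystemD unit file from a role template
    - name: Place the systemd unit file
      template:
        src: "systemd-init.j2"
        dest: "/etc/systemd/system/{{ program_name }}.service"
        mode: "0644"
        owner: "root"
        group: "root"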
42 | 43 | 44 | Upgrade impact 45 | -------------- 46 | 47 | Adding in SystemD support will ensure that deployers are able to upgrade to future OS's 48 | that only have SystemD available. 49 | 50 | 51 | Security impact 52 | --------------- 53 | 54 | n/a 55 | 56 | 57 | Performance impact 58 | ------------------ 59 | 60 | n/a 61 | 62 | 63 | End user impact 64 | --------------- 65 | 66 | n/a 67 | 68 | 69 | Deployer impact 70 | --------------- 71 | 72 | n/a 73 | 74 | 75 | Developer impact 76 | ---------------- 77 | 78 | n/a 79 | 80 | 81 | Dependencies 82 | ------------ 83 | 84 | n/a 85 | 86 | Implementation 87 | ============== 88 | 89 | Assignee(s) 90 | ----------- 91 | 92 | Primary assignee: 93 | https://launchpad.net/~kevin-carter ``cloudnull`` 94 | 95 | 96 | Work items 97 | ---------- 98 | 99 | * Add SystemD templates to all OpenStack roles. 100 | * Add SystemD tasks to all OpenStack roles. 101 | 102 | 103 | Testing 104 | ======= 105 | 106 | Being that we do not gate on anything that uses SystemD at the moment this 107 | will be a set of changes that are being implemented to future proof OSAD. 108 | This change will also allow us to being looking into "other" OS support 109 | which will likely carry with it an implementation of SystemD, such as Debian 110 | "Jessie". 111 | 112 | 113 | Documentation impact 114 | ==================== 115 | 116 | n/a 117 | 118 | 119 | References 120 | ========== 121 | 122 | n/a 123 | -------------------------------------------------------------------------------- /specs/newton/ipv6-project-support.rst: -------------------------------------------------------------------------------- 1 | IPv6 Project Support 2 | #################### 3 | :date: 2015-09-09 22:00 4 | :tags: ipv6 5 | 6 | ospenstack-ansible should support IPv6 for project networks. 7 | To that effect we should make sure that the necessary components and 8 | configurations are installed so that openstack can expose and route IPv6 for 9 | project networks. 10 | 11 | 12 | Problem description 13 | =================== 14 | 15 | Neutron currently (in kilo) has the ability to manage and route IPv6 data. 16 | OpenStack Ansible currently has a few holes in IPv6 support on Neutron tenant 17 | networks (not installing the radvd package in the neutron-agents container 18 | for instance). 19 | 20 | 21 | Proposed change 22 | =============== 23 | 24 | Add a test case for proving IPv6 access on project networks works as expected 25 | 26 | 27 | Alternatives 28 | ------------ 29 | 30 | Don't explicitly support IPv6 31 | 32 | 33 | Playbook impact 34 | --------------- 35 | 36 | As the primary change is adding a test case this is somewhat open ended. 37 | As the support for IPv6 via Neutron is already mostly there this should be 38 | low impact, will likely only be adding the missing package and test support. 39 | 40 | 41 | Upgrade impact 42 | -------------- 43 | 44 | None 45 | 46 | 47 | Security impact 48 | --------------- 49 | 50 | Low, at the moment the only known change is to ensure that radvd is installed 51 | so that Neutron can configure/control it. 52 | 53 | 54 | Performance impact 55 | ------------------ 56 | 57 | None 58 | 59 | 60 | End user impact 61 | --------------- 62 | 63 | The end user will be able to configure IPv6 in the project networks. 64 | 65 | 66 | Deployer impact 67 | --------------- 68 | 69 | None 70 | 71 | 72 | Developer impact 73 | ---------------- 74 | 75 | None once spec is implemented. 
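To make the proposed IPv6 test case more concrete, the sketch below shows the kind of tasks it could run; the network names and the ULA prefix are placeholders, and the exact commands would be settled during implementation::

    # Hypothetical tasks creating an IPv6 (RFC4193 ULA) project subnet
    - name: Create a test project network
      shell: neutron net-create ipv6-test-net
      run_once: true

    - name: Create an IPv6 subnet using SLAAC
      shell: >
        neutron subnet-create ipv6-test-net fdf4:5d25:1234::/64
        --ip-version 6 --ipv6-ra-mode slaac --ipv6-address-mode slaac
        --name ipv6-test-subnet
      run_once: true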
76 | 77 | 78 | Dependencies 79 | ------------ 80 | 81 | None 82 | 83 | 84 | Implementation 85 | ============== 86 | 87 | Assignee(s) 88 | ----------- 89 | 90 | Primary assignee: 91 | prometheanfire 92 | 93 | 94 | Work items 95 | ---------- 96 | 97 | * add test support for IPv6 in OpenStack Ansible 98 | 99 | * This would be via configuring a RFC4193 network and connecting from the 100 | neutron radvd namespace to the instance. 101 | 102 | * It would also test unicast routing between neutron networks using RFC4193. 103 | 104 | * ensure that tests pass 105 | 106 | 107 | Testing 108 | ======= 109 | 110 | Ensure that the instance gets an IP in a certian address space and can ping 111 | the gateway. 112 | 113 | Test for routability, ping between instances on two neutron network segments. 114 | 115 | 116 | Documentation impact 117 | ==================== 118 | 119 | Should be minimal 120 | 121 | 122 | References 123 | ========== 124 | 125 | https://bugs.launchpad.net/openstack-ansible/+bug/1492080 126 | -------------------------------------------------------------------------------- /specs/newton/only-install-venvs.rst: -------------------------------------------------------------------------------- 1 | Only support venv installs 2 | ########################## 3 | :date: 2016-06-27 13:30 4 | :tags: python, venv, deployment 5 | 6 | The purpose of this spec is remove support for installing OpenStack services 7 | and dependent pip packages outside of Python virtual environments. 8 | 9 | * https://blueprints.launchpad.net/openstack-ansible/+spec/only-install-venvs 10 | 11 | Problem description 12 | =================== 13 | 14 | Conflicts between system packages and globally installed Python pip packages 15 | can lead to broken services and strange behavior. The default installation 16 | option of OpenStack services since the Liberty release has been to use virtual 17 | environments to isolate each individual service. This should be the only 18 | supported option going forward. 19 | 20 | 21 | Proposed change 22 | =============== 23 | 24 | Each role will be updated to remove tasks and variables related to allowing the 25 | option of installing pip packages outside of a virtual environment. The tasks 26 | which currently handle installing virtual environments will also be updated to 27 | ensure that they are idempotent and can recover properly from an interruption 28 | in a previous run of the same role. 29 | 30 | 31 | Alternatives 32 | ------------ 33 | 34 | Leave the roles as they are. Deployment of OpenStack services would continue 35 | being supported through either virtual environments or installed as global 36 | system Python packages. 37 | 38 | 39 | Playbook/Role impact 40 | -------------------- 41 | 42 | See `Proposed change`_. 43 | 44 | 45 | Upgrade impact 46 | -------------- 47 | 48 | Installing services to virtual environments has been the default since the 49 | Liberty release. If any Mitaka deployments are still configured to not install 50 | services to virtual environments, they will be forced to beginning in the 51 | Newton release. 52 | 53 | 54 | Security impact 55 | --------------- 56 | 57 | N/A. 58 | 59 | 60 | Performance impact 61 | ------------------ 62 | 63 | Tasks which are currently being skipped will be removed, which could slightly 64 | decrease role run times. 65 | 66 | 67 | End user impact 68 | --------------- 69 | 70 | N/A. 
71 | 72 | 73 | Deployer impact 74 | --------------- 75 | 76 | The ``*_venv_enabled`` variables will no longer exist and will have no effect 77 | if set by a deployer. 78 | 79 | 80 | Developer impact 81 | ---------------- 82 | 83 | N/A. 84 | 85 | 86 | Dependencies 87 | ------------ 88 | 89 | N/A. 90 | 91 | 92 | Implementation 93 | ============== 94 | 95 | Assignee(s) 96 | ----------- 97 | 98 | Primary assignee: 99 | https://launchpad.net/~jimmy-mccrory (jmccrory) 100 | 101 | 102 | Work items 103 | ---------- 104 | 105 | * Remove tasks related to installation of pip packages outside of a venv from 106 | each role 107 | * Remove variables which currently toggle installation of pip packages to a 108 | venv from each role 109 | * Update each role to make tasks which create and install packages to a venv 110 | more resilient and idempotent 111 | 112 | 113 | Testing 114 | ======= 115 | 116 | Both integrated and independent role gate testing are already only installing 117 | services to virtual environments. 118 | 119 | 120 | Documentation impact 121 | ==================== 122 | 123 | Should be minimal. 124 | 125 | 126 | References 127 | ========== 128 | 129 | N/A. 130 | -------------------------------------------------------------------------------- /specs/newton/osa-install-guide-overhaul.rst: -------------------------------------------------------------------------------- 1 | Overhaul of the current OpenStack-Ansible Installation Guide 2 | ############################################################ 3 | :date: 2016-05-31 00:00 4 | :tags: docs 5 | 6 | Blueprint: Overhaul of the current OpenStack-Ansible Installation Guide 7 | * https://blueprints.launchpad.net/openstack-ansible/+spec/install-guide 8 | * https://blueprints.launchpad.net/openstack-ansible/+spec/osa-install-guide-overhaul 9 | 10 | After the 2016 Austin summit, there was a discussion and a consensus 11 | surrounding the current state of the OpenStack-Ansible Installation Guide. 12 | 13 | .. note:: 14 | 15 | A `blueprint and spec `_ were previously created 16 | with the intention of improving the documentation that pushed the summit discussion. 17 | 18 | Currently, the OpenStack-Ansible install guide has minimal installation 19 | information, and a lot of configuration information. This specification proposes 20 | a more formalized plan to separate this information and streamline the 21 | installation guide to make it easier and quicker to install OpenStack. 22 | 23 | Problem description 24 | =================== 25 | 26 | The OpenStack-Ansible Installation Guide contains information that does not 27 | necessarily pertain to that of an installation guide structure. It has 28 | accumulated a lot of configuration information and reference information that 29 | reduces the user's focus and simplicity to install OpenStack. 30 | 31 | The current installation guide also does not follow the openstack-manuals 32 | documentation conventions. 33 | 34 | Proposed change 35 | =============== 36 | 37 | The main focus of the installation guide is reorganising and developing 38 | content so a deployer makes very few decisions and minimal configuration 39 | to deploy an OpenStack test environment and production environment. 40 | 41 | The proposed changes are: 42 | 43 | * Clearly define reference architecture and develop use case configuration 44 | examples in an appendix. 45 | 46 | * Removal of the configuration information from the current installation guide 47 | and including it in the OpenStack-Ansible role documentation. 
48 | 49 | * Migrate operations content temporarily to openstack-ansible-ops repo 50 | until an operations guide can be produced. 51 | 52 | * Restructure the guide to include basic deployment configuration. 53 | 54 | * Appendices that include configuration file examples, neutron plugins, 55 | cinder options and additional resources relevant to an OpenStack-Ansible 56 | installation. 57 | 58 | * Include links to role based documentation from the Installation Guide. 59 | 60 | Alternatives 61 | ------------ 62 | 63 | * Leaving the installation guide as is, and migrating only the configuration 64 | information to the developer docs. 65 | 66 | * Consider revising the installation guide to meet criteria in 67 | `project-specific installation guide 68 | `_ 69 | and publish to docs.openstack.org 70 | 71 | Playbook/Role impact 72 | -------------------- 73 | 74 | N/A 75 | 76 | Upgrade impact 77 | -------------- 78 | 79 | N/A 80 | 81 | Security impact 82 | --------------- 83 | 84 | N/A 85 | 86 | Performance impact 87 | ------------------ 88 | 89 | N/A 90 | 91 | End user impact 92 | --------------- 93 | 94 | These changes will hopefully improve the end user experience, by providing 95 | a more structured and better flow of information to install OpenStack. 96 | 97 | Deployer impact 98 | --------------- 99 | 100 | N/A 101 | 102 | 103 | Developer impact 104 | ---------------- 105 | 106 | Move existing content over to the roles first, then developers must 107 | submit any new documentation to the role repositories. 108 | 109 | Dependencies 110 | ------------ 111 | 112 | N/A 113 | 114 | Implementation 115 | ============== 116 | 117 | Assignee(s) 118 | ----------- 119 | 120 | Primary assignee: 121 | Alexandra Settle (asettle) 122 | 123 | Other contributors: 124 | Darren Chan (darrenc), Jesse Pretorius (odyssey4me), 125 | Travis Truman (automagically), Major Hayden (mhayden) 126 | 127 | Work items 128 | ---------- 129 | 130 | - Clarify and obtain consensus on the content structure 131 | - Gather information from SMEs as needed 132 | - Create a draft directory for installation guide changes 133 | - Create a work items list and allocate resources 134 | - Ensure documentation meets openstack-manuals writing conventions 135 | - Test draft documentation before publication 136 | 137 | Testing 138 | ======= 139 | 140 | The testing will be conducted by the community once a draft is available. 141 | OpenStack-Ansible users will be asked to follow the new installation guide 142 | to install OpenStack and evaluate if the information provided is accurate, 143 | clear, and concise. 144 | 145 | Documentation impact 146 | ==================== 147 | 148 | This is a documentation change, N/A. 149 | 150 | References 151 | ========== 152 | 153 | * `Design Summit discussion 154 | `_ 155 | 156 | * `ToC planning 157 | `_ 158 | -------------------------------------------------------------------------------- /specs/newton/role-tacker.rst: -------------------------------------------------------------------------------- 1 | Additional Role for Tacker Service Deployment 2 | ############################################## 3 | :date: 2016-10-19 12:30 4 | 5 | :tags: tacker, openstack-ansible 6 | 7 | The purpose of this spec is to add support for the OpenStack Tacker service 8 | to OpenStack-Ansible. This would allow the deployment of Tacker along with 9 | the core OpenStack components using OpenStack-Ansible. 
10 | 11 | Blueprint - Tacker deployment on OpenStack-Ansible: 12 | 13 | https://blueprints.launchpad.net/openstack-ansible/+spec/role-tacker 14 | 15 | 16 | Problem description 17 | =================== 18 | 19 | Presently, while deploying OpenStack using OpenStack-Ansible only the core 20 | OpenStack components get deployed. The deployment of other components 21 | (eg: Tacker) on playbooks is not supported yet and to use other 22 | component's services, they need to be deployed manually. 23 | 24 | 25 | Proposed change 26 | =============== 27 | 28 | This change involves adding support for the Tacker server, Tacker client, 29 | and Tacker Horizon dashboard interface. 30 | 31 | The proposed changes include: 32 | 33 | * Creation of an openstack-ansible-tacker repository and Ansible role 34 | to support the deployment of Tacker. 35 | * Tests to verify the new Ansible role. 36 | * Deployment of Tacker client 37 | * Deployment of Tacker Horizon 38 | 39 | 40 | Alternatives 41 | ------------ 42 | 43 | None 44 | 45 | 46 | Playbook/Role impact 47 | -------------------- 48 | 49 | Test playbooks will be placed in the openstack-ansible-tacker repository 50 | for functional testing purposes, with no initially proposed changes to 51 | OpenStack-Ansible playbooks. 52 | 53 | In the future, once the Tacker role has reached a muture state, a future 54 | spec will address the integration of the Tacker role with the main 55 | OpenStack-Ansible repository. 56 | 57 | 58 | Upgrade impact 59 | -------------- 60 | 61 | None 62 | 63 | 64 | Security impact 65 | --------------- 66 | 67 | None. 68 | 69 | 70 | Performance impact 71 | ------------------ 72 | 73 | None. 74 | 75 | 76 | End user impact 77 | --------------- 78 | 79 | Deployers will be able to deploy Tacker service through OpenStack-Ansible 80 | framework for VNF management and orchestration purposes. 81 | 82 | 83 | Deployer impact 84 | --------------- 85 | 86 | When support for the new Tacker role is added to the parent repository, new 87 | Tacker specific configuration options will be made available. This will 88 | provide an optional role for use in the OpenStack-Ansible toolbox for the 89 | deployers. 90 | 91 | 92 | Developer impact 93 | ---------------- 94 | 95 | As this change is self-contained initially, no impact on other developers is 96 | expected. 97 | 98 | 99 | Dependencies 100 | ------------ 101 | 102 | None 103 | 104 | 105 | Implementation 106 | ============== 107 | 108 | Assignee(s) 109 | ----------- 110 | 111 | Primary assignee: 112 | Jeff Rametta ( IRC: jcrst) 113 | 114 | Other contributors: 115 | None 116 | 117 | 118 | Work items 119 | ---------- 120 | 121 | #. Ask for the new repository, openstack-ansible-tacker, to be created 122 | #. Create the role for Tacker support 123 | 124 | * Add support for running tacker-sever 125 | * Add support for python tacker client 126 | * Add support for Tacker Horizon dashboard 127 | * Add documentation and install guide for the role 128 | 129 | 130 | Testing 131 | ======= 132 | 133 | The usual gate checks can be used for these changes. Also, each individual 134 | commit can be functionally tested individually. 135 | 136 | 137 | Documentation impact 138 | ==================== 139 | 140 | Adding support to the user guide on how to enable Tacker support will be 141 | required. 
142 | 143 | References 144 | ========== 145 | 146 | * Tacker server: https://git.openstack.org/cgit/openstack/tacker/ 147 | * Tacker client: https://git.openstack.org/cgit/openstack/python-tackerclient 148 | * Tacker Horizon: https://git.openstack.org/cgit/openstack/tacker-horizon 149 | 150 | 151 | -------------------------------------------------------------------------------- /specs/newton/standalone-swift.rst: -------------------------------------------------------------------------------- 1 | standalone-swift 2 | ################################# 3 | :date: 2015-07-07 22:00 4 | :tags: swift, aio, tests 5 | 6 | This spec exists to allow for testing a different deployment methodology, namely 7 | Swift-only deployments. The problem is that the openstack_user_config.yml.aio file 8 | defines hosts that are not needed for such a deployment. 9 | 10 | * https://blueprints.launchpad.net/openstack-ansible/+spec/standalone-swift 11 | 12 | 13 | Problem description 14 | =================== 15 | 16 | Deploying an AIO for testing deploys all OpenStack services even when only Swift is desired. 17 | We are not currently testing this deployment type. 18 | 19 | 20 | Proposed change 21 | =============== 22 | 23 | * Add an openstack_user_config.yml.aio.swift file for Swift-only deployments. 24 | 25 | * Add to or modify the deployment scripts to provide a switch for Swift-only deployments. 26 | 27 | * Modify tests to allow for Swift-only deployments. 28 | 29 | 30 | Alternatives 31 | ------------ 32 | 33 | N/A 34 | 35 | 36 | Playbook impact 37 | --------------- 38 | 39 | Minimal to no impact on the actual playbooks. 40 | 41 | 42 | Upgrade impact 43 | -------------- 44 | 45 | N/A 46 | 47 | 48 | Security impact 49 | --------------- 50 | 51 | N/A 52 | 53 | 54 | Performance impact 55 | ------------------ 56 | 57 | N/A 58 | 59 | 60 | End user impact 61 | --------------- 62 | 63 | Allows the end user to use the openstack_user_config.yml.aio.swift file as a 64 | template on which to base their own Swift deployments. 65 | 66 | 67 | Deployer impact 68 | --------------- 69 | 70 | The playbooks would remain unchanged; only deployers using the scripts may 71 | need to adjust, and this does not alter default behavior. 72 | 73 | 74 | Developer impact 75 | ---------------- 76 | 77 | This would allow testing of standalone Swift deployments. 78 | 79 | 80 | Dependencies 81 | ------------ 82 | 83 | N/A 84 | 85 | 86 | Implementation 87 | ============== 88 | 89 | Assignee(s) 90 | ----------- 91 | 92 | Primary assignee: 93 | prometheanfire 94 | 95 | 96 | Work items 97 | ---------- 98 | 99 | * Create the AIO configuration file 100 | 101 | * Add or alter scripts to allow for standalone Swift testing (tempest changes) 102 | 103 | * Add a test to project_config 104 | 105 | * Enable the test in openstack-ansible 106 | 107 | 108 | Testing 109 | ======= 110 | 111 | This will add a test/vote to openstack-ansible. 112 | 113 | 114 | Documentation impact 115 | ==================== 116 | 117 | Possibly pointing out the openstack_user_config.yml.aio.swift file as a 118 | template for larger deployments and documenting the new environment variables.
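For illustration, such a template could be reduced to just the host groups a Swift-only deployment needs, roughly along the lines below. The group names and addresses are indicative only; the real file would follow whatever the final template contains.

.. code-block:: yaml

    # Sketch of an openstack_user_config.yml.aio.swift - indicative only.
    cidr_networks:
      container: 172.29.236.0/22
      storage: 172.29.244.0/22

    shared-infra_hosts:          # galera, memcached, rabbitmq
      aio1:
        ip: 172.29.236.100
    identity_hosts:              # keystone for swift authentication
      aio1:
        ip: 172.29.236.100
    swift-proxy_hosts:
      aio1:
        ip: 172.29.236.100
    swift_hosts:
      aio1:
        ip: 172.29.236.100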
119 | 120 | 121 | References 122 | ========== 123 | 124 | N/A 125 | 126 | -------------------------------------------------------------------------------- /specs/newton/support-multiple-rabbitmq-clusters.rst: -------------------------------------------------------------------------------- 1 | Add support for multiple RabbitMQ clusters 2 | ########################################## 3 | :date: 2016-07-11 21:00 4 | :tags: rabbitmq, messaging, notifications, scalability 5 | 6 | Larger deployments may wish to provision multiple RabbitMQ 7 | clusters such that each cluster is deployed on its own set of hosts. 8 | 9 | Such functionality would allow a deployer to configure one or 10 | more additional component and container skeletons to add inventory 11 | groups to be used for the deployment of additional clusters. 12 | 13 | Problem description 14 | =================== 15 | 16 | The current playbook and roles assume a single inventory group: 17 | ``rabbitmq_all`` that is deployed on the ``shared-infra_hosts`` 18 | infrastructure. The inventory group name is hardcoded throughout 19 | and the playbook makes the assumption that only one cluster will 20 | ever be needed. 21 | 22 | 23 | Proposed change 24 | =============== 25 | 26 | * Modify the rabbitmq_server role to be more configurable with 27 | respect to the inventory group(s) that it operates upon 28 | * Modify the rabbitmq-install play to be more configurable with 29 | respect to the inventory group it operates upon 30 | 31 | Alternatives 32 | ------------ 33 | 34 | I'm not aware of alternative ways for the project to address this need. 35 | 36 | 37 | Playbook/Role impact 38 | -------------------- 39 | 40 | Initial impacts will be to the rabbitmq_server role and the rabbitmq-install 41 | play. However, I expect that additional impacts may exist within other roles 42 | such that they would need to change to be more configurable with respect to 43 | the inventory group they expect to use for rabbit hosts, or the variables they 44 | use to identify which rabbit hosts they should connect to. 45 | 46 | 47 | Upgrade impact 48 | -------------- 49 | 50 | Unclear on how upgrades would be impacted. To my knowledge, custom inventory 51 | extensions are not currently handled in the upgrade automation. 52 | 53 | 54 | Security impact 55 | --------------- 56 | 57 | No unique security impacts. The existing RabbitMQ security posture will 58 | be maintained, though additional secrets may be required. 59 | 60 | 61 | Performance impact 62 | ------------------ 63 | 64 | None expected/anticipated. 65 | 66 | 67 | End user impact 68 | --------------- 69 | 70 | End users will have increased flexibility in defining their deployment 71 | architecture. 72 | 73 | 74 | Deployer impact 75 | --------------- 76 | 77 | The goal is for the deployer impact to be negligible due to the opt-in 78 | nature of the changes discussed. 79 | 80 | 81 | Developer impact 82 | ---------------- 83 | 84 | This change will add some additional complexity for developers, but it 85 | should be minimal. 
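Most of that complexity should stay contained in the skeleton definitions rather than in the role itself. As a purely illustrative example, a second cluster could be described with an additional env.d skeleton along these lines; every name below is hypothetical.

.. code-block:: yaml

    # Illustrative /etc/openstack_deploy/env.d/rabbitmq_telemetry.yml
    component_skel:
      rabbitmq_telemetry:
        belongs_to:
          - rabbitmq_telemetry_all

    container_skel:
      rabbitmq_telemetry_container:
        belongs_to:
          - telemetry-rabbitmq_containers
        contains:
          - rabbitmq_telemetry

    physical_skel:
      telemetry-rabbitmq_containers:
        belongs_to:
          - all_containers
      telemetry-rabbitmq_hosts:
        belongs_to:
          - hosts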
86 | 87 | 88 | Dependencies 89 | ------------ 90 | 91 | None 92 | 93 | 94 | Implementation 95 | ============== 96 | 97 | Assignee(s) 98 | ----------- 99 | 100 | Primary assignee: 101 | travis-truman (automagically) 102 | 103 | Work items 104 | ---------- 105 | 106 | * rabbitmq_server role modifications for inventory group configurability 107 | * rabbitmq-install play modifications for inventory group configurability 108 | * Documentation explaining how to create additional RabbitMQ cluster groups 109 | * Other role modifications to support cluster connectivity configurability 110 | 111 | 112 | Testing 113 | ======= 114 | 115 | This should be able to be tested within the rabbitmq_server role functional 116 | tests given some changes to the test inventory. 117 | 118 | Documentation impact 119 | ==================== 120 | 121 | An appendix should be added that explains to deployers how to configure 122 | their environment for RabbitMQ multiple cluster support. 123 | 124 | References 125 | ========== 126 | 127 | None 128 | -------------------------------------------------------------------------------- /specs/newton/xen-virt-driver.rst: -------------------------------------------------------------------------------- 1 | Support Xen Virt Driver 2 | ####################### 3 | :date: 2016-06-03 11:17 4 | :tags: ansible, xen 5 | 6 | The purpose of this spec is to add support for the Xen Hypervisor to 7 | OpenStack-Ansible. This will allow the use of Xen as an option on OpenStack 8 | compute nodes. 9 | 10 | https://blueprints.launchpad.net/openstack-ansible/+spec/xen-virt-driver 11 | 12 | 13 | Problem description 14 | =================== 15 | 16 | Xen is a tested and supported hypervisor in OpenStack. It is used in some of 17 | the largest public clouds today and would make a good addition to 18 | OpenStack-Ansible. Support for Xen exists in the OpenStack Libvirt Driver today 19 | so implementation should not be difficult. 20 | 21 | Proposed change 22 | =============== 23 | 24 | The primary change is to add support in OpenStack-Ansible for Xen on CentOS 7, 25 | Ubuntu 16.04, and Ubuntu 14.04 (by using UCA repos). The necessary 26 | `changes `_ 27 | for Xen to work with OpenStack are in Xen 4.5.1 and Libvirt 1.2.15. This 28 | blueprint covers deploying the nova-xen compute driver with the standard 29 | networking agents that OpenStack-Ansible supports. 30 | 31 | The proposed changes include: 32 | 33 | * Add support for installing/configuring the Xen virt driver and dependencies 34 | * Documentation for how to configure a compute to run the Xen virt driver 35 | * Tests to verify changes to the os_nova role required for Xen support 36 | 37 | 38 | Alternatives 39 | ------------ 40 | 41 | * Maintain independent Xen Ansible playbooks - This requires reinvention 42 | of base function and does not meet operator requirements. 43 | 44 | 45 | Playbook/Role impact 46 | -------------------- 47 | 48 | See the Work Items for the playback/role impact. References to nova_virt_type 49 | will be updated to reflect a 'xen' option. 50 | 51 | 52 | Upgrade impact 53 | -------------- 54 | 55 | None. The xen driver is new for OpenStack-Ansible, and as such has no upgrade 56 | impact. 57 | 58 | 59 | Security impact 60 | --------------- 61 | 62 | None. 63 | 64 | 65 | Performance impact 66 | ------------------ 67 | 68 | None. 69 | 70 | 71 | End user impact 72 | --------------- 73 | 74 | End users will be able to deploy compute nodes using the Xen virt driver. 
75 | 76 | 77 | Deployer impact 78 | --------------- 79 | 80 | Xen specific configuration options will be added to the 81 | openstack-ansible-os_nova role. 82 | 83 | When support for Xen as a virt driver is added these config options will be 84 | available for use; however it is expected that Xen support will be disabled by 85 | default, requiring that deployers explicitly enable Xen support and configure 86 | hosts for OpenStack-Ansible to use. 87 | 88 | Documentation of these new configuration items will be provided and a set of 89 | defaults will also be provided. The Xen virt driver has limited its 90 | configuration to be minimal, so the operators should only have a few required 91 | options to set when Xen is selected as the virt driver. 92 | 93 | 94 | Developer impact 95 | ---------------- 96 | 97 | The existing development team will be asked for reviews and approvals of the 98 | change sets. The Xen driver team will do the necessary implementation and 99 | support of this function. 100 | 101 | 102 | Dependencies 103 | ------------ 104 | 105 | * `Multi-platform Host OS Enablement `_ 106 | - Needed to support Ubuntu 16.04 and CentOS 7 107 | 108 | 109 | Implementation 110 | ============== 111 | 112 | Assignee(s) 113 | ----------- 114 | 115 | Primary assignee: 116 | Antony Messerli - antonym on IRC and Launchpad 117 | 118 | Other contributors: 119 | 120 | Work items 121 | ---------- 122 | 123 | Multiple changes would be needed: 124 | 125 | * Add 'xen' to the 'nova_virt_types' structure alongside the necessary 126 | variable requirements for driver configuration, matching the other compute 127 | types. 128 | 129 | * As required, add nova.conf templating for xen-specific configuration 130 | options that is conditionally included when nova_virt_type is 'xen'. 131 | 132 | * Create a new nova_compute_xen.yml in the openstack-ansible-os_nova 133 | project. This will contain the tasks needed to ensure the xen driver 134 | is installed and configured on the system. 135 | 136 | * Update the existing nova_compute.yml to include the nova_compute_xen.yml 137 | and add the appropriate conditionals for that import. 138 | 139 | * Create a new nova_compute_xen_install.yml, which will be included by 140 | nova_compute_xen.yml. It will ensure that the necessary configuration 141 | and dependencies for running the Xen driver are in place. 142 | 143 | * Update documentation and comments indicating the new Xen nova_virt_type 144 | and how to configure OpenStack-Ansible for the Xen driver. 145 | 146 | * Automated unit test (see Testing) 147 | 148 | 149 | Testing 150 | ======= 151 | 152 | A new test-install-nova-xen.yml will be created for validating the new xen 153 | playbooks within the openstack-ansible-os_nova project. 154 | 155 | 156 | Documentation impact 157 | ==================== 158 | 159 | Documentation covering how to enable and configure Xen support will be 160 | added to the user guide. 
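The enablement itself should be small enough to show inline in that documentation. For example, assuming the ``xen`` value is added to the ``nova_virt_types`` structure as described in the work items, selecting the driver could come down to a single override:

.. code-block:: yaml

    # Illustrative /etc/openstack_deploy/user_variables.yml override
    # selecting the Xen virt driver for compute hosts.
    nova_virt_type: xen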
161 | 162 | 163 | References 164 | ========== 165 | 166 | Xen and OpenStack required versions: ``_ 167 | 168 | Multi-platform Host OS Enablement: ``_ 169 | -------------------------------------------------------------------------------- /specs/ocata/.keep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/openstack/openstack-ansible-specs/31668fd59c527930dd262dafc2b0af8f021d7c98/specs/ocata/.keep -------------------------------------------------------------------------------- /specs/ocata/create-ops-guide.rst: -------------------------------------------------------------------------------- 1 | Create Operations Guide 2 | ####################### 3 | :date: 2016-12-19 17:00 4 | :tags: docs, ops 5 | 6 | Blueprint: Create OpenStack-Ansible Operations Guide 7 | * https://blueprints.launchpad.net/openstack-ansible/+spec/create-ops-guide 8 | 9 | This specification proposes the development of an OpenStack-Ansible Operations 10 | Guide for the Ocata release. 11 | 12 | Problem description 13 | =================== 14 | 15 | During the Newton development cycle, the Installation Guide was revised which 16 | focused on providing a method for installing OpenStack for a test 17 | environment and production environment. As noted in the 18 | `Installation Guide spec `_, 19 | the operations content did not belong in the Installation Guide as it 20 | reduced the user's focus to install OpenStack, and was temporarily relocated to 21 | the following Developer Documentation pages: 22 | 23 | * http://docs.openstack.org/developer/openstack-ansible/developer-docs/ops.html 24 | * http://docs.openstack.org/developer/openstack-ansible/developer-docs/extending.html 25 | 26 | There is a need to develop a standalone Openstack-Ansible operations 27 | guide that will address an operator's need for information on managing and 28 | configuring an OpenStack cloud using OpenStack-Ansible. 29 | 30 | Proposed change 31 | =============== 32 | 33 | The main focus of the operations guide is to re-organise the current content and 34 | develop new content so an OpenStack operator can easily search for information 35 | on maintaining their environment, troubleshooting, and resolving issues. 36 | 37 | The proposed changes are: 38 | 39 | * A new ToC with input from developers and operations: https://review.openstack.org/#/c/409854/ 40 | * Removal of duplicated content from the OpenStack manuals operations guide 41 | (so that this guide focuses primarily upon OpenStack-Ansible operations). 42 | * Structuring the guide in a 'runbook' format for the following reasons: 43 | 44 | #. Ensuring the guide includes lower-level how-to's for anyone starting to 45 | operate their own cloud. 46 | 47 | #. Ensuring the guide includes higher-level troubleshooting information for 48 | more experienced operator. 49 | 50 | #. It is structured to make it easy for operators to find the information 51 | they are looking for. 52 | 53 | * Review and update current operations content to follow the 54 | openstack-manuals documentation conventions. 55 | 56 | Alternatives 57 | ------------ 58 | 59 | * The current operations content and any future content will remain in the 60 | Developer Documentation. 
61 | 62 | Playbook/Role impact 63 | -------------------- 64 | 65 | N/A 66 | 67 | 68 | Upgrade impact 69 | -------------- 70 | 71 | N/A 72 | 73 | 74 | Security impact 75 | --------------- 76 | 77 | N/A 78 | 79 | 80 | Performance impact 81 | ------------------ 82 | 83 | N/A 84 | 85 | 86 | End user impact 87 | --------------- 88 | 89 | These changes will improve the end user experience, by providing 90 | a more structured and better flow of information to operate your OpenStack 91 | cloud. 92 | 93 | Deployer impact 94 | --------------- 95 | 96 | N/A 97 | 98 | 99 | Developer impact 100 | ---------------- 101 | 102 | N/A 103 | 104 | 105 | Dependencies 106 | ------------ 107 | 108 | N/A 109 | 110 | 111 | Implementation 112 | ============== 113 | 114 | Assignee(s) 115 | ----------- 116 | 117 | Primary assignee: 118 | Alexandra Settle (asettle) 119 | 120 | Other contributors: 121 | Andy McCrae (andymccr), OpenStack-Ansible PTL 122 | Darren Chan (darrenc) 123 | Robb Romans (rromans) 124 | 125 | Work items 126 | ---------- 127 | 128 | - Clarify and obtain consensus on the content structure 129 | - Gather information from SMEs as needed 130 | - Create a draft directory for operations guide changes 131 | - Create a work items list and allocate resources 132 | - Ensure documentation meets openstack-manuals writing conventions 133 | - Test draft documentation before publication 134 | 135 | Testing 136 | ======= 137 | 138 | The testing will be conducted by the community once a draft is available. 139 | OpenStack-Ansible users will be asked to utilise the new operations guide 140 | to perform the OpenStack operations and evaluate if the information provided 141 | is accurate, clear, and concise. 142 | 143 | Documentation impact 144 | ==================== 145 | 146 | This is a documentation change, N/A. 147 | 148 | References 149 | ========== 150 | 151 | * ToC planning 152 | 153 | * https://docs.google.com/document/d/1xeJ_lep7P2e7HLbRFG57Dx4W9s8brkuNIqJmOvheWKI/edit?usp=sharing 154 | 155 | * https://review.openstack.org/#/c/409854/ 156 | -------------------------------------------------------------------------------- /specs/pike/ovs-nsh.rst: -------------------------------------------------------------------------------- 1 | Openvswitch with NSH support in Neutron 2 | ####################################### 3 | :date: 2017-06-21 15:00 4 | :tags: Openvswitch,neutron,SFC,NSH 5 | 6 | Blueprint on Launchpad 7 | 8 | * https://blueprints.launchpad.net/openstack-ansible/+spec/openvswitch-with-nsh-support 9 | 10 | 11 | This spec introduces the work required to have Open vSwitch with NSH protocol 12 | support which is used in Service Function Chaining. 13 | 14 | Problem description 15 | =================== 16 | 17 | According to * https://datatracker.ietf.org/doc/draft-ietf-sfc-nsh/ 18 | Network Service Header (NSH) is inserted to a packet or a frame to realize 19 | service functions paths. Also provides a mechanism for metadata exchange 20 | along the instantiated service path. The NSH protocol is used as an SFC 21 | encapsulation which is required for the support of the Service Function 22 | Chaining (SFC) Architecture as it defined in RFC7665. 23 | 24 | The Openvswitch currently doesn't support the NSH protocol. So the only way 25 | to add NSH support to Open vSwitch is through Yi Yang's patches 26 | (https://github.com/yyang13/ovs_nsh_patches). 
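The change proposed below therefore makes the patched packages an explicit opt-in. For a deployer this is expected to reduce to a pair of overrides, sketched here using the variables introduced later in this spec:

.. code-block:: yaml

    # Sketch of the opt-in overrides described in this spec.
    neutron_plugin_type: ml2.ovs
    ovs_nsh_support: true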
27 | 28 | Proposed change 29 | =============== 30 | 31 | The proposed change is the use of the existing Neutron Ansible Role 32 | for the installation of Open vSwitch with NSH support when the user selects 33 | that functionality through specific configuration in Openstack-Ansible 34 | project. We intent to configure only Neutron component and not use the 35 | aforementioned functionality for end to end testing. 36 | 37 | The installation of Open vSwitch with NSH support will be addressed 38 | by the use of specific packages which are going to be maintained in 39 | private repositories unti the NSH functionality will be included in a 40 | subsequent release of Open vSwitch project. 41 | 42 | 43 | Alternatives 44 | ============ 45 | 46 | An alternative to create a SFC without NSH is the port chaining technique. 47 | The aforementioned technique uses Neutron ports to steer the traffic to a 48 | service chain and has no notion of the actual services which are attached 49 | to those Neutron ports. 50 | 51 | Playbook/Role impact 52 | -------------------- 53 | 54 | The os_neutron role will be modified to optionally install Open vSwitch with 55 | NSH support. The proposal is to add an extra variable so the user can decide 56 | whether or not he needs to add NSH support with the Open vSwitch installation. When 57 | the ``neutron_plugin_type`` variable is set to ``ml2.ovs`` or ``ml2.dragonflow`` 58 | and the ``ovs_nsh_support`` variable is set to ``true`` then the Open vSwitch will 59 | be installed with NSH support. So there will be an extra task in the ``neutron_pre_install.yml`` 60 | which will add the distribution specific repositories with the ovs_nsh packages. 61 | 62 | Upgrade impact 63 | -------------- 64 | 65 | This is the first implementation of Open vSwitch with NSH support in OpenStack-Ansible,so 66 | no upgrade concerns yet. 67 | 68 | Security impact 69 | --------------- 70 | 71 | No security impact 72 | 73 | Performance impact 74 | ------------------ 75 | 76 | The added NSH support to Open vSwitch will not have any performance impact to the current 77 | OpenStack-Ansible installation because the system will need to install only some extra packages. 78 | 79 | End user impact 80 | --------------- 81 | 82 | The end users will have the capability to create service function chains with the use 83 | of the NSH protocol. Also they can use OpenDaylight as networking backend which 84 | via the ``sfc`` component supports the creation of SFCs through the NSH protocol. 85 | 86 | Deployer impact 87 | --------------- 88 | 89 | The deployer needs to ensure that the specific repositories which hold the ovs_nsh packages 90 | are added to the system and the proper Open vSwitch packages are installed. 91 | 92 | Developer impact 93 | ---------------- 94 | 95 | The developer impact is really low because the NSH support for Open vSwitch is optional 96 | and can be ignored when extending or modifying Neutron role. 97 | 98 | Dependencies 99 | ------------ 100 | 101 | There are no dependencies 102 | 103 | Implementation 104 | ============== 105 | 106 | Assignee(s) 107 | ----------- 108 | 109 | Primary assignee: 110 | Dimitrios Markou (mardim) 111 | 112 | Work items 113 | ---------- 114 | 115 | 1. Add specific PPA for ovs_nsh packages 116 | 2. Install Open vSwitch with NSH protocol suppport 117 | 3. 
Document the new functionality 118 | 119 | Testing 120 | ======= 121 | 122 | Existing tests should be run because the only thing that change is that the 123 | installation of Open vSwitch is managed by specific repositories when NSH support is selected. 124 | 125 | Documentation impact 126 | ==================== 127 | 128 | The new functionality *Open vSwitch with NSH support* should be documented, explaining 129 | the required configuration parameters which are necessary for this deployment. 130 | 131 | References 132 | ========== 133 | 134 | Open vSwitch scenario with OpenStack-Ansible 135 | 136 | * https://docs.openstack.org/openstack-ansible-os_neutron/latest/app-openvswitch.html 137 | 138 | NSH ietf draft 139 | 140 | * https://datatracker.ietf.org/doc/draft-ietf-sfc-nsh/ 141 | 142 | SFC RFC 7665 143 | 144 | * https://tools.ietf.org/html/rfc7665 145 | 146 | PPA for Openvswitch-NSH packages 147 | 148 | * https://launchpad.net/~mardim/+archive/ubuntu/mardim-ppa 149 | 150 | Openvswitch-NSH packages for Centos 151 | 152 | * https://copr.fedorainfracloud.org/coprs/mardim/openvswitch-nsh/ 153 | -------------------------------------------------------------------------------- /specs/pike/replace-ip-generation.rst: -------------------------------------------------------------------------------- 1 | Replace IP Generation Code 2 | ########################## 3 | :date: 2017-1-11 22:00 4 | :tags: inventory, ip, networking 5 | 6 | 7 | The current inventory code uses a simple set to manage assigned IPs 8 | (``USED_IPS``) and complex queues to pull from the available subnets. 9 | 10 | This code can be simplified and made more modular. 11 | 12 | Launchpad blueprint: 13 | * https://blueprints.launchpad.net/openstack-ansible/+spec/replace-ip-generation 14 | 15 | The current IP generation code is tightly couple to the configuration loading, 16 | writing, and inventory manipulation code. To help provide better, more focused 17 | test coverage, this code can be updated and replaced. 18 | 19 | 20 | 21 | Problem description 22 | =================== 23 | 24 | The current IP generation code is difficult to maintain, despite mostly being 25 | `moved `_ into a separate ``ip.py`` 26 | module. The code uses the external ``Queue`` class, which is slightly more 27 | complex than necessary. The ``USED_IPS`` set and the pools of available IPs 28 | are not managed together, and could easily become out-of-sync. 29 | 30 | New code has been written to add an `IPManager class`_, but it is not 31 | currently integrated into any other code. Such integration is a somewhat 32 | large task, and would be error-prone to do in a single review. This spec 33 | is intended to serve as a road map to guide small, focused changes towards 34 | using it. 35 | 36 | Note that while the IPManager includes an API for external IPAM systems, this 37 | spec is only focused on using this class within the code, not on any sort of 38 | plugin system. 39 | 40 | Proposed change 41 | =============== 42 | 43 | An initial draft of new IP management code has been written in the `IPManager 44 | class`_. 45 | 46 | After that, the existing ``get_ip_address``, and ``set_used_ips`` were 47 | refactored to still use the existing data structures, but in a way that 48 | would allow usage of the new IPManager class. See `review 403915`_. 49 | 50 | Some refactors may be necessary for the IPManager class to facilitate this 51 | and further codify assumptions. 
52 | 53 | Alternatives 54 | ------------ 55 | 56 | The code be left as is, with the assumption that it will be replaced wholesale 57 | by some other system in the near future. That replacement might happen via 58 | plugins or a new inventory codebase. This has not been deeply explored in the 59 | context of the IP management/generation. 60 | 61 | One such replacement system, for example, could be using LXD to entirely 62 | manage container creation, which is where IP generation is primarily used. 63 | 64 | Playbook/Role impact 65 | -------------------- 66 | 67 | No noticeable impact on the playbooks and roles should be seen; this is 68 | largely facilitating code maintenance and should produce the same output. 69 | 70 | Upgrade impact 71 | -------------- 72 | 73 | There should be no upgrade impact - the IPManager class should be loaded with 74 | the already-generated IP addresses in upgraded installations. 75 | 76 | Security impact 77 | --------------- 78 | 79 | This change should not affect any sensitive data. It is unrelated to secret 80 | storage. 81 | 82 | 83 | Performance impact 84 | ------------------ 85 | 86 | Generating IPs may be slightly faster, since this approach doesn't rely on 87 | delayed access from ``Queue`` objects. However, the overall runtime of the 88 | inventory is negligible in the overall speed of the system and hasn't been 89 | profiled. 90 | 91 | End user impact 92 | --------------- 93 | 94 | This change would be invisible to users of the deployed cloud. 95 | 96 | Deployer impact 97 | --------------- 98 | 99 | No configuration or output changes should be introduced. The current 100 | configurations should be used as-is. 101 | 102 | Developer impact 103 | ---------------- 104 | 105 | This should improve quality of life for developers debugging the IP generation 106 | behavior. 107 | 108 | Dependencies 109 | ------------ 110 | 111 | This has no direct dependencies on other blueprints or specs. 112 | 113 | Implementation 114 | ============== 115 | 116 | Assignee(s) 117 | ----------- 118 | 119 | Primary assignee: 120 | nolan-brubaker, IRC: palendae 121 | 122 | Other contributors: 123 | steve-lewis, IRC: stevelle 124 | 125 | Please add **IRC nicknames** where applicable. 126 | 127 | Work items 128 | ---------- 129 | 130 | * Refactor current IP loading/management functions to be amenable to replacing 131 | the data structures. 132 | 133 | * Replace the data structures and update the objects being passed between 134 | functions. 135 | 136 | Testing 137 | ======= 138 | 139 | Unit and integration tests should be added for all code changes to confirm 140 | there are no regressions. 141 | 142 | Documentation impact 143 | ==================== 144 | 145 | Developer documentation should be updated to reflect the new mechanism used, 146 | preferably included with implementation patches. 147 | 148 | References 149 | ========== 150 | 151 | N/A 152 | 153 | .. _`IPManager class`: https://review.openstack.org/#/c/397299/ 154 | .. _`review 403915`: https://review.openstack.org/#/c/403915/ 155 | -------------------------------------------------------------------------------- /specs/queens/ansible-roles-reuse.rst: -------------------------------------------------------------------------------- 1 | Generalize Infrastructure Roles 2 | ############################### 3 | :date: 2017-09-10 14:00 4 | :tags: ansible, roles, mariadb, rabbitmq 5 | 6 | Provide a synopsis as to why you are creating this spec/blueprint. 
7 | 8 | Include the URL of your launchpad blueprint: 9 | * https://blueprints.launchpad.net/openstack-ansible/+spec/ansible-roles-reuse 10 | 11 | Currently openstack-ansible is maintaining infrastructure roles that are used 12 | to deploy general infrastructure services such as MariaDB and RabbitMQ, which 13 | are applicable in non-OpenStack ansible environments also. With little to no 14 | refactoring these roles can be used to deploy the services in other Ansible 15 | managed environments also. 16 | 17 | By maintaining robust, generalized service roles, they are more likely to be 18 | consumed, improved, and maintained by other operators in the greater Ansible 19 | community. This will benefit us by training us to keep a modular mindset when 20 | building the roles, which leads to better maintainability and wider testing 21 | for OSA consumers also. 22 | 23 | In some cases we may wish to deprecate our openstack-ansible roles and consume 24 | more generalized upstream alternatives. 25 | 26 | 27 | Problem description 28 | =================== 29 | 30 | In some of the roles (such as haproxy), we implement a very OSA specific 31 | deployment with very little reusability or configurability for a typical 32 | HAProxy deployer. 33 | 34 | Other roles, such as Galera server, are fairly generalized and robust, but 35 | carry the openstack-ansible-service_name naming scheme, making it less likely 36 | for anyone NOT using openstack-ansible to use the role in their deployments. 37 | 38 | pip_install is an example of a role that will require some minor refactoring to 39 | generalize it. The role performs some very out of scope tasks, such as repo 40 | management, which have nothing to do with installing pip. These features should 41 | be moved to appropriately modularized roles (a general repo management role?), 42 | so that pip_install is only doing the work it is meant to do. 43 | 44 | 45 | Proposed change 46 | =============== 47 | 48 | Examine the following roles to identify and refactor out of scope tasks and 49 | orchestrate openstack-ansible specific configurations at the integrated repo 50 | level. If the role is built properly it should offer the necessary service 51 | configuration to be injected from the inventory and playbooks. 52 | 53 | Roles to examine initially: 54 | * openstack-ansible-pip_install 55 | * openstack-ansible-lxc_hosts 56 | * openstack-ansible-lxc_container_create 57 | * openstack-ansible-haproxy_server 58 | * openstack-ansible-memcached_server 59 | * openstack-ansible-galera_server 60 | * openstack-ansible-rabbitmq_server 61 | * openstack-ansible-ceph_client 62 | 63 | Once the work outlined above has progressed sufficiently, we should consider 64 | renaming some of the roles to a more appropriate naming, ie. 65 | openstack-ansible-galera_server becomes ansible-mariadb-cluster, etc. 66 | 67 | 68 | Alternatives 69 | ------------ 70 | 71 | N/A 72 | 73 | Playbook/Role impact 74 | -------------------- 75 | 76 | The playbooks and especially inventory should eventually contain all of our 77 | openstack-ansible specific configurations. The infrastructure roles themselves 78 | should be generalized without an assumption or skew toward being consumed only 79 | by openstack-ansible. 80 | 81 | In some cases this is already implemented, but in other cases the role will 82 | undergo significant changes or wholesale replacement to accomplish this. 
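As an illustration of the target state, a generalized role should be consumable from a plain playbook outside of OSA, roughly as sketched below. The host group is hypothetical, and the role name reflects the possible rename discussed above rather than anything that exists today.

.. code-block:: yaml

    # Illustrative standalone playbook consuming a generalized role.
    - name: Deploy a MariaDB/Galera cluster outside of OSA
      hosts: database_servers              # hypothetical inventory group
      become: true
      roles:
        - role: ansible-mariadb-cluster    # possible post-rename role name
      vars:
        galera_cluster_name: example_cluster
        galera_root_password: "{{ vault_galera_root_password }}"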
83 | 84 | 85 | Upgrade impact 86 | -------------- 87 | 88 | Consumers of the roles will need to adjust to any major refactorings that take 89 | place, including possible renaming of the git sources and role names. 90 | 91 | 92 | Security impact 93 | --------------- 94 | 95 | N/A 96 | 97 | 98 | Performance impact 99 | ------------------ 100 | 101 | N/A 102 | 103 | 104 | End user impact 105 | --------------- 106 | 107 | N/A 108 | 109 | 110 | Deployer impact 111 | --------------- 112 | 113 | Deployers who work frequently with openstack-ansible will benefit from the 114 | ability to use the same roles to deploy applicable services for other projects 115 | they work on besides OSA. 116 | 117 | 118 | Developer impact 119 | ---------------- 120 | 121 | It is possible that this could draw more developers to assist in maintaining 122 | some of the roles. Cosmetic changes such as renaming may also help veteran 123 | OSA developers take a more abstract approach when crafting changes to these 124 | roles, which should make them more maintainable in the long run. 125 | 126 | 127 | Dependencies 128 | ------------ 129 | 130 | N/A 131 | 132 | 133 | Implementation 134 | ============== 135 | 136 | Assignee(s) 137 | ----------- 138 | 139 | Primary assignee: 140 | Logan Vig (LP: loganv; IRC: logan-) 141 | 142 | 143 | Work items 144 | ---------- 145 | 146 | * Examine the infrastructure roles for out of scope tasks or reusability 147 | concerns. Address the issues by refactoring or replacing the role. 148 | * Improve the role documentation if necessary with example playbooks 149 | demonstrating ad-hoc usage of the role. 150 | * Rename the role and repo to a globally namespaced ansible role such as 151 | ansible-service-name. 152 | 153 | 154 | Testing 155 | ======= 156 | 157 | N/A 158 | 159 | 160 | Documentation impact 161 | ==================== 162 | 163 | Improving and expanding the role documentation will be beneficial for 164 | reusability also. 165 | 166 | 167 | References 168 | ========== 169 | 170 | * openstack-ansible 5/18 community meeting: http://eavesdrop.openstack.org/meetings/openstack_ansible_meeting/2017/openstack_ansible_meeting.2017-05-18-16.01.log.html#l-136 171 | -------------------------------------------------------------------------------- /specs/queens/blazar.rst: -------------------------------------------------------------------------------- 1 | Integration of Blazar with OpenStack-Ansible 2 | ############################################## 3 | :date: 2017-12-17 00:02 4 | :tags: openstack, blazar, opnfv, promise 5 | 6 | Blazar is a resource reservation service for OpenStack. It is used to book 7 | or reserve specific resources for a particular amount of time. This spec outlines 8 | the steps required to integrate Blazar with OpenStack-Ansible. 9 | 10 | Problem description 11 | =================== 12 | 13 | Blazar is used to reserve OpenStack resources in advance for a specific amount of 14 | time. However, it needs to be installed manually with OpenStack-Ansible. No role 15 | exists to deploy it as other services are deployed. 16 | 17 | Proposed change 18 | =============== 19 | 20 | The change consists of creating a new role for Blazar integration with OpenStack-Ansible. 21 | It will make it possible to deploy Blazar as part of the installation of OpenStack-Ansible, 22 | rather then requiring to install and configure it manually. 23 | 24 | Alternatives 25 | ------------ 26 | 27 | There are no alternatives. 
28 | 29 | Playbook/Role impact 30 | -------------------- 31 | 32 | This is a new feature added into OpenStack-Ansible. No role currently exists. Therefore, 33 | a new role, `openstack-ansible-os_blazar` needs to be written from scratch. 34 | 35 | Upgrade impact 36 | -------------- 37 | 38 | No upgrade impact. 39 | 40 | 41 | Security impact 42 | --------------- 43 | 44 | No security impact. 45 | 46 | Performance impact 47 | ------------------ 48 | 49 | No performance impact. 50 | 51 | End user impact 52 | --------------- 53 | 54 | End user will be able to use Blazar out of the box, without going through any 55 | manual installation and configuration. One of the endusers is Promise, an OPNFV 56 | project, which is using Blazar, in an NFV context. 57 | 58 | Deployer impact 59 | --------------- 60 | 61 | No impact. 62 | 63 | Developer impact 64 | ---------------- 65 | 66 | Little or no impact, since this feature will be optional and can be safely ignored. 67 | 68 | Dependencies 69 | ------------ 70 | 71 | No dependencies. 72 | 73 | Implementation 74 | ============== 75 | 76 | Assignee(s) 77 | ----------- 78 | 79 | Primary assignee: 80 | Taseer Ahmed (Taseer) 81 | 82 | Other contributors: 83 | Fatih Degirmenci (fdegir) 84 | 85 | Work items 86 | ---------- 87 | 88 | Blazar is not available as a service for OpenStack-Ansible. No role already exists. 89 | A new role will be developed from scratch in compliance with the standards set by the 90 | community. The steps for developing this new role are as follows: 91 | 92 | 1. Create a new repository on GitHub. 93 | 2. Add tasks to the role. 94 | 3. Add tests for the new role. 95 | 4. Ensure that the role works well with AIO. 96 | 97 | Testing 98 | ======= 99 | 100 | Tests will be developed to ensure that deployment of Blazar works and also to test the 101 | functionality of the deployed service. 102 | 103 | Documentation impact 104 | ==================== 105 | 106 | As this would be new feature added to OpenStack-Ansible, it needs to be 107 | documented, explaining all the configuration parameters. 108 | 109 | References 110 | ========== 111 | 112 | Blazar Overview 113 | 114 | * https://wiki.openstack.org/wiki/Blazar 115 | 116 | Blazar Installation steps 117 | 118 | * https://docs.openstack.org/blazar/latest/install/install-without-devstack.html 119 | 120 | OPNFV Promise 121 | 122 | * https://wiki.opnfv.org/display/promise/Promise -------------------------------------------------------------------------------- /specs/queens/congress.rst: -------------------------------------------------------------------------------- 1 | Integration of Congress with OpenStack Ansible 2 | ############################################## 3 | :date: 2017-08-30 00:02 4 | :tags: openstack, congress 5 | 6 | Blueprint on Launchpad: 7 | * https://blueprints.launchpad.net/openstack-ansible/+spec/role-congress 8 | 9 | Congress is the policy framework for OpenStack. This spec introduces the work required 10 | to deploy Congress, as a service for OpenStack Ansible. 11 | 12 | Problem description 13 | =================== 14 | 15 | There are many policy frameworks for OpenStack. However, very few of them 16 | come with OpenStack Ansible. They need to be manually configured and installed. 17 | The aim of this spec is to deploy Congress with OpenStack Ansible, provided as a 18 | service to OpenStack Ansible and OpenStack users in general. 
19 | 20 | Proposed change 21 | =============== 22 | 23 | The change consists of integrating Congress with OpenStack Ansible during deployment 24 | phase of OpenStack. 25 | 26 | Alternatives 27 | ------------ 28 | 29 | Many policy frameworks for OpenStack exist. Tacker is one of them and has already been 30 | integrated with OpenStack Ansible. However, Tacker is more of a VNF Manager, mostly used 31 | for NFV related activites such as Service Function Chaining etc. 32 | 33 | Playbook/Role impact 34 | -------------------- 35 | 36 | This is a new feature being introduced.An existing role does not already exist. 37 | A new role will be developed, e.g `openstack-ansible-os_congress`. This new role 38 | will be developed as per the steps outlined by the community. 39 | 40 | Upgrade impact 41 | -------------- 42 | 43 | No upgrade impact since this would be the first time implementation of the proposed 44 | change. 45 | 46 | Security impact 47 | --------------- 48 | 49 | No security impact. 50 | 51 | Performance impact 52 | ------------------ 53 | 54 | Performance impact should be very low, it only needs a few preliminary packages. 55 | 56 | End user impact 57 | --------------- 58 | 59 | Congress uses a simple declarative language to define real world policies. Currently 60 | it needs to be manually configured and deployed. This feature would enable the users to 61 | use Congress as a service, and be able to manage OpenStack more efficiently. 62 | 63 | Deployer impact 64 | --------------- 65 | 66 | No default policies will be enforced. If the deployer chooses to enable Congress service, 67 | policies need to be defined as per the requirements. 68 | 69 | Developer impact 70 | ---------------- 71 | 72 | Little or no impact, since this feature will be optional and can be safely ignored. 73 | 74 | Dependencies 75 | ------------ 76 | 77 | No dependencies. 78 | 79 | Implementation 80 | ============== 81 | 82 | Assignee(s) 83 | ----------- 84 | 85 | Primary assignee: 86 | Taseer Ahmed (Taseer) 87 | 88 | Other contributors: 89 | Fatih Degirmenci (fdegir) 90 | 91 | Work items 92 | ---------- 93 | 94 | Congress is not available as a service for OpenStack Ansible. No role already exists. 95 | A new role will be developed from scratch in compliance with the standards set by the 96 | community. The steps for developing this new role are as follows: 97 | 98 | 1. Create a new repository on GitHub. 99 | 2. Add tasks to the role. 100 | 3. Add tests for the new role. 101 | 4. Ensure that the role works well with AIO. 102 | 103 | Testing 104 | ======= 105 | 106 | Tests will be developed to ensure that deployment of Congress works and also test the 107 | functionality of the deployed service. 108 | 109 | Documentation impact 110 | ==================== 111 | 112 | As this would be new feature added to OpenStack Ansible, it needs to be 113 | documented, explaining all the configuration parameters. 
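For example, that documentation would likely show how the service is enabled and pointed at the shared infrastructure, in the spirit of the sketch below; every name here is hypothetical until the role exists.

.. code-block:: yaml

    # Hypothetical overrides for the proposed os_congress role.
    congress_service_region: RegionOne
    congress_galera_address: "{{ internal_lb_vip_address }}"
    congress_rabbitmq_userid: congress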
114 | 115 | References 116 | ========== 117 | 118 | Congress Overview 119 | 120 | * https://wiki.openstack.org/wiki/Congress 121 | 122 | Congress Installation steps 123 | 124 | * https://docs.openstack.org/congress/latest/install/index.html#separate-install -------------------------------------------------------------------------------- /specs/queens/hybrid-messaging.rst: -------------------------------------------------------------------------------- 1 | Provide option of hybrid messaging backends 2 | ########################################### 3 | :date: 2017-09-31 10:00 4 | :tags: messaging, rabbitmq, qpid 5 | 6 | OpenStack services make use of a message bus system for both remote procedure 7 | calls (RPC) between components and to emit notifications. The aim of this spec 8 | is to layout a plan for providing an alternative to RabbitMQ for RPC messaging. 9 | 10 | https://blueprints.launchpad.net/openstack-ansible/+spec/hybrid-messaging 11 | 12 | Problem description 13 | =================== 14 | 15 | RabbitMQ is currently used as the message bus system for all remote procedure 16 | calls (RPC) and notifications of OpenStack services deployed by 17 | OpenStack-Ansible. While RabbitMQ is well tested and has wide acceptance across 18 | OpenStack projects and deployments, it may not be the most efficient option for 19 | RPC messaging. A brokerless message queue may provide greater performance of 20 | messaging throughput and be less of a bottleneck, particularly in larger scale 21 | deployments. 22 | 23 | Proposed change 24 | =============== 25 | 26 | This spec proposes offering Qpid Dispatch Router as an alternative option 27 | for RPC messaging within an OpenStack-Ansible deployment. 28 | 29 | Deployers will be able be given more options for messaging backends: 30 | 31 | * RabbitMQ for both RPC and notifications (will remain the default deployment) 32 | * Qpid Dispatch Router for RPC (with no dedicated backend for notifications) 33 | * Qpid Dispatch Router for RPC and RabbitMQ for notifications (hybrid 34 | messaging) 35 | 36 | Alternatives 37 | ------------ 38 | 39 | Leave RabbitMQ as the sole option for messaging within OpenStack-Ansible 40 | deployments. 41 | 42 | Playbook/Role impact 43 | -------------------- 44 | 45 | Playbooks that deploy OpenStack services will need to be modified to make 46 | any required against the deployer's messaging backend of choice. Roles will 47 | need to include additional package dependencies to connect to the Qpid 48 | Dispatch Router. 49 | 50 | Upgrade impact 51 | -------------- 52 | 53 | An upgrade scenario will test the migration of a deployment from using 54 | RabbitMQ. 55 | 56 | Security impact 57 | --------------- 58 | 59 | The default deployment of Qpid Dispatch Router should provide as close as 60 | possible parity with OpenStack-Ansible's default RabbitMQ deployment including 61 | use of TLS/SSL encryption and virtualhost namespacing of messaging data. 62 | 63 | Performance impact 64 | ------------------ 65 | 66 | Especially in larger scale deployments, there is a potential improvement in 67 | the throughput of messages and lowered CPU utilization. 68 | 69 | End user impact 70 | --------------- 71 | 72 | When chosen to be implemented by a deployer, the changes involved should be 73 | transparent to end users. 74 | 75 | Deployer impact 76 | --------------- 77 | 78 | There would be no immediate impact to deployers as the changes involved would 79 | be entirely opt-in initially. 
For deployers choosing to deploy Qpid Dispatch 80 | Router, the service will be installed, likely in a new container, and OpenStack 81 | services will be configured to make use of it. 82 | 83 | Developer impact 84 | ---------------- 85 | 86 | New roles for OpenStack projects should include configuration options to allow 87 | for using either RabbitMQ or Qpid Dispatch Router and testing of each. 88 | 89 | Dependencies 90 | ------------ 91 | 92 | N/A 93 | 94 | Implementation 95 | ============== 96 | 97 | Assignee(s) 98 | ----------- 99 | 100 | Primary assignee: 101 | jimmy-mccrory (jmccrory) 102 | 103 | Work items 104 | ---------- 105 | 106 | * Create a new role for the installation of Qpid Dispatch Router 107 | * Create a playbook to deploy Qpid Dispatch Router 108 | * Modify OpenStack service configuration templates within each role to allow 109 | a transport URL other than RabbitMQ and default variables to support that 110 | * Add required client package dependencies to roles 111 | * Create test scenarios in the roles to deploy using Qpid Dispatch Router as 112 | the messaging backend for RPC 113 | * Create a common playbook for any Qpid Dispatch Router configuration changes 114 | required by individual OpenStack projects that the OpenStack project 115 | playbooks will consume 116 | * Create test scenarios in the integrated gate for greenfield and upgrade 117 | deployments 118 | 119 | Testing 120 | ======= 121 | 122 | A Qpid Dispatch Router scenario would be created within the roles of OpenStack 123 | projects which make use of a message queue and the integrated OpenStack-Ansible 124 | repo to ensure installations and deployments, including upgrades, remain 125 | functional. 126 | 127 | Documentation impact 128 | ==================== 129 | 130 | Documentation will need to be added for the configuration options of Qpid 131 | services, the configuration options for OpenStack services to make use of Qpid 132 | services, and any associated maintenance tasks within the Operations Guide. 133 | 134 | References 135 | ========== 136 | 137 | AMQP 1.O (Qpid Dispatch Router) Oslo Messaging Driver Reference: 138 | 139 | * https://docs.openstack.org/oslo.messaging/latest/admin/AMQP1.0.html 140 | 141 | Message Routing- A Next-Generation Alternative to RabbitMQ: 142 | 143 | * https://www.youtube.com/watch?v=R0fwHr8XC1I 144 | 145 | Hybrid Messaging Solutions for Large Scale OpenStack Deployments: 146 | 147 | * https://www.youtube.com/watch?v=o30YaqfLV9A 148 | -------------------------------------------------------------------------------- /specs/queens/hyperconverged-containers.rst: -------------------------------------------------------------------------------- 1 | Hyper-Converge Containers 2 | ######################### 3 | :date: 2017-09-01 22:00 4 | :tags: containers, hyperconverged, performance 5 | 6 | Reduce container counts across the infra structure hosts. 7 | 8 | To lower our deployment times and resource consumption across the board. This 9 | spec looks to remove single purpose containers that have little to no benefit 10 | on the architecture at scale. 11 | 12 | This change groups services resulting in fewer containers. This does not 13 | mix service categories so there's no worry of cross polluting a different 14 | service with unknown packages or unknown workloads. We're only look to minimize 15 | the container types we have and simplify operations. By converging containers 16 | we're removing no less than 10 steps in the container deployment process and the 17 | service setup. 
Operationally we're reducing the load on operations teams 18 | managing clouds at any scale. 19 | 20 | 21 | Problem description 22 | =================== 23 | 24 | When we started this project we started with the best of intentions to create a 25 | pseudo micro-service model for our system layout and container orchestration. 26 | While this works today, it does create a lot of unnecessary containers in terms 27 | of resource utilization. 28 | 29 | 30 | Proposed change 31 | =============== 32 | 33 | Converge groups of containers found within the `env.d` directory into a single 34 | container where at all possible. Most the changes we need to get this work done 35 | have already been committed. In some instances we will need to "revert a change" 36 | to get the core functionality of this spec into master but there will be little 37 | to no development required to get the initial convergence work completed. 38 | 39 | Once the convergence work is complete we intend to develop a set of playbooks 40 | which will allow the deployer to run an "opt-in" set of tasks which will cleanup 41 | containers and services wherever necessary. Services behind a load balanacer 42 | will need to be updated. Updates to the load balancer will be covered by the 43 | "opt-in" playbooks provided the environment is using our supported software 44 | LB (HAProxy). The "opt-in" playbooks will need to be codified, tested, and 45 | documented. Should it be decided that the hyperconverged work is to be 46 | cherry-picked to a stable branch, the new playbooks will need to first exist 47 | and be tested within our periodic gates. We should expect no playbook impact 48 | in-terms of the general deployer workflow. 49 | 50 | 51 | Alternatives 52 | ------------ 53 | 54 | We could leave everything as-is which carries the resource requirements we 55 | currently have along with an understanding that the resources required will 56 | grow given the fact OpenStack services, both existing and net new, are ever 57 | expanding. 58 | 59 | 60 | Playbook/Role impact 61 | -------------------- 62 | 63 | At least one new playbook will be added allowing a deployer to cleanup old 64 | container types from the run-time and inventory should they decide to. The 65 | cleanup playbook(s) will be "opt-in" and will not be part of our normal 66 | automated deployment process. 67 | 68 | 69 | Upgrade impact 70 | -------------- 71 | 72 | There is no upgrade impact with this change as any existing deployment would 73 | already have the all required associations within inventory. Services would 74 | continue to function normally after this change. Greenfield deployments on the 75 | other hand would have fewer containers to manage which reduces the resource 76 | requirements while also ensuring we retain the host, network, and process 77 | separation we have today. 78 | 79 | We will create a set of playbooks to cleanup some of the redundant containers 80 | that would exist post upgrade however the execution of this playbook would be 81 | opt-in. 82 | 83 | 84 | Security impact 85 | --------------- 86 | 87 | Security is not a concern within this spec however reducing the container 88 | count would reduce the potential attack surface we already have. 89 | 90 | 91 | Performance impact 92 | ------------------ 93 | 94 | Hyperconverging containers will reduce resource consumption on physical host. 95 | Reducing the resources required to run an OpenStack cloud will improve the 96 | performance of the playbooks and the system as a whole. 
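The savings come directly from collapsing single-purpose container types. As a purely illustrative example, a converged ``env.d`` skeleton might define one container per service category instead of one container per daemon:

.. code-block:: yaml

    # Illustrative only: a converged container skeleton grouping the
    # daemons of a single service category into one container.
    container_skel:
      heat_api_container:
        belongs_to:
          - orchestration_containers
        contains:
          - heat_api
          - heat_api_cfn
          - heat_api_cloudwatch
          - heat_engine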
97 | 98 | 99 | End user impact 100 | --------------- 101 | 102 | N/A 103 | 104 | 105 | Deployer impact 106 | --------------- 107 | 108 | Deployers will have fewer containers to manage and be concerned with as they 109 | run clouds for long periods of time. 110 | 111 | * Within an upgrade scenario a deployer will have the option to "opt-in" to a 112 | hyperconverged setup. This change will have no service impact on running 113 | deployments by default. 114 | 115 | 116 | Developer impact 117 | ---------------- 118 | 119 | N/A 120 | 121 | 122 | Dependencies 123 | ------------ 124 | 125 | * If we're to test the "opt-in" cleanup playbooks we'll need a periodic upgrade 126 | gate job. The playbooks would be executed by the upgrade gate job and post 127 | results to the ML/channel so that the OSA development team is notified of the 128 | failure. 129 | 130 | 131 | Implementation 132 | ============== 133 | 134 | Assignee(s) 135 | ----------- 136 | 137 | Primary assignee: 138 | Kevin Carter (IRC: cloudnull) 139 | Major Hayden (IRC: mhayden) 140 | 141 | 142 | Work items 143 | ---------- 144 | 145 | * Converge the containers into fewer groups 146 | * Create the "opt-in" container reduction playbooks 147 | * Document the new playbooks 148 | 149 | 150 | Testing 151 | ======= 152 | 153 | * The core functionality of this patch will be tested on every commit. 154 | * If the upgrade test dependencies are met we can create a code path within the 155 | periodic gates and test the "opt-in" cleanup playbooks. 156 | 157 | 158 | Documentation impact 159 | ==================== 160 | 161 | Documentation will be created for the "opt-in" container cleanup playbooks 162 | created. 163 | 164 | 165 | References 166 | ========== 167 | 168 | N/A 169 | -------------------------------------------------------------------------------- /specs/rocky/masakari.rst: -------------------------------------------------------------------------------- 1 | Integration of Masakari with OpenStack-Ansible 2 | ############################################## 3 | :date: 2018-03-22 14:00 4 | :tags: openstack, masakari, masakari-monitors 5 | 6 | Blueprint on Launchpad: 7 | * https://blueprints.launchpad.net/openstack-ansible/+spec/masakari-ansible-plugin 8 | 9 | Masakari provides Virtual Machine High Availability (VMHA) service for OpenStack clouds 10 | by automatically recovering the KVM-based Virtual Machine(VM)s from failure events such 11 | as VM process down, provisioning process down, and nova-compute host failure. It also 12 | provides API service for managing and controlling the automated rescue mechanism. 13 | The Masakari service consists of the following components: 14 | 15 | * masakari-api: 16 | An OpenStack-native REST API that processes API requests by sending 17 | them to the ``masakari-engine`` over `Remote Procedure Call (RPC)`. 18 | 19 | * masakari-engine: 20 | Processes the notifications received from ``masakari-api`` by execcuting the 21 | recovery workflow in asynchronus way. 22 | 23 | * masakari-monitors: 24 | Monitors for Masakari provides Virtual Machine High Availability (VMHA) service for OpenStack 25 | clouds by automatically detecting the failure events such as VM process down, provisioning 26 | process down, and nova-compute host failure. If it detects the events, it sends notifications 27 | to the masakari-api. 28 | 29 | This spec outlines the steps required to integrate Masakari with OpenStack-Ansible. 
30 | 31 | Problem description 32 | =================== 33 | 34 | Masakari provides Instances High Availability Service for OpenStack clouds 35 | by automatically recovering failed Instances. However, it needs to be installed 36 | manually with OpenStack-Ansible. No role exists to deploy it as other services are deployed. 37 | 38 | Proposed change 39 | =============== 40 | 41 | The proposed changes would include: 42 | 43 | * Import a proof of concept role for Masakari from 44 | https://github.com/NirajSingh90/openstack-ansible-os_masakari to 45 | ``openstack-ansible-os_masakari`` 46 | * Follow the usual path described in the developer documentation. 47 | 48 | Alternatives 49 | ------------ 50 | 51 | There are no alternatives. 52 | 53 | Playbook/Role impact 54 | -------------------- 55 | 56 | This is a new feature added into OpenStack-Ansible. No role currently exists. Therefore, 57 | new role, `openstack-ansible-os_masakari` needs to be written from scratch. 58 | 59 | Upgrade impact 60 | -------------- 61 | 62 | No upgrade impact since this would be the first implementation of the proposed change. 63 | 64 | Security impact 65 | --------------- 66 | 67 | No security impact. 68 | 69 | Performance impact 70 | ------------------ 71 | 72 | No performance impact. 73 | 74 | End user impact 75 | --------------- 76 | 77 | End user will be able to use masakari as a service within OpenStack-Ansible. 78 | 79 | Deployer impact 80 | --------------- 81 | 82 | Deployers will need to enable Masakari deployments if they choose to use this. 83 | Masakari will not be deployed by default. 84 | 85 | Developer impact 86 | ---------------- 87 | 88 | No impact. 89 | 90 | Dependencies 91 | ------------ 92 | 93 | By employing a combination of Corosync and Pacemaker, OpenStack Masakari creates a 94 | cluster of servers, detecting and reporting failure of hosts in the cluster. 95 | So masakari is dependent on Corosync and Pacemaker. 96 | 97 | We will reuse an external role for corosync and pacemaker to not re-invent the wheel, 98 | like the one found in https://github.com/leucos/ansible-pacemaker-corosync . 99 | 100 | Implementation 101 | ============== 102 | 103 | Assignee(s) 104 | ----------- 105 | 106 | Primary assignee: 107 | Niraj Singh (IRC: niraj_singh) 108 | 109 | Work items 110 | ---------- 111 | 112 | Masakari is not available as a service for OpenStack-Ansible. No role already exists. 113 | A new role will be developed from scratch in compliance with the standards set by the 114 | community. It will be added under https://github.com/openstack/openstack-ansible-os_masakari 115 | 116 | Note: Masakari role will install below services: 117 | masakari-api 118 | masakari-engine 119 | masakari-processmonitor 120 | masakari-hostmonitor 121 | masakari-instancemonitor 122 | 123 | masakari-processmonitor, masakari-hostmonitor and masakari-instancemonitor will be 124 | installed only on nova-compute nodes 125 | 126 | Testing 127 | ======= 128 | 129 | Tests will be developed to ensure that deployment of Masakari works. Masakari 130 | doesn't have tempest tests therefore we will start by testing the API responses 131 | codes. Masakari-monitor and Masakari-engine services tests will be added in 132 | future using third party CI tests. 133 | 134 | Documentation impact 135 | ==================== 136 | 137 | As this would be new feature added to OpenStack-Ansible, it needs to be 138 | documented, explaining all the configuration parameters. 
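As a sketch of what that documentation could show, enabling the service might come down to defining a host group for the API and engine, with the monitors following the compute hosts as noted in the work items above. The group name and address are illustrative only.

.. code-block:: yaml

    # Illustrative /etc/openstack_deploy/openstack_user_config.yml entry
    # for the masakari-api and masakari-engine services.
    masakari-infra_hosts:
      infra1:
        ip: 172.29.236.11
    # The process, host and instance monitors are installed on the
    # nova compute hosts by the role, so no extra group is needed here.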
139 | 
140 | References
141 | ==========
142 | 
143 | Masakari Overview
144 | 
145 |  * https://wiki.openstack.org/wiki/Masakari
146 | 
147 | Masakari developer/operator documentation
148 | 
149 |  * https://docs.openstack.org/masakari/latest
150 | 
--------------------------------------------------------------------------------
/specs/rocky/openstack-distribution-packages.rst:
--------------------------------------------------------------------------------
1 | Install OpenStack services from distribution packages
2 | #######################################################
3 | :date: 2018-03-27 00:00
4 | :tags: roles, deployment
5 | 
6 | Blueprint on Launchpad
7 | 
8 |  * https://blueprints.launchpad.net/openstack-ansible/+spec/openstack-distribution-packages
9 | 
10 | This spec outlines the work required to enable the OpenStack-Ansible roles to
11 | install the OpenStack services using the packages provided by the
12 | distributions' cloud repositories.
13 | 
14 | Problem description
15 | ===================
16 | 
17 | OpenStack-Ansible installs the OpenStack services from source. Whilst this
18 | is great in terms of flexibility, it creates some problems, such as:
19 | 
20 | * Long deployment times, since wheel packages need to be built and distributed.
21 | * Installations that are unsupported by distributions. The versions of OpenStack services
22 |   built from source do not necessarily match what distributions test together as
23 |   part of their integration and verification process, so it is hard for them to
24 |   provide support for such installations. As a result, operators have
25 |   limited options when seeking technical support for their deployments.
26 | 
27 | Proposed change
28 | ===============
29 | 
30 | Add an additional installation method to all the OpenStack-Ansible roles in
31 | which the services will be installed using the packages provided by the
32 | distributions themselves. The default installation method will not change.
33 | 
34 | Alternatives
35 | ------------
36 | 
37 | N/A
38 | 
39 | Playbook/Role impact
40 | --------------------
41 | 
42 | All the OpenStack-Ansible roles which install OpenStack services (os_*) will be
43 | impacted by the proposed change. A new variable will be made available on a
44 | per-role basis to allow deployers to select the preferred installation method.
45 | 
46 | Switching from one installation method to the other will not be supported.
47 | This can be clarified in the deployer documentation and also explicitly detected
48 | and prevented in the Ansible playbooks, possibly by storing a local fact on the host
49 | to denote the installation method and checking it during upgrades.
50 | 
51 | Upgrade impact
52 | --------------
53 | 
54 | Upgrades should not be impacted since the default installation method will not
55 | change.
56 | 
57 | Security impact
58 | ---------------
59 | 
60 | The security of the overall installation will not change, since distributions
61 | normally backport the security fixes which are already present in the upstream packages,
62 | so both installation methods will offer the same level of security assurance.
63 | 
64 | Performance impact
65 | ------------------
66 | 
67 | The overall performance of the deployment will likely be improved since the
68 | distribution packages normally have their default settings tweaked and
69 | optimized to match each distribution's environment and needs.
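To make the proposed selection mechanism concrete, the following is a minimal
sketch of the per-role wiring described under Playbook/Role impact, using
``os_glance`` purely as a hypothetical example. The variable and file names
follow the pattern listed under Work items below, but the exact task layout
and fact handling are left to each role::

    # defaults/main.yml (the existing source-based method stays the default)
    glance_install_method: source

    # tasks/main.yml
    - name: Refuse to switch installation methods on an existing deployment
      assert:
        that:
          - _previous_install_method == glance_install_method
        msg: "Switching between the source and distro methods is not supported."
      vars:
        _previous_install_method: "{{ (ansible_local | default({})).get('openstack_ansible', {}).get('glance', {}).get('install_method', glance_install_method) }}"

    - name: Record the chosen installation method as a local fact
      ini_file:
        # assumes /etc/ansible/facts.d already exists on the target host
        dest: /etc/ansible/facts.d/openstack_ansible.fact
        section: glance
        option: install_method
        value: "{{ glance_install_method }}"

    - name: Include the tasks for the selected installation method
      include_tasks: "glance_install_{{ glance_install_method }}.yml"

Each role would default its ``*_install_method`` variable to ``source`` so
that existing deployments and upgrades keep their current behaviour.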
70 | 
71 | End user impact
72 | ---------------
73 | 
74 | N/A
75 | 
76 | Deployer impact
77 | ---------------
78 | 
79 | The benefit of this new method for deployers is twofold:
80 | 
81 | - Use packages supported by the distributions and provide feedback to them.
82 |   This benefits both distributions and operators, since both ends use packages
83 |   which have passed integration and functional testing before being released.
84 | - Shorten deployment times, since distribution packages are used instead of building
85 |   new ones from source.
86 | 
87 | Developer impact
88 | ----------------
89 | 
90 | N/A
91 | 
92 | Dependencies
93 | ------------
94 | 
95 | N/A
96 | 
97 | Implementation
98 | ==============
99 | 
100 | Assignee(s)
101 | -----------
102 | 
103 | Primary assignee:
104 |   Markos Chandras (hwoarang)
105 | 
106 | Work items
107 | ----------
108 | 
109 | The following work items are the same across all impacted roles:
110 | 
111 | * Move the existing installation tasks to a new file (``${role}_install_source.yml``)
112 | * Create a new file (``${role}_install_distro.yml``) with a set of tasks for distribution
113 |   installations, if necessary.
114 | * Add a new variable to allow deployers to select the installation method (``${role}_install_method``)
115 | * Dynamically include the appropriate installation file based on the variable's value (see the sketch above)
116 | 
117 | Testing
118 | =======
119 | 
120 | Since the default installation method does not change, no new tests are required.
121 | However, developers may choose to add new jobs on a per-distribution basis to test
122 | the new installation method.
123 | 
124 | Documentation impact
125 | ====================
126 | 
127 | Documentation needs to be modified to explain how to use the ``distribution``
128 | installation method.
129 | 
130 | References
131 | ==========
132 | 
133 | N/A
134 | 
--------------------------------------------------------------------------------
/tox.ini:
--------------------------------------------------------------------------------
1 | [tox]
2 | minversion = 3.18.0
3 | skipsdist = True
4 | envlist = docs
5 | ignore_basepython_conflict = True
6 | 
7 | [testenv]
8 | basepython = python3
9 | usedevelop = True
10 | deps =
11 |   -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master}
12 |   -r{toxinidir}/requirements.txt
13 | commands =
14 |   /usr/bin/find . -type f -name "*.pyc" -delete
15 | passenv =
16 |   HOME
17 |   USER
18 |   http_proxy
19 |   HTTP_PROXY
20 |   https_proxy
21 |   HTTPS_PROXY
22 |   no_proxy
23 |   NO_PROXY
24 | allowlist_externals =
25 |   bash
26 | setenv =
27 |   PYTHONUNBUFFERED=1
28 |   VIRTUAL_ENV={envdir}
29 | 
30 | [testenv:docs]
31 | deps = -r{toxinidir}/doc/requirements.txt
32 | commands =
33 |   bash -c "rm -rf doc/build"
34 |   doc8 doc
35 |   sphinx-build -W -b html doc/source doc/build/html
36 | 
37 | [testenv:pdf-docs]
38 | deps = {[testenv:docs]deps}
39 | allowlist_externals =
40 |   make
41 | commands =
42 |   sphinx-build -W -b latex doc/source doc/build/pdf
43 |   make -C doc/build/pdf
44 | 
45 | [doc8]
46 | # Settings for doc8:
47 | extensions = .rst
48 | 
49 | # environment used by the -infra templated docs job
50 | [testenv:venv]
51 | commands =
52 |   {posargs}
--------------------------------------------------------------------------------
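For reference, with the ``tox.ini`` above the specs documentation can be built
locally by running the ``docs`` environment::

    tox -e docs

This removes any previous build, runs doc8 against the RST sources and builds
the HTML tree into ``doc/build/html``.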