├── .gitignore
├── .gitreview
├── .yamllint
├── LICENSE
├── README.rst
├── a-universe-from-nothing.sh
├── configure-local-networking.sh
├── etc
│   └── kayobe
│       ├── ansible
│       │   ├── filter_plugins
│       │   ├── group_vars
│       │   ├── lab-hosts.yml
│       │   ├── pull-retag-push.yml
│       │   └── test_plugins
│       ├── apt.yml
│       ├── bifrost.yml
│       ├── bmc.yml
│       ├── compute.yml
│       ├── container-engine.yml
│       ├── controllers.yml
│       ├── dell-switch-bmp.yml
│       ├── dnf.yml
│       ├── dns.yml
│       ├── docker-registry.yml
│       ├── globals.yml
│       ├── grafana.yml
│       ├── hooks
│       │   ├── .gitkeep
│       │   └── overcloud-inventory-discover
│       │       └── post.d
│       │           └── 01-lab-hosts.yml
│       ├── idrac.yml
│       ├── infra-vms.yml
│       ├── inspector.yml
│       ├── inventory
│       │   ├── group_vars
│       │   │   ├── compute
│       │   │   │   └── network-interfaces
│       │   │   ├── controllers
│       │   │   │   └── network-interfaces
│       │   │   ├── infra-vms
│       │   │   │   ├── ansible-python-interpreter
│       │   │   │   └── network-interfaces
│       │   │   ├── overcloud
│       │   │   │   └── ansible-python-interpreter
│       │   │   ├── seed-hypervisor
│       │   │   │   ├── ansible-python-interpreter
│       │   │   │   └── network-interfaces
│       │   │   ├── seed
│       │   │   │   ├── ansible-python-interpreter
│       │   │   │   └── network-interfaces
│       │   │   └── storage
│       │   │       └── network-interfaces
│       │   ├── groups
│       │   └── hosts
│       ├── ipa.yml
│       ├── ironic.yml
│       ├── kolla.yml
│       ├── kolla
│       │   ├── config
│       │   │   ├── bifrost
│       │   │   │   └── bifrost.yml
│       │   │   ├── ironic-inspector.conf
│       │   │   ├── neutron.conf
│       │   │   └── neutron
│       │   │       └── ml2_conf.ini
│       │   ├── globals.yml
│       │   └── kolla-build.conf
│       ├── libvirt.yml
│       ├── logging.yml
│       ├── monitoring.yml
│       ├── network-allocation.yml
│       ├── networks.yml
│       ├── neutron.yml
│       ├── nova.yml
│       ├── opensm.yml
│       ├── openstack.yml
│       ├── overcloud-dib.yml
│       ├── overcloud.yml
│       ├── pip.yml
│       ├── proxy.yml
│       ├── seed-hypervisor.yml
│       ├── seed-vm.yml
│       ├── seed.yml
│       ├── ssh.yml
│       ├── storage.yml
│       ├── swift.yml
│       ├── time.yml
│       ├── users.yml
│       └── vgpu.yml
├── init-runonce.sh
├── kayobe-env
├── pull-retag-push-images.sh
├── setup.cfg
├── setup.py
├── tenks-compute.yml
├── tenks-storage.yml
├── tenks.yml
├── test-requirements.txt
├── tox.ini
└── zuul.d
    └── project.yaml
/.gitignore:
--------------------------------------------------------------------------------
1 | *.py[cod]
2 |
3 | # C extensions
4 | *.so
5 |
6 | # Packages
7 | *.egg*
8 | *.egg-info
9 | dist
10 | build
11 | eggs
12 | parts
13 | bin
14 | var
15 | sdist
16 | develop-eggs
17 | .installed.cfg
18 | lib
19 | lib64
20 |
21 | # Installer logs
22 | pip-log.txt
23 |
24 | # Unit test / coverage reports
25 | .tox
26 |
27 | # Editors
28 | *~
29 | .*.swp
30 | .*sw?
31 |
32 | # Files generated by Ansible
33 | ansible/*.retry
34 |
35 | # Others
36 | .DS_Store
37 | .vimrc
38 |
39 | # Ignore auto-generated group variables file.
40 | etc/kayobe/inventory/group_vars/seed/ansible-host
41 |
42 | # Ignore kolla configuration.
43 | etc/kolla
44 |
--------------------------------------------------------------------------------
/.gitreview:
--------------------------------------------------------------------------------
1 | [gerrit]
2 | host=review.opendev.org
3 | port=29418
4 | project=openstack/kayobe-config-dev.git
5 |
--------------------------------------------------------------------------------
/.yamllint:
--------------------------------------------------------------------------------
1 | extends: default
2 |
3 | rules:
4 | braces:
5 | max-spaces-inside: 1
6 | level: error
7 | brackets:
8 | max-spaces-inside: 1
9 | level: error
10 | comments:
11 | require-starting-space: false
12 | truthy: disable
13 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 |
2 | Apache License
3 | Version 2.0, January 2004
4 | http://www.apache.org/licenses/
5 |
6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
7 |
8 | 1. Definitions.
9 |
10 | "License" shall mean the terms and conditions for use, reproduction,
11 | and distribution as defined by Sections 1 through 9 of this document.
12 |
13 | "Licensor" shall mean the copyright owner or entity authorized by
14 | the copyright owner that is granting the License.
15 |
16 | "Legal Entity" shall mean the union of the acting entity and all
17 | other entities that control, are controlled by, or are under common
18 | control with that entity. For the purposes of this definition,
19 | "control" means (i) the power, direct or indirect, to cause the
20 | direction or management of such entity, whether by contract or
21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the
22 | outstanding shares, or (iii) beneficial ownership of such entity.
23 |
24 | "You" (or "Your") shall mean an individual or Legal Entity
25 | exercising permissions granted by this License.
26 |
27 | "Source" form shall mean the preferred form for making modifications,
28 | including but not limited to software source code, documentation
29 | source, and configuration files.
30 |
31 | "Object" form shall mean any form resulting from mechanical
32 | transformation or translation of a Source form, including but
33 | not limited to compiled object code, generated documentation,
34 | and conversions to other media types.
35 |
36 | "Work" shall mean the work of authorship, whether in Source or
37 | Object form, made available under the License, as indicated by a
38 | copyright notice that is included in or attached to the work
39 | (an example is provided in the Appendix below).
40 |
41 | "Derivative Works" shall mean any work, whether in Source or Object
42 | form, that is based on (or derived from) the Work and for which the
43 | editorial revisions, annotations, elaborations, or other modifications
44 | represent, as a whole, an original work of authorship. For the purposes
45 | of this License, Derivative Works shall not include works that remain
46 | separable from, or merely link (or bind by name) to the interfaces of,
47 | the Work and Derivative Works thereof.
48 |
49 | "Contribution" shall mean any work of authorship, including
50 | the original version of the Work and any modifications or additions
51 | to that Work or Derivative Works thereof, that is intentionally
52 | submitted to Licensor for inclusion in the Work by the copyright owner
53 | or by an individual or Legal Entity authorized to submit on behalf of
54 | the copyright owner. For the purposes of this definition, "submitted"
55 | means any form of electronic, verbal, or written communication sent
56 | to the Licensor or its representatives, including but not limited to
57 | communication on electronic mailing lists, source code control systems,
58 | and issue tracking systems that are managed by, or on behalf of, the
59 | Licensor for the purpose of discussing and improving the Work, but
60 | excluding communication that is conspicuously marked or otherwise
61 | designated in writing by the copyright owner as "Not a Contribution."
62 |
63 | "Contributor" shall mean Licensor and any individual or Legal Entity
64 | on behalf of whom a Contribution has been received by Licensor and
65 | subsequently incorporated within the Work.
66 |
67 | 2. Grant of Copyright License. Subject to the terms and conditions of
68 | this License, each Contributor hereby grants to You a perpetual,
69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
70 | copyright license to reproduce, prepare Derivative Works of,
71 | publicly display, publicly perform, sublicense, and distribute the
72 | Work and such Derivative Works in Source or Object form.
73 |
74 | 3. Grant of Patent License. Subject to the terms and conditions of
75 | this License, each Contributor hereby grants to You a perpetual,
76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable
77 | (except as stated in this section) patent license to make, have made,
78 | use, offer to sell, sell, import, and otherwise transfer the Work,
79 | where such license applies only to those patent claims licensable
80 | by such Contributor that are necessarily infringed by their
81 | Contribution(s) alone or by combination of their Contribution(s)
82 | with the Work to which such Contribution(s) was submitted. If You
83 | institute patent litigation against any entity (including a
84 | cross-claim or counterclaim in a lawsuit) alleging that the Work
85 | or a Contribution incorporated within the Work constitutes direct
86 | or contributory patent infringement, then any patent licenses
87 | granted to You under this License for that Work shall terminate
88 | as of the date such litigation is filed.
89 |
90 | 4. Redistribution. You may reproduce and distribute copies of the
91 | Work or Derivative Works thereof in any medium, with or without
92 | modifications, and in Source or Object form, provided that You
93 | meet the following conditions:
94 |
95 | (a) You must give any other recipients of the Work or
96 | Derivative Works a copy of this License; and
97 |
98 | (b) You must cause any modified files to carry prominent notices
99 | stating that You changed the files; and
100 |
101 | (c) You must retain, in the Source form of any Derivative Works
102 | that You distribute, all copyright, patent, trademark, and
103 | attribution notices from the Source form of the Work,
104 | excluding those notices that do not pertain to any part of
105 | the Derivative Works; and
106 |
107 | (d) If the Work includes a "NOTICE" text file as part of its
108 | distribution, then any Derivative Works that You distribute must
109 | include a readable copy of the attribution notices contained
110 | within such NOTICE file, excluding those notices that do not
111 | pertain to any part of the Derivative Works, in at least one
112 | of the following places: within a NOTICE text file distributed
113 | as part of the Derivative Works; within the Source form or
114 | documentation, if provided along with the Derivative Works; or,
115 | within a display generated by the Derivative Works, if and
116 | wherever such third-party notices normally appear. The contents
117 | of the NOTICE file are for informational purposes only and
118 | do not modify the License. You may add Your own attribution
119 | notices within Derivative Works that You distribute, alongside
120 | or as an addendum to the NOTICE text from the Work, provided
121 | that such additional attribution notices cannot be construed
122 | as modifying the License.
123 |
124 | You may add Your own copyright statement to Your modifications and
125 | may provide additional or different license terms and conditions
126 | for use, reproduction, or distribution of Your modifications, or
127 | for any such Derivative Works as a whole, provided Your use,
128 | reproduction, and distribution of the Work otherwise complies with
129 | the conditions stated in this License.
130 |
131 | 5. Submission of Contributions. Unless You explicitly state otherwise,
132 | any Contribution intentionally submitted for inclusion in the Work
133 | by You to the Licensor shall be under the terms and conditions of
134 | this License, without any additional terms or conditions.
135 | Notwithstanding the above, nothing herein shall supersede or modify
136 | the terms of any separate license agreement you may have executed
137 | with Licensor regarding such Contributions.
138 |
139 | 6. Trademarks. This License does not grant permission to use the trade
140 | names, trademarks, service marks, or product names of the Licensor,
141 | except as required for reasonable and customary use in describing the
142 | origin of the Work and reproducing the content of the NOTICE file.
143 |
144 | 7. Disclaimer of Warranty. Unless required by applicable law or
145 | agreed to in writing, Licensor provides the Work (and each
146 | Contributor provides its Contributions) on an "AS IS" BASIS,
147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
148 | implied, including, without limitation, any warranties or conditions
149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
150 | PARTICULAR PURPOSE. You are solely responsible for determining the
151 | appropriateness of using or redistributing the Work and assume any
152 | risks associated with Your exercise of permissions under this License.
153 |
154 | 8. Limitation of Liability. In no event and under no legal theory,
155 | whether in tort (including negligence), contract, or otherwise,
156 | unless required by applicable law (such as deliberate and grossly
157 | negligent acts) or agreed to in writing, shall any Contributor be
158 | liable to You for damages, including any direct, indirect, special,
159 | incidental, or consequential damages of any character arising as a
160 | result of this License or out of the use or inability to use the
161 | Work (including but not limited to damages for loss of goodwill,
162 | work stoppage, computer failure or malfunction, or any and all
163 | other commercial damages or losses), even if such Contributor
164 | has been advised of the possibility of such damages.
165 |
166 | 9. Accepting Warranty or Additional Liability. While redistributing
167 | the Work or Derivative Works thereof, You may choose to offer,
168 | and charge a fee for, acceptance of support, warranty, indemnity,
169 | or other liability obligations and/or rights consistent with this
170 | License. However, in accepting such obligations, You may act only
171 | on Your own behalf and on Your sole responsibility, not on behalf
172 | of any other Contributor, and only if You agree to indemnify,
173 | defend, and hold each Contributor harmless for any liability
174 | incurred by, or claims asserted against, such Contributor by reason
175 | of your accepting any such warranty or additional liability.
176 |
177 |
--------------------------------------------------------------------------------
/README.rst:
--------------------------------------------------------------------------------
1 | ======================================================================================================================
2 | Kayobe Configuration for "A Universe from Nothing: Containerised OpenStack deployment using Kolla, Ansible and Kayobe"
3 | ======================================================================================================================
4 |
5 | This repository may be used as a workshop to configure, deploy and
6 | get hands-on with OpenStack Kayobe.
7 |
8 | It provides a configuration and walkthrough for the `Kayobe
9 | <https://docs.openstack.org/kayobe/latest/>`__ project based on the
10 | configuration provided by the `kayobe-config
11 | <https://opendev.org/openstack/kayobe-config>`__ repository.
12 | It deploys a containerised OpenStack environment using Kolla, Ansible and
13 | Kayobe.
14 |
15 | Select the Git branch of this repository for the OpenStack release you
16 | are interested in, and follow the README.
17 |
18 | Requirements
19 | ============
20 |
21 | For this workshop, we require the use of a single server, configured as a
22 | *seed hypervisor*. This server should be a bare metal node or VM running
23 | Ubuntu Jammy or Rocky 9, with the following minimum requirements:
24 |
25 | * 64GB RAM (more is recommended when growing the lab deployment)
26 | * 100GB disk
27 |
28 | We will also need SSH access to the seed hypervisor, and passwordless sudo
29 | configured for the login user.
30 |
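If passwordless sudo is not already configured, it can be enabled with a
sudoers drop-in file. A minimal sketch, assuming the login user is ``rocky``
(adjust for your user):

.. code-block:: console

   echo 'rocky ALL=(ALL) NOPASSWD: ALL' | sudo tee /etc/sudoers.d/rocky
   sudo chmod 0440 /etc/sudoers.d/rocky
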
31 | Exercise
32 | ========
33 |
34 | On the seed hypervisor, we will deploy three VMs:
35 |
36 | * 1 seed
37 | * 1 controller
38 | * 1 compute node
39 |
40 | The seed runs a standalone Ironic service. The controller and compute node
41 | are 'virtual bare metal' hosts, and we will use the seed to provision them
42 | with an OS. Next we'll deploy OpenStack services on the controller and
43 | compute node.
44 |
45 | At the end you'll have a miniature OpenStack cluster that you can use to test
46 | out booting an instance using Nova, access the Horizon dashboard, etc.
47 |
48 | Usage
49 | =====
50 |
51 | There are four parts to this guide:
52 |
53 | * `Preparation`_
54 | * `Deploying a Seed`_
55 | * `A Universe from a Seed`_
56 | * `Next Steps`_
57 |
58 | *Preparation* has instructions for preparing the seed hypervisor for the
59 | exercise and fetching the necessary source code.
60 |
61 | *Deploying a Seed* includes all instructions necessary to download and install
62 | the Kayobe prerequisites on a plain Rocky 9 or Ubuntu Jammy cloud image,
63 | including provisioning and configuration of a seed VM. Optionally, snapshot the
64 | instance after this step to reduce setup time in the future.
65 |
66 | *A Universe from a Seed* contains all instructions necessary to deploy from
67 | a host running a seed VM. An image suitable for this can be created
68 | via `Optional: Creating a Seed Snapshot`_.
69 |
70 | Once the control plane has been deployed see `Next Steps`_ for
71 | some ideas for what to try next.
72 |
73 | Preparation
74 | -----------
75 |
76 | This shows how to prepare the seed hypervisor for the exercise. It assumes you
77 | have created a seed hypervisor instance fitting the requirements above and have
78 | already logged in (e.g. ``ssh rocky@<ip>``, or ``ssh ubuntu@<ip>``).
79 |
80 | .. code-block:: console
81 |
82 | # Install git and tmux.
83 | if command -v dnf >/dev/null 2>&1; then
84 | sudo dnf -y install git tmux
85 | else
86 | sudo apt update
87 | sudo apt -y install git tmux
88 | fi
89 |
90 | # Disable the firewall.
91 | sudo systemctl is-enabled firewalld && sudo systemctl stop firewalld && sudo systemctl disable firewalld
92 |
93 | # Put SELinux in permissive mode both immediately and permanently.
94 | if command -v setenforce >/dev/null 2>&1; then
95 | sudo setenforce 0
96 | sudo sed -i 's/^SELINUX=enforcing/SELINUX=permissive/' /etc/selinux/config
97 | fi
98 |
99 | # Prevent sudo from making DNS queries.
100 | echo 'Defaults !fqdn' | sudo tee /etc/sudoers.d/no-fqdn
101 |
102 | # Optional: start a new tmux session in case we lose our connection.
103 | tmux
104 |
105 | # Start at home.
106 | cd
107 |
108 | # Clone Beokay.
109 | git clone https://github.com/stackhpc/beokay.git
110 |
111 | # Use Beokay to bootstrap your control host.
112 | [[ -d deployment ]] || beokay/beokay.py create --base-path ~/deployment --kayobe-repo https://opendev.org/openstack/kayobe.git --kayobe-branch stable/2025.1 --kayobe-config-repo https://github.com/stackhpc/a-universe-from-nothing.git --kayobe-config-branch stable/2025.1
113 |
114 | # Clone the Tenks repository.
115 | cd ~/deployment/src
116 | [[ -d tenks ]] || git clone https://opendev.org/openstack/tenks.git
117 | cd
118 |
119 | # Configure host networking (bridge, routes & firewall)
120 | ~/deployment/src/kayobe-config/configure-local-networking.sh
121 |
122 | Deploying a Seed
123 | ----------------
124 |
125 | This shows how to create an image suitable for deploying Kayobe. It assumes you
126 | have created a seed hypervisor instance fitting the requirements above and have
127 | already logged in (e.g. ``ssh rocky@<ip>``, or ``ssh ubuntu@<ip>``), and
128 | performed the necessary `Preparation`_.
129 |
130 | .. code-block:: console
131 |
132 | # If you have not done so already, activate the Kayobe environment, to allow
133 | # running commands directly.
134 | source ~/deployment/env-vars.sh
135 |
136 | # Configure the seed hypervisor host.
137 | kayobe seed hypervisor host configure
138 |
139 | # Provision the seed VM.
140 | kayobe seed vm provision
141 |
142 | # Configure the seed host, and deploy a local registry.
143 | kayobe seed host configure
144 |
145 | # Pull, retag images, then push to our local registry.
146 | ~/deployment/src/kayobe-config/pull-retag-push-images.sh
147 |
148 | # Deploy the seed services.
149 | kayobe seed service deploy
150 |
151 | # Deploying the seed restarts the networking interface, so run
152 | # configure-local-networking.sh again to re-add routes.
153 | ~/deployment/src/kayobe-config/configure-local-networking.sh
154 |
155 | # Optional: Shutdown the seed VM if creating a seed snapshot.
156 | sudo virsh shutdown seed
157 |
158 | If required, add any additional SSH public keys to ``~/.ssh/authorized_keys``.
159 |
160 | Optional: Creating a Seed Snapshot
161 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
162 |
163 | If necessary, take a snapshot of the hypervisor instance at this point to speed up this
164 | process in the future.
165 |
166 | You are now ready to deploy a control plane using this host or snapshot.
167 |
168 | A Universe from a Seed
169 | -----------------------------
170 |
171 | This shows how to deploy a control plane from a VM image that contains a
172 | pre-deployed seed VM, or a host that has run through the steps in
173 | `Deploying a Seed`_.
174 |
175 | Having a snapshot image saves us some time if we need to repeat the deployment.
176 | If working from a snapshot, create a new instance with the same dimensions as
177 | the Seed image and log into it.
178 | Otherwise, continue working with the instance from `Deploying a Seed`_.
179 |
180 | .. code-block:: console
181 |
182 | # Optional: start a new tmux session in case we lose our connection.
183 | tmux
184 |
185 | # Configure non-persistent networking, if the node has rebooted.
186 | ~/deployment/src/kayobe-config/configure-local-networking.sh
187 |
188 | Make sure that the seed VM (running Bifrost and supporting services)
189 | is present and running.
190 |
191 | .. code-block:: console
192 |
193 | # Check if the seed VM is present and running.
194 | sudo virsh list --all
195 |
196 | # Start up the seed VM if it is shut off.
197 | sudo virsh start seed
198 |
199 | We use the `Tenks project <https://opendev.org/openstack/tenks>`_ to model
200 | some 'bare metal' VMs for the controller and compute node. Here we set up
201 | our model development environment, alongside the seed VM.
202 |
203 | .. code-block:: console
204 |
205 | # Set Environment variables for Kayobe dev scripts
206 | export KAYOBE_CONFIG_SOURCE_PATH=~/deployment/src/kayobe-config
207 | export KAYOBE_VENV_PATH=~/deployment/venvs/kayobe
208 | export TENKS_CONFIG_PATH=~/deployment/src/kayobe-config/tenks.yml
209 |
210 | # Use tenks to deploy the overcloud machines
211 | ~/deployment/src/kayobe/dev/tenks-deploy-overcloud.sh ~/deployment/src/tenks
212 |
213 | # Activate the Kayobe environment, to allow running commands directly.
214 | source ~/deployment/env-vars.sh
215 |
216 | # Inspect and provision the overcloud hardware:
217 | kayobe overcloud inventory discover
218 | kayobe overcloud hardware inspect
219 | kayobe overcloud introspection data save
220 | kayobe overcloud provision
221 |
222 | Configure and deploy OpenStack to the control plane
223 | (following the `Kayobe host configuration documentation <https://docs.openstack.org/kayobe/latest/configuration/reference/hosts.html>`_):
224 |
225 | .. code-block:: console
226 |
227 | kayobe overcloud host configure
228 | kayobe overcloud container image pull
229 | kayobe overcloud service deploy
230 | source ~/deployment/src/kayobe-config/etc/kolla/public-openrc.sh
231 | kayobe overcloud post configure
232 |
233 | At this point it should be possible to access the Horizon GUI via the
234 | server's public IP address, using port 80 (achieved through port
235 | forwarding to the controller VM). Use the admin credentials from
236 | ``OS_USERNAME`` and ``OS_PASSWORD`` to get in.
237 |
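The admin credentials can be read from the generated openrc file. A minimal
sketch, assuming the usual Kolla-Ansible openrc format of ``export`` lines:

.. code-block:: console

   grep -E 'OS_USERNAME|OS_PASSWORD' ~/deployment/src/kayobe-config/etc/kolla/public-openrc.sh
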
238 | The following script will register some resources (keys, flavors,
239 | networks, images, etc) in OpenStack to enable booting up a tenant
240 | VM:
241 |
242 | .. code-block:: console
243 |
244 | source ~/deployment/src/kayobe-config/etc/kolla/public-openrc.sh
245 | ~/deployment/src/kayobe-config/init-runonce.sh
246 |
247 | Following the instructions displayed by the above script, boot a VM.
248 | You'll need to have activated the ``~/deployment/venvs/os-venv`` virtual environment.
249 |
250 | .. code-block:: console
251 |
252 | source ~/deployment/venvs/os-venv/bin/activate
253 | openstack server create --image cirros \
254 | --flavor m1.tiny \
255 | --key-name mykey \
256 | --network demo-net demo1
257 |
258 | # Assign a floating IP to the server to make it accessible.
259 | openstack floating ip create public1
260 | fip=$(openstack floating ip list -f value -c 'Floating IP Address' --status DOWN | head -n 1)
261 | openstack server add floating ip demo1 $fip
262 |
263 | # Check SSH access to the VM.
264 | ssh cirros@$fip
265 |
266 | # If the ssh command above fails, you may need to reconfigure the local
267 | # networking setup again:
268 | ~/deployment/src/kayobe-config/configure-local-networking.sh
269 |
270 | *Note*: when accessing the VNC console of an instance via Horizon,
271 | you will be sent to the internal IP address of the controller,
272 | ``192.168.33.2``, which will fail. Open the console-only display link
273 | in a new browser tab and replace this IP in the address bar with
274 | the public IP of the hypervisor host.
275 |
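A sketch of one way to do this from the command line, assuming ``PUBLIC_IP``
is a shell variable you have set to the hypervisor's public address
(``openstack console url show`` is part of the standard OpenStack CLI):

.. code-block:: console

   source ~/deployment/venvs/os-venv/bin/activate
   # Print the console URL, substituting the hypervisor's public IP.
   openstack console url show demo1 -f value -c url | sed "s/192.168.33.2/$PUBLIC_IP/"
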
276 | That's it, you're done!
277 |
278 | Next Steps
279 | -----------------------------
280 |
281 | Here are some ideas for things to explore with the deployment:
282 |
283 | * **Access Control Plane Components**: take a deep dive into the internals
284 | by `Exploring the Deployment`_.
285 | * **Deploy OpenSearch and OpenSearch Dashboards**: see `Enabling Centralised Logging`_
286 | to get logs aggregated from across our OpenStack control plane.
287 |
288 | Exploring the Deployment
289 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
290 |
291 | Once each of the VMs becomes available, they should be accessible via SSH as
292 | the ``rocky``, ``ubuntu`` or ``stack`` user at the following IP addresses:
293 |
294 | =========== ================
295 | Host IP
296 | =========== ================
297 | seed ``192.168.33.5``
298 | controller0 ``192.168.33.3``
299 | compute0 ``192.168.33.6``
300 | =========== ================
301 |
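For example, to log into the controller as the ``stack`` user:

.. code-block:: console

   ssh stack@192.168.33.3
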
302 | The control plane services are run in Docker containers, so try
303 | using the docker CLI to inspect the system.
304 |
305 | .. code-block:: console
306 |
307 | # List containers
308 | docker ps
309 | # List images
310 | docker images
311 | # List volumes
312 | docker volume ls
313 | # Inspect a container
314 | docker inspect <container>
315 | # Execute a process in a container
316 | docker exec -it <container> <command>
317 |
318 | The kolla container configuration is generated under ``/etc/kolla`` on
319 | the seed and overcloud hosts - each container has its own directory
320 | that is bind mounted into the container.
321 |
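For example, on the controller (a sketch; service names and paths assume the
usual Kolla layout under ``/etc/kolla``):

.. code-block:: console

   ssh stack@192.168.33.3
   # Each service has its own directory of configuration, bind mounted into
   # the corresponding container.
   sudo ls /etc/kolla/
   sudo cat /etc/kolla/nova-api/nova.conf
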
322 | Log files are stored in the ``kolla_logs`` docker volume, which is
323 | mounted at ``/var/log/kolla`` in each container. They can be accessed
324 | on the host at ``/var/lib/docker/volumes/kolla_logs/_data/``.
325 |
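For example, to follow the Nova API log on the controller (a sketch; the
exact file layout under the volume may vary by service):

.. code-block:: console

   ssh stack@192.168.33.3
   sudo tail -f /var/lib/docker/volumes/kolla_logs/_data/nova/nova-api.log
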
326 | Exploring Tenks & the Seed
327 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
328 |
329 | Verify that Tenks has created ``controller0`` and ``compute0`` VMs:
330 |
331 | .. code-block:: console
332 |
333 | sudo virsh list --all
334 |
335 | Verify that `virtualbmc <https://opendev.org/openstack/virtualbmc>`_ is running:
336 |
337 | .. code-block:: console
338 |
339 | ~/tenks-venv/bin/vbmc list
340 | +-------------+---------+--------------+------+
341 | | Domain name | Status | Address | Port |
342 | +-------------+---------+--------------+------+
343 | | compute0 | running | 192.168.33.4 | 6231 |
344 | | controller0 | running | 192.168.33.4 | 6230 |
345 | +-------------+---------+--------------+------+
346 |
347 | VirtualBMC config is here (on the VM hypervisor host):
348 |
349 | .. code-block:: console
350 |
351 | /root/.vbmc/controller0/config
352 |
353 | Note that the controller and compute node are registered in Ironic, in the bifrost container.
354 | Once Kayobe is deployed and configured, compute0 and controller0 are controlled by
355 | Bifrost rather than by virsh commands.
356 |
357 | .. code-block:: console
358 |
359 | ssh stack@192.168.33.5
360 | docker exec -it bifrost_deploy bash
361 | export OS_CLOUD=bifrost
362 | baremetal node list
363 | +--------------------------------------+-------------+---------------+-------------+--------------------+-------------+
364 | | UUID | Name | Instance UUID | Power State | Provisioning State | Maintenance |
365 | +--------------------------------------+-------------+---------------+-------------+--------------------+-------------+
366 | | d7184461-ac4b-4b9e-b9ed-329978fc0648 | compute0 | None | power on | active | False |
367 | | 1a40de56-be8a-49e2-a903-b408f432ef23 | controller0 | None | power on | active | False |
368 | +--------------------------------------+-------------+---------------+-------------+--------------------+-------------+
369 | exit
370 |
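From the same shell inside the ``bifrost_deploy`` container, individual nodes
can be inspected and power-managed with the standard ``baremetal`` CLI. A
sketch (note that the power commands really do affect the lab VMs):

.. code-block:: console

   baremetal node show controller0
   baremetal node power off controller0
   baremetal node power on controller0
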
371 | Enabling Centralised Logging
372 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
373 |
374 | In Kolla-Ansible, centralised logging is easily enabled. It results in the
375 | deployment of OpenSearch services, along with configuration to forward
376 | all OpenStack service logs. **Be cautious as OpenSearch will consume a
377 | significant portion of available resources on a standard deployment.**
378 |
379 | To enable the service, one flag must be changed in
380 | ``~/deployment/src/kayobe-config/etc/kayobe/kolla.yml``:
381 |
382 | .. code-block:: diff
383 |
384 | -#kolla_enable_central_logging:
385 | +kolla_enable_central_logging: yes
386 |
387 | This will deploy ``opensearch`` and ``opensearch_dashboards`` containers, and
388 | configure logging via ``fluentd`` so that logging from all deployed Docker
389 | containers will be routed to OpenSearch.
390 |
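To see where ``fluentd`` forwards logs, its generated configuration on the
controller can be inspected. A sketch, assuming the usual Kolla config layout:

.. code-block:: console

   ssh stack@192.168.33.3 sudo grep -ri opensearch /etc/kolla/fluentd/ | head
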
391 | Before this can be applied, it is necessary to download the missing images to
392 | the seed VM. Pull, retag and push the centralised logging images:
393 |
394 | .. code-block:: console
395 |
396 | ~/deployment/src/kayobe-config/pull-retag-push-images.sh ^opensearch
397 |
398 | To deploy the logging stack:
399 |
400 | .. code-block:: console
401 |
402 | kayobe overcloud container image pull
403 | kayobe overcloud service deploy
404 |
405 | As simple as that...
406 |
407 | The new containers can be seen running on the controller node:
408 |
409 | .. code-block:: console
410 |
411 | $ ssh stack@192.168.33.3 docker ps
412 | CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES
413 | fad79f29afbc 192.168.33.5:4000/openstack.kolla/opensearch-dashboards:2025.1-rocky-9 "dumb-init --single-…" 6 hours ago Up 6 hours (healthy) opensearch_dashboards
414 | 64df77adc709 192.168.33.5:4000/openstack.kolla/opensearch:2025.1-rocky-9 "dumb-init --single-…" 6 hours ago Up 6 hours (healthy) opensearch
415 |
416 | We can see the log indexes in OpenSearch:
417 |
418 | .. code-block:: console
419 |
420 | curl -X GET "192.168.33.3:9200/_cat/indices?v"
421 |
422 | To access OpenSearch Dashboards, we must first forward connections from our
423 | public interface to the OpenSearch Dashboards service running on our
424 | ``controller0`` VM.
425 |
426 | The easiest way to do this is to add OpenSearch Dashboards' default port (5601) to our
427 | ``configure-local-networking.sh`` script in ``~/deployment/src/kayobe-config/``:
428 |
429 | .. code-block:: diff
430 |
431 | --- a/configure-local-networking.sh
432 | +++ b/configure-local-networking.sh
433 | @@ -20,7 +20,7 @@ seed_hv_private_ip=$(ip a show dev $iface | grep 'inet ' | awk '{ print $2 }' |
434 | # Forward the following ports to the controller.
435 | # 80: Horizon
436 | # 6080: VNC console
437 | -forwarded_ports="80 6080"
438 | +forwarded_ports="80 6080 5601"
439 |
440 | Then rerun the script to apply the change:
441 |
442 | .. code-block:: console
443 |
444 | ~/deployment/src/kayobe-config/configure-local-networking.sh
445 |
446 | We can now connect to OpenSearch Dashboards using our hypervisor host's public IP and port 5601.
447 |
448 | The username is ``opensearch``, and the password can be extracted from the
449 | Kolla-Ansible passwords file (in production these would be vault-encrypted,
450 | but they are not here).
451 |
452 | .. code-block:: console
453 |
454 | grep opensearch_dashboards ~/deployment/src/kayobe-config/etc/kolla/passwords.yml
455 |
456 | Once you're in, OpenSearch Dashboards needs some further setup which is not automated.
457 | Set the log index to ``flog-*`` and you should be ready to go.
458 |
459 | Adding the Barbican service
460 | ^^^^^^^^^^^^^^^^^^^^^^^^^^^
461 |
462 | `Barbican <https://docs.openstack.org/barbican/latest/>`_ is the OpenStack
463 | secret management service. It is an example of a simple service we
464 | can use to illustrate the process of adding new services to our deployment.
465 |
466 | As with the Logging service above, enable Barbican by modifying the flag in
467 | ``~/deployment/src/kayobe-config/etc/kayobe/kolla.yml`` as follows:
468 |
469 | .. code-block:: diff
470 |
471 | -#kolla_enable_barbican:
472 | +kolla_enable_barbican: yes
473 |
474 | This instructs Kolla to deploy the Barbican API, worker and keystone-listener
475 | containers. Pull down the Barbican images:
476 |
477 | .. code-block:: console
478 |
479 | ~/deployment/src/kayobe-config/pull-retag-push-images.sh barbican
480 |
481 | To deploy the Barbican service:
482 |
483 | .. code-block:: console
484 |
485 | # Activate the venv if not already active
486 | source ~/deployment/env-vars.sh
487 |
488 | kayobe overcloud container image pull
489 | kayobe overcloud service deploy
490 |
491 | Once Barbican has been deployed it can be tested using the barbicanclient
492 | plugin to the OpenStack CLI. This should be installed and tested in the
493 | OpenStack venv:
494 |
495 | .. code-block:: console
496 |
497 | # Deactivate existing venv context if necessary
498 | deactivate
499 |
500 | # Activate the OpenStack venv
501 | source ~/deployment/venvs/os-venv/bin/activate
502 |
503 | # Install barbicanclient
504 | pip install python-barbicanclient -c https://releases.openstack.org/constraints/upper/2025.1
505 |
506 | # Source the OpenStack environment variables
507 | source ~/deployment/src/kayobe-config/etc/kolla/public-openrc.sh
508 |
509 | # Store a test secret
510 | openstack secret store --name mysecret --payload foo=bar
511 |
512 | # Copy the 'Secret href' URI for later use
513 | SECRET_URL=$(openstack secret list --name mysecret -f value --column 'Secret href')
514 |
515 | # Get secret metadata
516 | openstack secret get ${SECRET_URL}
517 |
518 | # Get secret payload
519 | openstack secret get ${SECRET_URL} --payload
520 |
521 | Congratulations, you have successfully installed Barbican using Kayobe.
522 |
523 |
524 | References
525 | ==========
526 |
527 | * Kayobe documentation: https://docs.openstack.org/kayobe/latest/
528 | * Source: https://github.com/stackhpc/a-universe-from-nothing
529 | * Bugs: https://github.com/stackhpc/a-universe-from-nothing/issues
530 | * IRC: #openstack-kolla
531 |
--------------------------------------------------------------------------------
/a-universe-from-nothing.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # Cheat script for a full deployment.
4 | # This should be used for testing only.
5 |
6 | set -eu
7 |
8 | # Install git and tmux.
9 | if command -v dnf >/dev/null 2>&1; then
10 | sudo dnf -y install git tmux
11 | else
12 | sudo apt update
13 | sudo apt -y install git tmux
14 | fi
15 |
16 | # Install Python 3.12 on Rocky Linux 9
17 | if command -v dnf >/dev/null 2>&1; then
18 | sudo dnf -y install python3.12
19 | fi
20 |
21 | # Disable the firewall.
22 | sudo systemctl is-enabled firewalld && sudo systemctl stop firewalld && sudo systemctl disable firewalld
23 |
24 | # Put SELinux in permissive mode both immediately and permanently.
25 | if command -v setenforce >/dev/null 2>&1; then
26 | sudo setenforce 0
27 | sudo sed -i 's/^SELINUX=enforcing/SELINUX=permissive/' /etc/selinux/config
28 | fi
29 |
30 | # Prevent sudo from performing DNS queries.
31 | echo 'Defaults !fqdn' | sudo tee /etc/sudoers.d/no-fqdn
32 |
33 | # Start at home.
34 | cd
35 |
36 | # Clone Beokay.
37 | [[ -d beokay ]] || git clone https://github.com/stackhpc/beokay.git
38 |
39 | # Use Beokay to bootstrap your control host.
40 | if command -v dnf >/dev/null 2>&1; then
41 | PYTHON_ARG=" --python /usr/bin/python3.12"
42 | else
43 | PYTHON_ARG=""
44 | fi
45 | [[ -d deployment ]] || beokay/beokay.py create --base-path ~/deployment --kayobe-repo https://opendev.org/openstack/kayobe.git --kayobe-branch stable/2025.1 --kayobe-config-repo https://github.com/stackhpc/a-universe-from-nothing.git --kayobe-config-branch stable/2025.1 $PYTHON_ARG
46 |
47 | # Clone the Tenks repository.
48 | cd ~/deployment/src
49 | [[ -d tenks ]] || git clone https://opendev.org/openstack/tenks.git
50 |
51 | # Configure host networking (bridge, routes & firewall)
52 | ./kayobe-config/configure-local-networking.sh
53 |
54 | # Use the kayobe virtual environment, and export kayobe environment variables
55 | source ~/deployment/env-vars.sh
56 |
57 | # Configure the seed hypervisor host.
58 | kayobe seed hypervisor host configure
59 |
60 | # Provision the seed VM.
61 | kayobe seed vm provision
62 |
63 | # Configure the seed host, and deploy a local registry.
64 | kayobe seed host configure
65 |
66 | # Pull, retag images, then push to our local registry.
67 | ~/deployment/src/kayobe-config/pull-retag-push-images.sh
68 |
69 | # Deploy the seed services.
70 | kayobe seed service deploy
71 |
72 | # Deploying the seed restarts the networking interface, so run
73 | # configure-local-networking.sh again to re-add routes.
74 | ~/deployment/src/kayobe-config/configure-local-networking.sh
75 |
76 | # Set Environment variables for Kayobe dev scripts
77 | export KAYOBE_CONFIG_SOURCE_PATH=~/deployment/src/kayobe-config
78 | export KAYOBE_VENV_PATH=~/deployment/venvs/kayobe
79 | export TENKS_CONFIG_PATH=~/deployment/src/kayobe-config/tenks.yml
80 |
81 | # Deploy overcloud using Tenks
82 | ~/deployment/src/kayobe/dev/tenks-deploy-overcloud.sh ~/deployment/src/tenks
83 |
84 | # Inspect and provision the overcloud hardware:
85 | kayobe overcloud inventory discover
86 | kayobe overcloud hardware inspect
87 | kayobe overcloud introspection data save
88 | kayobe overcloud provision
89 | kayobe overcloud host configure
90 | kayobe overcloud container image pull
91 | kayobe overcloud service deploy
92 | source ~/deployment/src/kayobe-config/etc/kolla/public-openrc.sh
93 | kayobe overcloud post configure
94 | source ~/deployment/src/kayobe-config/etc/kolla/public-openrc.sh
95 | ~/deployment/src/kayobe-config/init-runonce.sh
96 |
--------------------------------------------------------------------------------
/configure-local-networking.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -e
4 | set -o pipefail
5 |
6 | # This should be run on the seed hypervisor.
7 |
8 | # IP addresses on the all-in-one Kayobe cloud network.
9 | # These IP addresses map to those statically configured in
10 | # etc/kayobe/network-allocation.yml and etc/kayobe/networks.yml.
11 | controller_vip=192.168.33.2
12 | seed_hv_ip=192.168.33.4
13 | seed_vm_ip=192.168.33.5
14 |
15 | iface=$(ip route | awk '$1 == "default" {print $5; exit}')
16 |
17 | # Private IP address by which the seed hypervisor is accessible in the cloud
18 | # hosting the VM.
19 | seed_hv_private_ip=$(ip a show dev $iface | awk '$1 == "inet" { gsub(/\/[0-9]*/,"",$2); print $2; exit }')
20 |
21 | # Forward the following ports to the controller.
22 | # 80: Horizon
23 | # 6080: VNC console
24 | forwarded_ports="80 6080"
25 |
26 | # IP of the seed hypervisor on the OpenStack 'public' network created by init-runonce.sh.
27 | public_ip="10.0.2.1"
28 |
29 | # Install iptables.
30 | if command -v dnf >/dev/null 2>&1; then
31 | sudo dnf -y install iptables
32 | fi
33 |
34 | if command -v apt >/dev/null 2>&1; then
35 | sudo apt update
36 | sudo apt -y install iptables
37 | fi
38 |
39 | # Configure local networking.
40 | # Add a bridge 'braio' for the Kayobe all-in-one cloud network.
41 | if ! sudo ip l show braio >/dev/null 2>&1; then
42 | sudo ip l add braio type bridge
43 | sudo ip l set braio up
44 | sudo ip a add $seed_hv_ip/24 dev braio
45 | fi
46 | # On CentOS 8, bridges without a port are DOWN, which causes network
47 | # configuration to fail. Add a dummy interface and plug it into the bridge.
48 | if ! sudo ip l show dummy1 >/dev/null 2>&1; then
49 | sudo ip l add dummy1 type dummy
50 | sudo ip l set dummy1 up
51 | sudo ip l set dummy1 master braio
52 | fi
53 |
54 | # Configure IP routing and NAT to allow the seed VM and overcloud hosts to
55 | # route via this host to the outside world.
56 | sudo iptables -A POSTROUTING -t nat -o $iface -j MASQUERADE
57 | sudo sysctl -w net.ipv4.conf.all.forwarding=1
58 |
59 | # Configure port forwarding from the hypervisor to the Horizon GUI on the
60 | # controller.
61 | sudo iptables -A FORWARD -i $iface -o braio -m conntrack --ctstate ESTABLISHED,RELATED -j ACCEPT
62 | sudo iptables -A FORWARD -i braio -o $iface -m conntrack --ctstate ESTABLISHED,RELATED -j ACCEPT
63 | for port in $forwarded_ports; do
64 | # Allow new connections.
65 | sudo iptables -A FORWARD -i $iface -o braio -p tcp --syn --dport $port -m conntrack --ctstate NEW -j ACCEPT
66 | # Destination NAT.
67 | sudo iptables -t nat -A PREROUTING -i $iface -p tcp --dport $port -j DNAT --to-destination $controller_vip
68 | # Source NAT.
69 | sudo iptables -t nat -A POSTROUTING -o braio -p tcp --dport $port -d $controller_vip -j SNAT --to-source $seed_hv_private_ip
70 | done
71 |
72 | # Configure an IP on the 'public' network to allow access to/from the cloud.
73 | if ! sudo ip a show dev braio | grep $public_ip/24 >/dev/null 2>&1; then
74 | sudo ip a add $public_ip/24 dev braio
75 | fi
76 |
77 | echo
78 | echo "NOTE: The network configuration applied by this script is not"
79 | echo "persistent across reboots."
80 | echo "If you reboot the system, please re-run this script."
81 |
--------------------------------------------------------------------------------
/etc/kayobe/ansible/filter_plugins:
--------------------------------------------------------------------------------
1 | ../../../../../../ansible/filter_plugins
--------------------------------------------------------------------------------
/etc/kayobe/ansible/group_vars:
--------------------------------------------------------------------------------
1 | ../../../../../../ansible/group_vars
--------------------------------------------------------------------------------
/etc/kayobe/ansible/lab-hosts.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | gather_facts: false
4 | tasks:
5 | - name: Update /etc/hosts for lab entities
6 | blockinfile:
7 | path: /etc/hosts
8 | block: |
9 | {% for item in groups['overcloud'] + groups['seed'] %}
10 | {{ aio_ips[item] }} {{ item }}
11 | {% endfor %}
12 | become: true
13 |
14 | - name: Update ssh config for lab entities
15 | blockinfile:
16 | path: "{{ lookup('env','HOME') }}/.ssh/config"
17 | create: true
18 | mode: 0600
19 | block: |
20 | {% for item in groups['overcloud'] + groups['seed'] %}
21 | Host {{ item }}
22 | User stack
23 | {% endfor %}
24 |
25 |
--------------------------------------------------------------------------------
/etc/kayobe/ansible/pull-retag-push.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Run the Kayobe kolla-build.yml playbook
3 | import_playbook: "{{ lookup('ansible.builtin.env', 'VIRTUAL_ENV') }}/share/kayobe/ansible/kolla-build.yml"
4 |
5 | - name: Pull, retag, and push images
6 | hosts: container-image-builders
7 | vars:
8 | # Set this variable to a space-separated list of regexes to override the
9 | # default set of images.
10 | container_image_regexes: ""
11 | container_image_sets: "{{ seed_container_image_sets + overcloud_container_image_sets }}"
12 | kolla_build_log_path: "/var/log/kolla-build.log"
13 | docker_pull_namespace: "openstack.kolla"
14 | docker_pull_registry: "quay.io"
15 | docker_pull_tag: "{{ kolla_tag }}"
16 | tasks:
17 | - name: Set the container image sets to build if images regexes specified
18 | set_fact:
19 | container_image_sets:
20 | - regexes: "{{ container_image_regexes }}"
21 | when: container_image_regexes != ''
22 |
23 | - name: Display the regexes for container images that will be built
24 | debug:
25 | msg: >
26 | Building container images matching '{{ item.regexes }}'. Build logs
27 | will be appended to {{ kolla_build_log_path }}.
28 | with_items: "{{ container_image_sets }}"
29 |
30 | - name: Ensure Kolla build log file exists
31 | file:
32 | path: "{{ kolla_build_log_path }}"
33 | state: touch
34 | owner: "{{ ansible_facts.user_uid }}"
35 | group: "{{ ansible_facts.user_gid }}"
36 | become: True
37 |
38 | - name: Login to docker registry
39 | docker_login:
40 | registry_url: "{{ kolla_docker_registry or omit }}"
41 | username: "{{ kolla_docker_registry_username }}"
42 | password: "{{ kolla_docker_registry_password }}"
43 | reauthorize: yes
44 | when:
45 | - kolla_docker_registry_username is not none
46 | - kolla_docker_registry_password is not none
47 |
48 | - name: List container images
49 | shell:
50 | cmd: >
51 | set -o pipefail &&
52 | source {{ kolla_venv }}/bin/activate &&
53 | kolla-build
54 | --config-dir {{ kolla_build_config_path }}
55 | {% if kolla_docker_registry is not none %}--registry {{ kolla_docker_registry }} {% endif %}
56 | --list-images
57 | {{ item.regexes }}
58 | executable: /bin/bash
59 | with_items: "{{ container_image_sets }}"
60 | when: item.regexes != ''
61 | register: list_result
62 | changed_when: false
63 |
64 | - name: Build a list of images
65 | vars:
66 | image: "{{ image_name }}"
67 | image_name: "{{ item.1.split()[2] }}"
68 | set_fact:
69 | images: "{{ (images | default([])) + [image] }}"
70 | with_subelements:
71 | - "{{ list_result.results }}"
72 | - stdout_lines
73 | - skip_missing: true
74 | when:
75 | - not item.1.endswith('base')
76 |
77 | - name: Display images which will be pulled, retagged and pushed
78 | debug:
79 | var: images
80 |
81 | - name: Pull container images (may take a long time)
82 | become: true
83 | vars:
84 | remote_image: "{% if docker_pull_registry != '' %}{{ docker_pull_registry }}/{% endif %}{{ docker_pull_namespace }}/{{ item }}:{{ docker_pull_tag }}"
85 | command:
86 | cmd: "docker pull {{ remote_image }}"
87 | with_items: "{{ images }}"
88 |
89 | - name: Retag container images
90 | become: true
91 | vars:
92 | remote_image: "{% if docker_pull_registry != '' %}{{ docker_pull_registry }}/{% endif %}{{ docker_pull_namespace }}/{{ item }}:{{ docker_pull_tag }}"
93 | local_image: "{{ kolla_docker_registry }}/{{ kolla_docker_namespace }}/{{ item }}:{{ kolla_tag }}"
94 | command:
95 | cmd: "docker tag {{ remote_image }} {{ local_image }}"
96 | with_items: "{{ images }}"
97 |
98 | - name: Push container images (may take a long time)
99 | become: true
100 | vars:
101 | local_image: "{{ kolla_docker_registry }}/{{ kolla_docker_namespace }}/{{ item }}:{{ kolla_tag }}"
102 | command:
103 | cmd: "docker push {{ local_image }}"
104 | with_items: "{{ images }}"
105 |
--------------------------------------------------------------------------------
/etc/kayobe/ansible/test_plugins:
--------------------------------------------------------------------------------
1 | ../../../../../../ansible/test_plugins/
--------------------------------------------------------------------------------
/etc/kayobe/apt.yml:
--------------------------------------------------------------------------------
1 | ---
2 | ###############################################################################
3 | # Apt package manager configuration.
4 |
5 | # Apt cache TTL in seconds. Default is 3600.
6 | #apt_cache_valid_time:
7 |
8 | # Apt proxy URL for HTTP. Default is empty (no proxy).
9 | #apt_proxy_http:
10 |
11 | # Apt proxy URL for HTTPS. Default is {{ apt_proxy_http }}.
12 | #apt_proxy_https:
13 |
14 | # List of Apt configuration options. Each item is a dict with the following
15 | # keys:
16 | # * content: free-form configuration file content
17 | # * filename: name of a file in /etc/apt/apt.conf.d/ in which to write the
18 | # configuration
19 | # Default is an empty list.
20 | #apt_config:
21 |
22 | # List of apt keys. Each item is a dict containing the following keys:
23 | # * url: URL of key
24 | # * filename: Name of a file in which to store the downloaded key. The
25 | # extension should be '.asc' for ASCII-armoured keys, or '.gpg' otherwise.
26 | # Default is an empty list.
27 | #apt_keys:
28 |
29 | # A list of Apt repositories. Each item is a dict with the following keys:
30 | # * name: the .sources filename part. Optional. Default is 'kayobe' and
31 | # the default filename is 'kayobe.sources'.
32 | # * types: whitespace-separated list of repository types, e.g. deb or deb-src
33 | # (optional, default is 'deb')
34 | # * url: URL of the repository
35 | # * suites: whitespace-separated list of suites, e.g. noble (optional, default
36 | # is ansible_facts.distribution_release)
37 | # * components: whitespace-separated list of components, e.g. main (optional,
38 | # default is 'main')
39 | # * signed_by: whitespace-separated list of names of GPG keyring files in
40 | # apt_keys_path (optional, default is unset)
41 | # * architecture: whitespace-separated list of architectures that will be used
42 | # (optional, default is unset)
43 | # Default is an empty list.
44 | #apt_repositories:
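# Example (illustrative only; hypothetical mirror URL, left commented out so
# the default empty list still applies):
# apt_repositories:
#   - url: https://mirror.example.com/ubuntu
#     suites: noble noble-updates
#     components: main universe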
45 |
46 | # List of Apt preferences options. Each item is a dict with the following
47 | # keys:
48 | # * content: free-form preferences file content
49 | # * filename: name of a file in /etc/apt/preferences.d/ in which to write
50 | # the configuration
51 | # Default is an empty list.
52 | #apt_preferences:
53 |
54 | # Whether to disable repositories in /etc/apt/sources.list. This may be used
55 | # when replacing the distribution repositories via apt_repositories.
56 | # Default is false.
57 | #apt_disable_sources_list:
58 |
59 | # List of Apt auth configurations. Each item is a dict with the following keys:
60 | # * machine: 'machine' entry in the auth file
61 | # * login: 'login' entry in the auth file
62 | # * password: 'password' entry in the auth file
63 | # * filename: Name of a file in /etc/apt/auth.conf.d in which to store
64 | # the auth configuration. The extension should be ``.conf``.
65 | # Default is an empty list.
66 | #apt_auth:
67 |
68 | ###############################################################################
69 | # Dummy variable to allow Ansible to accept this file.
70 | workaround_ansible_issue_8743: yes
71 |
--------------------------------------------------------------------------------
/etc/kayobe/bifrost.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Kayobe configuration for Bifrost.
3 |
4 | ###############################################################################
5 | # Bifrost installation.
6 |
7 | # URL of Bifrost source code repository.
8 | #kolla_bifrost_source_url:
9 |
10 | # Version (branch, tag, etc.) of Bifrost source code repository. Default is
11 | # {{ openstack_branch }}.
12 | #kolla_bifrost_source_version:
13 |
14 | # Whether Bifrost uses firewalld. Default value is false to avoid conflicting
15 | # with iptables rules configured on the seed host by Kayobe.
16 | #kolla_bifrost_use_firewalld:
17 |
18 | # Firewalld zone used by Bifrost. Default is "trusted", to avoid blocking other
19 | # services running on the seed host.
20 | #kolla_bifrost_firewalld_internal_zone:
21 |
22 | ###############################################################################
23 | # Diskimage-builder configuration.
24 |
25 | # DIB base OS element. Default is {{ os_distribution }}.
26 | #kolla_bifrost_dib_os_element:
27 |
28 | # DIB image OS release. Default is {{ os_release }}.
29 | #kolla_bifrost_dib_os_release:
30 |
31 | # List of default DIB elements. Default is ["enable-serial-console", "vm"].
32 | #kolla_bifrost_dib_elements_default:
33 |
34 | # List of additional DIB elements. Default is none.
35 | #kolla_bifrost_dib_elements_extra:
36 |
37 | # List of DIB elements. Default is a combination of
38 | # kolla_bifrost_dib_elements_default and kolla_bifrost_dib_elements_extra.
39 | #kolla_bifrost_dib_elements:
40 |
41 | # DIB init element. Default is "cloud-init-datasources".
42 | #kolla_bifrost_dib_init_element:
43 |
44 | # DIB default environment variables. Default is
45 | # {DIB_BOOTLOADER_DEFAULT_CMDLINE:
46 | # "nofb nomodeset gfxpayload=text net.ifnames=1",
47 | # "DIB_CLOUD_INIT_DATASOURCES": "ConfigDrive"}.
48 | #kolla_bifrost_dib_env_vars_default:
49 |
50 | # DIB additional environment variables. Default is none.
51 | #kolla_bifrost_dib_env_vars_extra:
52 |
53 | # DIB environment variables. Default is combination of
54 | # kolla_bifrost_dib_env_vars_default and kolla_bifrost_dib_env_vars_extra.
55 | #kolla_bifrost_dib_env_vars:
56 |
57 | # List of DIB packages to install. Default is to install no extra packages.
58 | #kolla_bifrost_dib_packages:
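# Example (illustrative only; left commented out): install extra packages
# into the deployment image.
# kolla_bifrost_dib_packages:
#   - "vim"
#   - "tcpdump"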
59 |
60 | ###############################################################################
61 | # Disk image deployment configuration.
62 |
63 | # Name of disk image file to deploy. Default is "deployment_image.qcow2".
64 | #kolla_bifrost_deploy_image_filename:
65 |
66 | # UUID of the root filesystem contained within the deployment image.
67 | # See below URL for instructions on how to extract it:
68 | # https://docs.openstack.org/ironic/latest/admin/raid.html#image-requirements
69 | # Default is none.
70 | #kolla_bifrost_deploy_image_rootfs:
71 |
72 | # Custom cloud-init user-data passed when deploying the deployment image.
73 | # Default is an empty string.
74 | #kolla_bifrost_deploy_image_user_data_content:
75 |
76 | ###############################################################################
77 | # Ironic configuration.
78 |
79 | # List of hardware types to enable for Bifrost's Ironic.
80 | #kolla_bifrost_enabled_hardware_types:
81 |
82 | # List of extra kernel parameters for Bifrost's Ironic PXE configuration.
83 | # Default is empty.
84 | #kolla_bifrost_extra_kernel_options:
85 |
86 | ###############################################################################
87 | # Ironic Inspector configuration.
88 |
89 | # List of of inspector processing plugins.
90 | #kolla_bifrost_inspector_processing_hooks:
91 |
92 | # Which MAC addresses to add as ports during introspection. One of 'all',
93 | # 'active' or 'pxe'.
94 | #kolla_bifrost_inspector_port_addition:
95 |
96 | # List of extra kernel parameters for the inspector default PXE configuration.
97 | # Default is {{ inspector_extra_kernel_options }}, defined in inspector.yml.
98 | # When customising this variable, the default extra kernel parameters should be
99 | # kept to retain full node inspection capabilities.
100 | #kolla_bifrost_inspector_extra_kernel_options:
101 |
102 | # List of introspection rules for Bifrost's Ironic Inspector service.
103 | #kolla_bifrost_inspector_rules:
104 |
105 | # Ironic inspector IPMI username to set.
106 | #kolla_bifrost_inspector_ipmi_username:
107 |
108 | # Ironic inspector IPMI password to set.
109 | #kolla_bifrost_inspector_ipmi_password:
110 |
111 | # Ironic inspector Redfish username to set.
112 | #kolla_bifrost_inspector_redfish_username:
113 |
114 | # Ironic inspector Redfish password to set.
115 | #kolla_bifrost_inspector_redfish_password:
116 |
117 | # Ironic inspector network interface name on which to check for an LLDP switch
118 | # port description to use as the node's name.
119 | #kolla_bifrost_inspector_lldp_switch_port_interface:
120 |
121 | # Ironic inspector deployment kernel location.
122 | #kolla_bifrost_inspector_deploy_kernel:
123 |
124 | # Ironic inspector deployment ramdisk location.
125 | #kolla_bifrost_inspector_deploy_ramdisk:
126 |
127 | # Ironic inspector legacy deployment kernel location.
128 | #kolla_bifrost_inspector_legacy_deploy_kernel:
129 |
130 | # Timeout of hardware inspection on overcloud nodes, in seconds. Default is
131 | # {{ inspector_inspection_timeout }}.
132 | #kolla_bifrost_inspection_timeout:
133 |
134 | ###############################################################################
135 | # Ironic Python Agent (IPA) configuration.
136 |
137 | # URL of Ironic Python Agent (IPA) kernel image.
138 | #kolla_bifrost_ipa_kernel_upstream_url:
139 |
140 | # URL of checksum of Ironic Python Agent (IPA) kernel image.
141 | #kolla_bifrost_ipa_kernel_checksum_url:
142 |
143 | # Algorithm of checksum of Ironic Python Agent (IPA) kernel image.
144 | #kolla_bifrost_ipa_kernel_checksum_algorithm:
145 |
146 | # URL of Ironic Python Agent (IPA) ramdisk image.
147 | #kolla_bifrost_ipa_ramdisk_upstream_url:
148 |
149 | # URL of checksum of Ironic Python Agent (IPA) ramdisk image.
150 | #kolla_bifrost_ipa_ramdisk_checksum_url:
151 |
152 | # Algorithm of checksum of Ironic Python Agent (IPA) ramdisk image.
153 | #kolla_bifrost_ipa_ramdisk_checksum_algorithm:
154 |
155 | ###############################################################################
156 | # Inventory configuration.
157 |
158 | # Server inventory for Bifrost.
159 | #kolla_bifrost_servers:
160 |
161 | ###############################################################################
162 | # Node provisioning configuration.
163 | # Whether to use Ironic introspection data for the admin interface MAC
164 | # address. Default is false.
165 | #kolla_bifrost_use_introspection_mac:
166 |
167 | ###############################################################################
168 | # Dummy variable to allow Ansible to accept this file.
169 | workaround_ansible_issue_8743: yes
170 |
--------------------------------------------------------------------------------
/etc/kayobe/bmc.yml:
--------------------------------------------------------------------------------
1 | ---
2 | ###############################################################################
3 | # Configuration of Baseboard Management Controllers (BMCs).
4 |
5 | # Type of host's BMC. Currently only idrac is supported.
6 | #bmc_type:
7 |
8 | # Address to use to access a host's BMC via IPMI.
9 | #ipmi_address:
10 |
11 | # Username to use to access a host's BMC via IPMI.
12 | #ipmi_username:
13 |
14 | # Password to use to access a host's BMC via IPMI.
15 | #ipmi_password:
16 |
17 | # Address to use to access a host's BMC via Redfish.
18 | #redfish_address:
19 |
20 | ###############################################################################
21 | # Dummy variable to allow Ansible to accept this file.
22 | workaround_ansible_issue_8743: yes
23 |
--------------------------------------------------------------------------------
/etc/kayobe/compute.yml:
--------------------------------------------------------------------------------
1 | ---
2 | ###############################################################################
3 | # Compute node configuration.
4 |
5 | # User with which to access the computes via SSH during bootstrap, in order
6 | # to set up the Kayobe user account. Default is {{ os_distribution }}.
7 | #compute_bootstrap_user:
8 |
9 | ###############################################################################
10 | # Compute network interface configuration.
11 |
12 | # List of networks to which compute nodes are attached.
13 | #compute_network_interfaces:
14 |
15 | # List of default networks to which compute nodes are attached.
16 | #compute_default_network_interfaces:
17 |
18 | # List of extra networks to which compute nodes are attached.
19 | #compute_extra_network_interfaces:
20 |
21 | ###############################################################################
22 | # Compute node BIOS configuration.
23 |
24 | # Dict of compute BIOS options. Format is same as that used by stackhpc.drac
25 | # role.
26 | #compute_bios_config:
27 |
28 | # Dict of default compute BIOS options. Format is same as that used by
29 | # stackhpc.drac role.
30 | #compute_bios_config_default:
31 |
32 | # Dict of additional compute BIOS options. Format is same as that used by
33 | # stackhpc.drac role.
34 | #compute_bios_config_extra:
35 |
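# For example, a sketch of the stackhpc.drac dict format, mapping BIOS option
# names to values (these option names are hypothetical and iDRAC-specific):
# compute_bios_config_extra:
#   NumLock: "On"
#   SysProfile: "PerfOptimized"
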
36 | ###############################################################################
37 | # Compute node RAID configuration.
38 |
39 | # List of compute RAID volumes. Format is same as that used by stackhpc.drac
40 | # role.
41 | #compute_raid_config:
42 |
43 | # List of default compute RAID volumes. Format is same as that used by
44 | # stackhpc.drac role.
45 | #compute_raid_config_default:
46 |
47 | # List of additional compute RAID volumes. Format is same as that used by
48 | # stackhpc.drac role.
49 | #compute_raid_config_extra:
50 |
51 | ###############################################################################
52 | # Compute node software RAID configuration.
53 |
54 | # List of software RAID arrays. See mrlesmithjr.mdadm role for format.
55 | #compute_mdadm_arrays:
56 |
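# For example, a sketch in the mrlesmithjr.mdadm role format (the device names
# are hypothetical):
# compute_mdadm_arrays:
#   - name: md0
#     devices:
#       - /dev/sdb
#       - /dev/sdc
#     level: 1
#     state: present
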
57 | ###############################################################################
58 | # Compute node encryption configuration.
59 |
60 | # List of block devices to encrypt. See stackhpc.luks role for format.
61 | #compute_luks_devices:
62 |
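# For example, a sketch in the stackhpc.luks role format (the name and device
# are hypothetical):
# compute_luks_devices:
#   - name: data
#     device: /dev/md0
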
63 | ###############################################################################
64 | # Compute node LVM configuration.
65 |
66 | # List of compute volume groups. See mrlesmithjr.manage_lvm role for
67 | # format.
68 | #compute_lvm_groups:
69 |
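# For example, a sketch in the mrlesmithjr.manage_lvm role format (the disk
# and mount point values are hypothetical):
# compute_lvm_groups:
#   - vgname: data
#     disks:
#       - /dev/sdb
#     create: true
#     lvnames:
#       - lvname: docker-volumes
#         size: 100%FREE
#         create: true
#         filesystem: ext4
#         mount: true
#         mntp: /var/lib/docker/volumes
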
70 | # Default list of compute volume groups. See mrlesmithjr.manage_lvm role for
71 | # format.
72 | #compute_lvm_groups_default:
73 |
74 | # Additional list of compute volume groups. See mrlesmithjr.manage_lvm role
75 | # for format.
76 | #compute_lvm_groups_extra:
77 |
78 | # Whether a 'data' LVM volume group should exist on compute hosts. By default
79 | # this contains a 'docker-volumes' logical volume for container volume storage
80 | # if using the docker container engine, or a 'podman-volumes' logical volume
81 | # for container volume storage if using the podman container engine.
82 | # Default is false.
83 | #compute_lvm_group_data_enabled:
84 |
85 | # Compute LVM volume group for data. See mrlesmithjr.manage_lvm role for
86 | # format.
87 | #compute_lvm_group_data:
88 |
89 | # List of disks for use by compute LVM data volume group. Defaults to an
90 | # invalid value to require configuration.
91 | #compute_lvm_group_data_disks:
92 |
93 | # List of LVM logical volumes for the data volume group when using docker.
94 | #compute_lvm_group_data_docker_lvs:
95 |
96 | # List of LVM logical volumes for the data volume group when using podman.
97 | #compute_lvm_group_data_podman_lvs:
98 |
99 | # List of LVM logical volumes for the data volume group.
100 | #compute_lvm_group_data_lvs:
101 |
102 | # Docker volumes LVM backing volume.
103 | #compute_lvm_group_data_lv_docker_volumes:
104 |
105 | # Podman volumes LVM backing volume.
106 | #compute_lvm_group_data_lv_podman_volumes:
107 |
108 | # Size of docker volumes LVM backing volume.
109 | #compute_lvm_group_data_lv_docker_volumes_size:
110 |
111 | # Filesystem for docker volumes LVM backing volume. ext4 allows for shrinking.
112 | #compute_lvm_group_data_lv_docker_volumes_fs:
113 |
114 | # Size of podman volumes LVM backing volume.
115 | #compute_lvm_group_data_lv_podman_volumes_size:
116 |
117 | # Filesystem for podman volumes LVM backing volume. ext4 allows for shrinking.
118 | #compute_lvm_group_data_lv_podman_volumes_fs:
119 |
120 | ###############################################################################
121 | # Compute node sysctl configuration.
122 |
123 | # Dict of sysctl parameters to set.
124 | #compute_sysctl_parameters:
125 |
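# For example, a sketch mapping sysctl parameter names to values:
# compute_sysctl_parameters:
#   net.ipv4.ip_forward: 1
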
126 | ###############################################################################
127 | # Compute node tuned configuration.
128 |
129 | # Builtin tuned profile to use. Format is same as that used by giovtorres.tuned
130 | # role. Default is virtual-host.
131 | #compute_tuned_active_builtin_profile:
132 |
133 | ###############################################################################
134 | # Compute node user configuration.
135 |
136 | # List of users to create. This should be in a format accepted by the
137 | # singleplatform-eng.users role.
138 | #compute_users:
139 |
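# For example, a sketch in the singleplatform-eng.users role format (the user
# details are hypothetical):
# compute_users:
#   - username: alice
#     name: Alice Example
#     groups: ["wheel"]
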
140 | ###############################################################################
141 | # Compute node firewalld configuration.
142 |
143 | # Whether to install and enable firewalld.
144 | #compute_firewalld_enabled:
145 |
146 | # A list of zones to create. Each item is a dict containing a 'zone' item.
147 | #compute_firewalld_zones:
148 |
149 | # A firewalld zone to set as the default. Default is unset, in which case the
150 | # default zone will not be changed.
151 | #compute_firewalld_default_zone:
152 |
153 | # A list of firewall rules to apply. Each item is a dict containing arguments
154 | # to pass to the firewalld module. Arguments are omitted if not provided, with
155 | # the following exceptions:
156 | # - offline: true
157 | # - permanent: true
158 | # - state: enabled
159 | #compute_firewalld_rules:
160 |
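# For example, a sketch of two rules (the service, port and zone values are
# hypothetical); the offline/permanent/state defaults above apply unless
# overridden per rule:
# compute_firewalld_rules:
#   - service: ssh
#     zone: public
#   - port: 9100/tcp
#     zone: public
#     state: disabled
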
161 | ###############################################################################
162 | # Compute node host libvirt configuration.
163 |
164 | # Whether to enable a host libvirt daemon. Default is true if kolla_enable_nova
165 | # is true and kolla_enable_nova_libvirt_container is false.
166 | #compute_libvirt_enabled:
167 |
168 | # A dict of default configuration options to write to
169 | # /etc/libvirt/libvirtd.conf.
170 | #compute_libvirt_conf_default:
171 |
172 | # A dict of additional configuration options to write to
173 | # /etc/libvirt/libvirtd.conf.
174 | #compute_libvirt_conf_extra:
175 |
176 | # A dict of configuration options to write to /etc/libvirt/libvirtd.conf.
177 | # Default is a combination of compute_libvirt_conf_default and
178 | # compute_libvirt_conf_extra.
179 | #compute_libvirt_conf:
180 |
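# For example, a sketch using standard libvirtd.conf options:
# compute_libvirt_conf_extra:
#   max_clients: 100
#   log_outputs: "3:syslog:libvirtd"
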
181 | # Numerical log level for libvirtd. Default is 3.
182 | #compute_libvirtd_log_level:
183 |
184 | # A dict of default configuration options to write to
185 | # /etc/libvirt/qemu.conf.
186 | #compute_qemu_conf_default:
187 |
188 | # A dict of additional configuration options to write to
189 | # /etc/libvirt/qemu.conf.
190 | #compute_qemu_conf_extra:
191 |
192 | # A dict of configuration options to write to /etc/libvirt/qemu.conf.
193 | # Default is a combination of compute_qemu_conf_default and
194 | # compute_qemu_conf_extra.
195 | #compute_qemu_conf:
196 |
197 | # Whether to enable libvirt SASL authentication. Default is true.
198 | #compute_libvirt_enable_sasl:
199 |
200 | # libvirt SASL password. Default is unset.
201 | #compute_libvirt_sasl_password:
202 |
203 | # Whether to enable a libvirt TLS listener. Default is false.
204 | #compute_libvirt_enable_tls:
205 |
206 | # Whether to install a Ceph package repository on CentOS and Rocky hosts.
207 | # Default is true.
208 | #compute_libvirt_ceph_repo_install:
209 |
210 | # Ceph package repository release to install on CentOS and Rocky hosts when
211 | # compute_libvirt_ceph_repo_install is true. Default is 'pacific'.
212 | #compute_libvirt_ceph_repo_release:
213 |
214 | ###############################################################################
215 | # Dummy variable to allow Ansible to accept this file.
216 | workaround_ansible_issue_8743: yes
217 |
--------------------------------------------------------------------------------
/etc/kayobe/container-engine.yml:
--------------------------------------------------------------------------------
1 | ---
2 | ###############################################################################
3 | # Container engine configuration
4 |
5 | # Configures the container engine. Default is 'docker'.
6 | #container_engine:
7 |
8 | # Path to container volumes. Default is '{{ podman_volumes_path }}' if
9 | # 'container_engine' is set to podman, otherwise '{{ docker_volumes_path }}'.
10 | #container_engine_volumes_path:
11 |
12 | ###############################################################################
13 | # Docker configuration.
14 |
15 | # Name of the docker storage driver. Default is 'overlay2'.
16 | #docker_storage_driver:
17 |
18 | # Name of the docker storage LVM volume group.
19 | #docker_storage_volume_group:
20 |
21 | # Name of the docker storage data LVM volume.
22 | #docker_storage_volume_thinpool:
23 |
24 | # Size of the docker storage data LVM volume (see lvol module size argument).
25 | #docker_storage_volume_thinpool_size:
26 |
27 | # Name of the docker storage metadata LVM volume.
28 | #docker_storage_volume_thinpool_meta:
29 |
30 | # Size of the docker storage metadata LVM volume (see lvol module size
31 | # argument).
32 | #docker_storage_volume_thinpool_meta_size:
33 |
34 | # URL of docker registry
35 | docker_registry: 192.168.33.5:4000
36 |
37 | # Whether docker should be configured to use an insecure registry.
38 | # Default is false, unless docker_registry_enabled is true and
39 | # docker_registry_enable_tls is false.
40 | #docker_registry_insecure:
41 |
42 | # CA of docker registry
43 | #docker_registry_ca:
44 |
45 | # List of Docker registry mirrors.
46 | #docker_registry_mirrors:
47 |
48 | # Enable live-restore on docker daemon
49 | #docker_daemon_live_restore:
50 |
51 | # Path to docker runtime directory. Default is "", which means to use the
52 | # default location: '/var/lib/docker'.
53 | #docker_runtime_directory:
54 |
55 | # Path to docker volumes. Default is '{{ docker_runtime_directory |
56 | # default('/var/lib/docker', true) ~ '/volumes' }}'.
57 | #docker_volumes_path:
58 |
59 | ###############################################################################
60 | # Podman configuration.
61 |
62 | # URL of podman container registry
63 | #podman_registry:
64 |
65 | # Whether podman should be configured to use an insecure registry.
66 | # Default is false, unless docker_registry_enabled is true and
67 | # docker_registry_enable_tls is false.
68 | #podman_registry_insecure:
69 |
70 | # Path to podman runtime directory. Default is None, which means to use the
71 | # default location: '/var/lib/containers/storage'.
72 | #podman_runtime_directory:
73 |
74 | # Path to podman volumes. Default is '{{ podman_runtime_directory |
75 | # default('/var/lib/containers/storage', true) ~ '/volumes' }}"'.
76 | #podman_volumes_path:
77 |
78 | ###############################################################################
79 | # Dummy variable to allow Ansible to accept this file.
80 | workaround_ansible_issue_8743: yes
81 |
--------------------------------------------------------------------------------
/etc/kayobe/controllers.yml:
--------------------------------------------------------------------------------
1 | ---
2 | ###############################################################################
3 | # Controller node configuration.
4 |
5 | # User with which to access the controllers via SSH during bootstrap, in order
6 | # to set up the Kayobe user account. Default is {{ os_distribution }}.
7 | #controller_bootstrap_user:
8 |
9 | ###############################################################################
10 | # Controller groups.
11 |
12 | # Ansible inventory group in which Ironic conductor services are deployed.
13 | # Default is 'controllers'.
14 | #controller_ironic_conductor_group:
15 |
16 | # Ansible inventory group in which Ironic inspector services are deployed.
17 | # Default is 'controllers'.
18 | #controller_ironic_inspector_group:
19 |
20 | # Ansible inventory group in which control plane load balancer services are
21 | # deployed. Default is 'network'.
22 | #controller_loadbalancer_group:
23 |
24 | # Ansible inventory group in which network data plane services are deployed.
25 | # Default is 'network'.
26 | #controller_network_group:
27 |
28 | ###############################################################################
29 | # Controller network interface configuration.
30 |
31 | # List of networks to which controller nodes are attached.
32 | #controller_network_interfaces:
33 |
34 | # List of default networks to which controller nodes are attached.
35 | #controller_default_network_interfaces:
36 |
37 | # List of extra networks to which controller nodes are attached.
38 | #controller_extra_network_interfaces:
39 |
40 | # List of network interfaces to which network nodes are attached.
41 | #controller_network_host_network_interfaces:
42 |
43 | # List of default network interfaces to which network nodes are attached.
44 | #controller_network_host_default_network_interfaces:
45 |
46 | # List of extra networks to which network nodes are attached.
47 | #controller_network_host_extra_network_interfaces:
48 |
49 | ###############################################################################
50 | # Controller node BIOS configuration.
51 |
52 | # Dict of controller BIOS options. Format is same as that used by stackhpc.drac
53 | # role.
54 | #controller_bios_config:
55 |
56 | # Dict of default controller BIOS options. Format is same as that used by
57 | # stackhpc.drac role.
58 | #controller_bios_config_default:
59 |
60 | # Dict of additional controller BIOS options. Format is same as that used by
61 | # stackhpc.drac role.
62 | #controller_bios_config_extra:
63 |
64 | ###############################################################################
65 | # Controller node RAID configuration.
66 |
67 | # List of controller RAID volumes. Format is same as that used by stackhpc.drac
68 | # role.
69 | #controller_raid_config:
70 |
71 | # List of default controller RAID volumes. Format is same as that used by
72 | # stackhpc.drac role.
73 | #controller_raid_config_default:
74 |
75 | # List of additional controller RAID volumes. Format is same as that used by
76 | # stackhpc.drac role.
77 | #controller_raid_config_extra:
78 |
79 | ###############################################################################
80 | # Controller node software RAID configuration.
81 |
82 | # List of software RAID arrays. See mrlesmithjr.mdadm role for format.
83 | #controller_mdadm_arrays:
84 |
85 | ###############################################################################
86 | # Controller node encryption configuration.
87 |
88 | # List of block devices to encrypt. See stackhpc.luks role for format.
89 | #controller_luks_devices:
90 |
91 | ###############################################################################
92 | # Controller node LVM configuration.
93 |
94 | # List of controller volume groups. See mrlesmithjr.manage_lvm role for
95 | # format.
96 | #controller_lvm_groups:
97 |
98 | # Default list of controller volume groups. See mrlesmithjr.manage_lvm role for
99 | # format.
100 | #controller_lvm_groups_default:
101 |
102 | # Additional list of controller volume groups. See mrlesmithjr.manage_lvm role
103 | # for format.
104 | #controller_lvm_groups_extra:
105 |
106 | # Whether a 'data' LVM volume group should exist on controller hosts. By
107 | # default this contains a 'docker-volumes' logical volume for Docker volume
108 | # storage.
109 | # Default is false.
110 | #controller_lvm_group_data_enabled:
111 |
112 | # Controller LVM volume group for data. See mrlesmithjr.manage_lvm role for
113 | # format.
114 | #controller_lvm_group_data:
115 |
116 | # List of disks for use by controller LVM data volume group. Defaults to an
117 | # invalid value to require configuration.
118 | #controller_lvm_group_data_disks:
119 |
120 | # List of LVM logical volumes for the data volume group.
121 | #controller_lvm_group_data_lvs:
122 |
123 | # Docker volumes LVM backing volume.
124 | #controller_lvm_group_data_lv_docker_volumes:
125 |
126 | # Size of docker volumes LVM backing volume.
127 | #controller_lvm_group_data_lv_docker_volumes_size:
128 |
129 | # Filesystem for docker volumes LVM backing volume. ext4 allows for shrinking.
130 | #controller_lvm_group_data_lv_docker_volumes_fs:
131 |
132 | ###############################################################################
133 | # Controller node sysctl configuration.
134 |
135 | # Dict of sysctl parameters to set.
136 | #controller_sysctl_parameters:
137 |
138 | ###############################################################################
139 | # Controller node tuned configuration.
140 |
141 | # Builtin tuned profile to use. Format is same as that used by giovtorres.tuned
142 | # role. Default is throughput-performance.
143 | #controller_tuned_active_builtin_profile:
144 |
145 | ###############################################################################
146 | # Controller node user configuration.
147 |
148 | # List of users to create. This should be in a format accepted by the
149 | # singleplatform-eng.users role.
150 | #controller_users:
151 |
152 | ###############################################################################
153 | # Controller node firewalld configuration.
154 |
155 | # Whether to install and enable firewalld.
156 | #controller_firewalld_enabled:
157 |
158 | # A list of zones to create. Each item is a dict containing a 'zone' item.
159 | #controller_firewalld_zones:
160 |
161 | # A firewalld zone to set as the default. Default is unset, in which case the
162 | # default zone will not be changed.
163 | #controller_firewalld_default_zone:
164 |
165 | # A list of firewall rules to apply. Each item is a dict containing arguments
166 | # to pass to the firewalld module. Arguments are omitted if not provided, with
167 | # the following exceptions:
168 | # - offline: true
169 | # - permanent: true
170 | # - state: enabled
171 | #controller_firewalld_rules:
172 |
173 | ###############################################################################
174 | # Dummy variable to allow Ansible to accept this file.
175 | workaround_ansible_issue_8743: yes
176 |
--------------------------------------------------------------------------------
/etc/kayobe/dell-switch-bmp.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Dell Switch Bare Metal Provisioning (BMP) configuration.
3 |
4 | # List of images to provide a BMP configuration for.
5 | # Each item should be a dict with the following keys:
6 | # url: URL of the image to download.
7 | # dest: Name of the file to download the image to.
8 | # match: dnsmasq match rule to match hosts against.
9 | # tag: dnsmasq tag to apply to matching hosts.
10 | # checksum: optional checksum of image, in format required for Ansible's
11 | # get_url module.
12 | #dell_switch_bmp_images:
13 |
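# For example, a sketch using the keys described above (the image name, match
# rule, tag and checksum values are hypothetical):
# dell_switch_bmp_images:
#   - url: "http://example.com/FTOS-SE-9.10.0.1P25.bin"
#     dest: "FTOS-SE-9.10.0.1P25.bin"
#     match: "option:vendor-class,FTOS"
#     tag: "ftos"
#     checksum: "md5:..."
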
14 | ###############################################################################
15 | # Dummy variable to allow Ansible to accept this file.
16 | workaround_ansible_issue_8743: yes
17 |
--------------------------------------------------------------------------------
/etc/kayobe/dnf.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # DNF configuration.
3 |
4 | ###############################################################################
5 | # DNF repository configuration.
6 |
7 | # Yum configuration. Dict mapping Yum config option names to their values.
8 | # dnf_config:
9 | # proxy: http://proxy.example.com
10 | #dnf_config:
11 |
12 | # Whether or not to use a local Yum mirror. Default value is 'false'.
13 | #dnf_use_local_mirror:
14 |
15 | # Mirror FQDN for Yum CentOS repos. Default value is 'mirror.centos.org'.
16 | #dnf_centos_mirror_host:
17 |
18 | # Mirror directory for Yum CentOS repos. Default value is 'centos'.
19 | #dnf_centos_mirror_directory:
20 |
21 | # Mirror FQDN for Yum Rocky repos. Default value is 'dl.rockylinux.org'.
22 | #dnf_rocky_mirror_host:
23 |
24 | # Mirror directory for Yum Rocky repos. Default value is 'pub/rocky'.
25 | #dnf_rocky_mirror_directory:
26 |
27 | # Mirror FQDN for Yum EPEL repos. Default value is
28 | # 'download.fedoraproject.org'.
29 | #dnf_epel_mirror_host:
30 |
31 | # Mirror directory for Yum EPEL repos. Default value is 'pub/epel'.
32 | #dnf_epel_mirror_directory:
33 |
34 | # A dict of custom repositories.
35 | # You can see params on
36 | # http://docs.ansible.com/ansible/latest/modules/yum_repository_module.html.
37 | # For example:
38 | # dnf_custom_repos:
39 | # reponame:
40 | # baseurl: http://repo
41 | # file: myrepo
42 | # gpgkey: http://gpgkey
43 | # gpgcheck: yes
44 | #dnf_custom_repos:
45 |
46 | # Whether to install the epel-release package. This affects RedHat-based
47 | # systems only. Default value is 'false'.
48 | #dnf_install_epel:
49 |
50 | ###############################################################################
51 | # DNF Automatic configuration.
52 |
53 | # Whether DNF Automatic is enabled. This can be used to regularly apply
54 | # security updates. Default value is 'false'.
55 | #dnf_automatic_enabled:
56 |
57 | # DNF Automatic upgrade type. Default value is 'security'.
58 | #dnf_automatic_upgrade_type:
59 |
60 | ###############################################################################
61 | # Dummy variable to allow Ansible to accept this file.
62 | workaround_ansible_issue_8743: yes
63 |
--------------------------------------------------------------------------------
/etc/kayobe/dns.yml:
--------------------------------------------------------------------------------
1 | ---
2 | ###############################################################################
3 | # DNS.
4 |
5 | # Whether kayobe should configure resolv.conf.
6 | #resolv_is_managed:
7 |
8 | # List of DNS nameservers.
9 | #resolv_nameservers:
10 |
11 | # DNS domain suffix.
12 | #resolv_domain:
13 |
14 | # List of DNS search suffixes.
15 | #resolv_search:
16 |
17 | # List of IP address and netmask pairs to sort addresses returned by
18 | # gethostbyname.
19 | #resolv_sortlist:
20 |
21 | # List of DNS options.
22 | #resolv_options:
23 |
24 | ###############################################################################
25 | # Dummy variable to allow Ansible to accept this file.
26 | workaround_ansible_issue_8743: yes
27 |
--------------------------------------------------------------------------------
/etc/kayobe/docker-registry.yml:
--------------------------------------------------------------------------------
1 | ---
2 | ###############################################################################
3 | # Docker registry configuration.
4 |
5 | # Whether a docker registry is enabled. Default is false.
6 | docker_registry_enabled: true
7 |
8 | # Dict of environment variables to provide to the docker registry container.
9 | # This allows configuring the registry by overriding specific configuration
10 | # options, as described at https://docs.docker.com/registry/configuration/
11 | # For example, the registry can be configured as a pull-through cache to Docker
12 | # Hub by setting REGISTRY_PROXY_REMOTEURL to "https://registry-1.docker.io".
13 | # Note that it is not possible to push to a registry configured as a
14 | # pull-through cache. Default is an empty dict.
15 | #docker_registry_env:
16 |
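# For example, to configure the registry as a pull-through cache to Docker
# Hub, as described above:
# docker_registry_env:
#   REGISTRY_PROXY_REMOTEURL: "https://registry-1.docker.io"
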
17 | # The network mode of the docker registry container. Default is 'host'.
18 | #docker_registry_network_mode:
19 |
20 | # The port on which the docker registry server should listen. Default is 4000.
21 | #docker_registry_port:
22 |
23 | # Name or path to use as the volume for the docker registry. Default is
24 | # 'docker_registry'.
25 | #docker_registry_datadir_volume:
26 |
27 | # Whether to enable TLS for the registry. Default is false.
28 | #docker_registry_enable_tls:
29 |
30 | # Path to a TLS certificate to use when TLS is enabled. Default is none.
31 | #docker_registry_cert_path:
32 |
33 | # Path to a TLS key to use when TLS is enabled. Default is none.
34 | #docker_registry_key_path:
35 |
36 | # Whether to enable basic authentication for the registry. Default is false.
37 | #docker_registry_enable_basic_auth:
38 |
39 | # Path to a htpasswd formatted password store for the registry. Default is
40 | # none.
41 | #docker_registry_basic_auth_htpasswd_path:
42 |
43 | ###############################################################################
44 | # Dummy variable to allow Ansible to accept this file.
45 | workaround_ansible_issue_8743: yes
46 |
--------------------------------------------------------------------------------
/etc/kayobe/globals.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Kayobe global configuration.
3 |
4 | ###############################################################################
5 | # Local path configuration (Ansible control host).
6 |
7 | # Path to Kayobe configuration directory on Ansible control host.
8 | #kayobe_config_path:
9 |
10 | # Name of Kayobe environment to use. Default is $KAYOBE_ENVIRONMENT, or an
11 | # empty string if $KAYOBE_ENVIRONMENT is not set. Can also be set via the
12 | # --environment argument when invoking kayobe.
13 | #kayobe_environment:
14 |
15 | # Path to Kayobe configuration directory on Ansible control host with an
16 | # environment path appended if kayobe_environment is set.
17 | #kayobe_env_config_path:
18 |
19 | ###############################################################################
20 | # Remote path configuration (seed, seed-hypervisor and overcloud hosts).
21 |
22 | # Base path for kayobe state on remote hosts.
23 | #base_path:
24 |
25 | # Path in which to store configuration on remote hosts.
26 | #config_path:
27 |
28 | # Path in which to cache downloaded images on remote hosts.
29 | #image_cache_path:
30 |
31 | # Path on which to checkout source code repositories on remote hosts.
32 | #source_checkout_path:
33 |
34 | # Path on which to create python virtualenvs on remote hosts.
35 | #virtualenv_path:
36 |
37 | ###############################################################################
38 | # User configuration.
39 |
40 | # User with which to access remote hosts. This user will be created if it does
41 | # not exist.
42 | #kayobe_ansible_user:
43 |
44 | ###############################################################################
45 | # OS distribution.
46 |
47 | # OS distribution name. Valid options are "centos", "rocky", "ubuntu". Default
48 | # is "rocky".
49 | os_distribution: "{{ lookup('pipe', '. /etc/os-release && echo $ID') | trim }}"
50 |
51 | # OS release. Valid options are "9-stream" when os_distribution is "centos", or
52 | # "9" when os_distribution is "rocky", or "noble" when os_distribution is
53 | # "ubuntu".
54 | #os_release:
55 |
56 | ###############################################################################
57 | # Ansible configuration.
58 |
59 | # Filter to apply to the setup module when gathering facts. Default is to not
60 | # specify a filter.
61 | #kayobe_ansible_setup_filter:
62 |
63 | # Gather subset to apply to the setup module when gathering facts. Default is
64 | # to not specify a gather subset.
65 | #kayobe_ansible_setup_gather_subset:
66 |
67 | # Global maximum failure percentage. By default this is undefined, which is
68 | # equivalent to a value of 100.
69 | #kayobe_max_fail_percentage:
70 |
71 | # Whether or not we should try to escalate privileges on the control host.
72 | # This allows us to install packages and create arbitrary directories that our
73 | # user would not normally have permission to create. Default is true.
74 | #kayobe_control_host_become:
75 |
76 | ###############################################################################
77 | # Dummy variable to allow Ansible to accept this file.
78 | workaround_ansible_issue_8743: yes
79 |
--------------------------------------------------------------------------------
/etc/kayobe/grafana.yml:
--------------------------------------------------------------------------------
1 | ---
2 | ###############################################################################
3 | # Grafana configuration.
4 |
5 | # Grafana local admin user name.
6 | #grafana_local_admin_user_name:
7 |
8 | ###############################################################################
9 | # Dummy variable to allow Ansible to accept this file.
10 | workaround_ansible_issue_8743: yes
11 |
--------------------------------------------------------------------------------
/etc/kayobe/hooks/.gitkeep:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/stackhpc/a-universe-from-nothing/2d8b7e10a60d746acb678c7560525efced12d6f7/etc/kayobe/hooks/.gitkeep
--------------------------------------------------------------------------------
/etc/kayobe/hooks/overcloud-inventory-discover/post.d/01-lab-hosts.yml:
--------------------------------------------------------------------------------
1 | ../../../ansible/lab-hosts.yml
--------------------------------------------------------------------------------
/etc/kayobe/idrac.yml:
--------------------------------------------------------------------------------
1 | ---
2 | ###############################################################################
3 | # iDRAC configuration.
4 |
5 | # Default username for iDRACs.
6 | #idrac_default_username:
7 |
8 | # Default password for iDRACs.
9 | #idrac_default_password:
10 |
11 | # Default IP address for iDRACs.
12 | #idrac_default_ip:
13 |
14 | # Dict mapping host description (as found in switch interface configuration) to
15 | # IP address of the iDRAC for that host.
16 | #idrac_network_ips:
17 |
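# For example, a sketch mapping host descriptions to iDRAC IP addresses (the
# names and addresses are hypothetical):
# idrac_network_ips:
#   compute-1: 10.100.0.11
#   compute-2: 10.100.0.12
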
18 | # Gateway IP address for iDRAC network.
19 | #idrac_network_gateway:
20 |
21 | # IP netmask for iDRAC network.
22 | #idrac_network_netmask:
23 |
24 | # VLAN for iDRAC network.
25 | #idrac_network_vlan:
26 |
27 | # ID of VLAN to use for bootstrapping iDRACs.
28 | #idrac_bootstrap_vlan:
29 |
30 | # Name of network namespace on controller to use for bootstrapping iDRACs.
31 | #idrac_bootstrap_net_namespace:
32 |
33 | # Controller group to use for bootstrapping iDRACs.
34 | #idrac_bootstrap_controller_group:
35 |
36 | # Base network interface on controller to use for bootstrapping iDRACs.
37 | #idrac_bootstrap_controller_interface:
38 |
39 | # VLAN network interface on controller to create for bootstrapping iDRACs.
40 | #idrac_bootstrap_controller_vlan_interface:
41 |
42 | # IP address of controller to use for bootstrapping iDRACs.
43 | #idrac_bootstrap_controller_ip:
44 |
45 | # Name of an Ansible group containing switches forming the iDRAC network.
46 | #idrac_bootstrap_switch_group:
47 |
48 | ###############################################################################
49 | # Dummy variable to allow Ansible to accept this file.
50 | workaround_ansible_issue_8743: yes
51 |
--------------------------------------------------------------------------------
/etc/kayobe/infra-vms.yml:
--------------------------------------------------------------------------------
1 | ---
2 | ###############################################################################
3 | # Infrastructure VM configuration.
4 |
5 | # Name of the infra VM.
6 | #infra_vm_name:
7 |
8 | # Memory in MB.
9 | #infra_vm_memory_mb:
10 |
11 | # Number of vCPUs.
12 | #infra_vm_vcpus:
13 |
14 | # List of volumes.
15 | #infra_vm_volumes:
16 |
17 | # Root volume.
18 | #infra_vm_root_volume:
19 |
20 | # Data volume.
21 | #infra_vm_data_volume:
22 |
23 | # Name of the storage pool for the infra VM volumes.
24 | #infra_vm_pool:
25 |
26 | # Capacity of the infra VM root volume.
27 | #infra_vm_root_capacity:
28 |
29 | # Format of the infra VM root volume.
30 | #infra_vm_root_format:
31 |
32 | # Base image for the infra VM root volume. Default is
33 | # "https://cloud-images.ubuntu.com/noble/current/noble-server-cloudimg-amd64.img"
34 | # when os_distribution is "ubuntu", or
35 | # "https://dl.rockylinux.org/pub/rocky/9/images/x86_64/Rocky-9-GenericCloud.latest.x86_64.qcow2"
36 | # when os_distribution is "rocky",
37 | # or
38 | # "https://cloud.centos.org/centos/9-stream/x86_64/images/CentOS-Stream-GenericCloud-9-20221206.0.x86_64.qcow2"
39 | # otherwise.
40 | #infra_vm_root_image:
41 |
42 | # Capacity of the infra VM data volume.
43 | #infra_vm_data_capacity:
44 |
45 | # Format of the infra VM data volume.
46 | #infra_vm_data_format:
47 |
48 | # List of network interfaces to attach to the infra VM.
49 | #infra_vm_interfaces:
50 |
51 | # Hypervisor that the VM runs on.
52 | #infra_vm_hypervisor:
53 |
54 | # Customise ansible_ssh_extra_args for the test that checks SSH connectivity
55 | # after provisioning. Defaults to disabling ssh host key checking.
56 | #infra_vm_wait_connection_ssh_extra_args:
57 |
58 | # OS family. Needed for config drive generation.
59 | #infra_vm_os_family:
60 |
61 | # Boot firmware. Possible values are 'bios' or 'efi'. Default is 'efi'.
62 | #infra_vm_boot_firmware:
63 |
64 | # Machine type. If unset, the libvirt default machine type is used.
65 | #infra_vm_machine:
66 |
67 | ###############################################################################
68 | # Infrastructure VM node configuration.
69 |
70 | # User with which to access the infrastructure vm via SSH during bootstrap, in
71 | # order to set up the Kayobe user account.
72 | #infra_vm_bootstrap_user:
73 |
74 | ###############################################################################
75 | # Infrastructure VM network interface configuration.
76 |
77 | # List of networks to which infrastructure vm nodes are attached.
78 | #infra_vm_network_interfaces:
79 |
80 | # List of default networks to which infrastructure vm nodes are attached.
81 | #infra_vm_default_network_interfaces:
82 |
83 | # List of extra networks to which infrastructure vm nodes are attached.
84 | #infra_vm_extra_network_interfaces:
85 |
86 | ###############################################################################
87 | # Infrastructure VM node software RAID configuration.
88 |
89 | # List of software RAID arrays. See mrlesmithjr.mdadm role for format.
90 | #infra_vm_mdadm_arrays:
91 |
92 | ###############################################################################
93 | # Infrastructure VM node encryption configuration.
94 |
95 | # List of block devices to encrypt. See stackhpc.luks role for format.
96 | #infra_vm_luks_devices:
97 |
98 | ###############################################################################
99 | # Infrastructure VM node LVM configuration.
100 |
101 | # List of infrastructure vm volume groups. See mrlesmithjr.manage_lvm role for
102 | # format.
103 | #infra_vm_lvm_groups:
104 |
105 | # Default list of infrastructure vm volume groups. See mrlesmithjr.manage_lvm
106 | # role for format.
107 | #infra_vm_lvm_groups_default:
108 |
109 | # Additional list of infrastructure vm volume groups. See mrlesmithjr.manage_lvm
110 | # role for format.
111 | #infra_vm_lvm_groups_extra:
112 |
113 | # Whether a 'data' LVM volume group should exist on the infrastructure vm. By
114 | # default this contains a 'docker-volumes' logical volume for Docker volume
115 | # storage. Default is false.
116 | #infra_vm_lvm_group_data_enabled:
117 |
118 | # Infrastructure VM LVM volume group for data. See mrlesmithjr.manage_lvm role
119 | # for format.
120 | #infra_vm_lvm_group_data:
121 |
122 | # List of disks for use by infrastructure vm LVM data volume group. Defaults
123 | # to an invalid value to require configuration.
124 | #infra_vm_lvm_group_data_disks:
125 |
126 | # List of LVM logical volumes for the data volume group.
127 | #infra_vm_lvm_group_data_lvs:
128 |
129 | # Docker volumes LVM backing volume.
130 | #infra_vm_lvm_group_data_lv_docker_volumes:
131 |
132 | # Size of docker volumes LVM backing volume.
133 | #infra_vm_lvm_group_data_lv_docker_volumes_size:
134 |
135 | # Filesystem for docker volumes LVM backing volume. ext4 allows for shrinking.
136 | #infra_vm_lvm_group_data_lv_docker_volumes_fs:
137 |
138 | ###############################################################################
139 | # Infrastructure VM node sysctl configuration.
140 |
141 | # Dict of sysctl parameters to set.
142 | #infra_vm_sysctl_parameters:
143 |
144 | ###############################################################################
145 | # Infrastructure VM node tuned configuration.
146 |
147 | # Builtin tuned profile to use. Format is same as that used by giovtorres.tuned
148 | # role. Default is virtual-guest.
149 | #infra_vm_tuned_active_builtin_profile:
150 |
151 | ###############################################################################
152 | # Infrastructure VM node user configuration.
153 |
154 | # List of users to create. This should be in a format accepted by the
155 | # singleplatform-eng.users role.
156 | #infra_vm_users:
157 |
158 | ###############################################################################
159 | # Infrastructure VM node firewalld configuration.
160 |
161 | # Whether to install and enable firewalld.
162 | #infra_vm_firewalld_enabled:
163 |
164 | # A list of zones to create. Each item is a dict containing a 'zone' item.
165 | #infra_vm_firewalld_zones:
166 |
167 | # A firewalld zone to set as the default. Default is unset, in which case the
168 | # default zone will not be changed.
169 | #infra_vm_firewalld_default_zone:
170 |
171 | # A list of firewall rules to apply. Each item is a dict containing arguments
172 | # to pass to the firewalld module. Arguments are omitted if not provided, with
173 | # the following exceptions:
174 | # - offline: true
175 | # - permanent: true
176 | # - state: enabled
177 | #infra_vm_firewalld_rules:
178 |
179 | ###############################################################################
180 | # Dummy variable to allow Ansible to accept this file.
181 | workaround_ansible_issue_8743: yes
182 |
--------------------------------------------------------------------------------
/etc/kayobe/inspector.yml:
--------------------------------------------------------------------------------
1 | ---
2 | ###############################################################################
3 | # General configuration of inspection.
4 |
5 | # Timeout of hardware inspection on baremetal compute nodes, in seconds.
6 | # Default is 1200.
7 | #inspector_inspection_timeout:
8 |
9 | ###############################################################################
10 | # Ironic inspector PXE configuration.
11 |
12 | # List of extra kernel parameters for the inspector default PXE configuration.
13 | #inspector_extra_kernel_options:
14 |
15 | # URL of Ironic Python Agent (IPA) kernel image.
16 | #inspector_ipa_kernel_upstream_url:
17 |
18 | # URL of checksum of Ironic Python Agent (IPA) kernel image.
19 | #inspector_ipa_kernel_checksum_url:
20 |
21 | # Algorithm of checksum of Ironic Python Agent (IPA) kernel image.
22 | #inspector_ipa_kernel_checksum_algorithm:
23 |
24 | # URL of Ironic Python Agent (IPA) ramdisk image.
25 | #inspector_ipa_ramdisk_upstream_url:
26 |
27 | # URL of checksum of Ironic Python Agent (IPA) ramdisk image.
28 | #inspector_ipa_ramdisk_checksum_url:
29 |
30 | # Algorithm of checksum of Ironic Python Agent (IPA) ramdisk image.
31 | #inspector_ipa_ramdisk_checksum_algorithm:
32 |
33 | ###############################################################################
34 | # Ironic inspector processing configuration.
35 |
36 | # List of default inspector processing plugins.
37 | #inspector_processing_hooks_default:
38 |
39 | # List of additional inspector processing plugins.
40 | #inspector_processing_hooks_extra:
41 |
42 | # List of all inspector processing plugins.
43 | #inspector_processing_hooks:
44 |
45 | # Which MAC addresses to add as ports during introspection. One of 'all',
46 | # 'active' or 'pxe'.
47 | #inspector_add_ports:
48 |
49 | # Which ports to keep after introspection. One of 'all', 'present', or 'added'.
50 | #inspector_keep_ports:
51 |
52 | # Whether to enable discovery of nodes not managed by Ironic.
53 | #inspector_enable_discovery:
54 |
55 | # The Ironic driver with which to register newly discovered nodes.
56 | #inspector_discovery_enroll_node_driver:
57 |
58 | ###############################################################################
59 | # Ironic inspector configuration.
60 |
61 | # Ironic inspector option to enable IPMI rules. Set to 'True' by default.
62 | #inspector_rules_ipmi_enabled:
63 |
64 | # Ironic inspector IPMI username to set.
65 | #inspector_ipmi_username:
66 |
67 | # Ironic inspector IPMI password to set.
68 | #inspector_ipmi_password:
69 |
70 | # Ironic inspector default network interface name on which to check for an LLDP
71 | # switch port description to use as the node's name.
72 | #inspector_lldp_switch_port_interface_default:
73 |
74 | # Ironic inspector map from hostname to network interface name on which to
75 | # check for an LLDP switch port description to use as the node's name.
76 | #inspector_lldp_switch_port_interface_map:
77 |
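# For example, a sketch mapping hostnames to interface names (both are
# hypothetical):
# inspector_lldp_switch_port_interface_map:
#   controller0: eth1
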
78 | # Ironic inspector uses IPMI by default to enroll the baremetal nodes; however,
79 | # it is possible to use Redfish instead. To do that, enable Redfish and make
80 | # sure all of the necessary variables below have been properly set.
81 | # Enable inspector Redfish rules. Set to 'False' by default.
82 | #inspector_rules_redfish_enabled:
83 |
84 | # Ironic inspector Redfish username to set.
85 | #inspector_redfish_username:
86 |
87 | # Ironic inspector Redfish password to set.
88 | #inspector_redfish_password:
89 |
90 | # Redfish CA setting. Set to 'True' by default.
91 | #inspector_rule_var_redfish_verify_ca:
92 |
93 | ###############################################################################
94 | # Ironic inspector introspection rules configuration.
95 |
96 | # Ironic inspector rule to set IPMI credentials.
97 | #inspector_rule_ipmi_credentials:
98 |
99 | # Ironic inspector rule to set deployment kernel.
100 | #inspector_rule_deploy_kernel:
101 |
102 | # Ironic inspector rule to set deployment ramdisk.
103 | #inspector_rule_deploy_ramdisk:
104 |
105 | # Ironic inspector rule to initialise root device hints.
106 | #inspector_rule_root_hint_init:
107 |
108 | # Ironic inspector rule to set serial root device hint.
109 | #inspector_rule_root_hint_serial:
110 |
111 | # Ironic inspector rule to set the interface on which the node PXE booted.
112 | #inspector_rule_set_pxe_interface_mac:
113 |
114 | # Ironic inspector rule to set the node's name from an interface's LLDP switch
115 | # port description.
116 | #inspector_rule_lldp_switch_port_desc_to_name:
117 |
118 | # Ironic inspector rule to save introspection data to the node.
119 | #inspector_rule_save_data:
120 |
121 | # List of default ironic inspector rules.
122 | #inspector_rules_default:
123 |
124 | # List of additional ironic inspector rules.
125 | #inspector_rules_extra:
126 |
127 | # List of all ironic inspector rules.
128 | #inspector_rules:
129 |
130 | ###############################################################################
131 | # Dell switch LLDP workaround configuration.
132 |
133 | # Some Dell switch OSs (including Dell Network OS 9.10(0.1)) do not support
134 | # sending interface port description TLVs correctly. Instead of sending the
135 | # interface description, they send the interface name (e.g. TenGigabitEthernet
136 | # 1/1/1). This breaks the discovery process which relies on Ironic node
137 | # introspection data containing the node's name in the interface port
138 | # description. We work around this here by creating an introspection rule for
139 | # each ironic node that matches against the switch system and the relevant
140 | # interface name, then sets the node's name appropriately.
141 |
142 | # Ansible group containing switch hosts to which the workaround should be
143 | # applied.
144 | #inspector_dell_switch_lldp_workaround_group:
145 |
146 | ###############################################################################
147 | # Inspection store configuration.
148 | # The inspection store provides a Swift-like service for storing inspection
149 | # data which may be useful in environments without Swift.
150 |
151 | # Whether the inspection data store is enabled.
152 | inspector_store_enabled: false
153 |
154 | # Port on which the inspection data store should listen.
155 | #inspector_store_port:
156 |
157 | ###############################################################################
158 | # Dummy variable to allow Ansible to accept this file.
159 | workaround_ansible_issue_8743: yes
160 |
--------------------------------------------------------------------------------
/etc/kayobe/inventory/group_vars/compute/network-interfaces:
--------------------------------------------------------------------------------
1 | ---
2 | ###############################################################################
3 | # Network interface definitions for the compute group.
4 |
5 | # Controller interface on all-in-one network.
6 | aio_interface: "br{{ aio_bridge_ports[0] }}"
7 | aio_bridge_ports:
8 | - "{{ 'ens2' if os_distribution == 'ubuntu' else 'eth0' }}"
9 |
10 | # Route via the seed-hypervisor to the outside world.
11 | aio_gateway: 192.168.33.4
12 |
13 | ###############################################################################
14 | # Dummy variable to allow Ansible to accept this file.
15 | workaround_ansible_issue_8743: yes
16 |
--------------------------------------------------------------------------------
/etc/kayobe/inventory/group_vars/controllers/network-interfaces:
--------------------------------------------------------------------------------
1 | ---
2 | ###############################################################################
3 | # Network interface definitions for the controller group.
4 |
5 | # Controller interface on all-in-one network.
6 | aio_interface: "br{{ aio_bridge_ports[0] }}"
7 | aio_bridge_ports:
8 | - "{{ 'ens2' if os_distribution == 'ubuntu' else 'eth0' }}"
9 |
10 | # Route via the seed-hypervisor to the outside world.
11 | aio_gateway: 192.168.33.4
12 |
13 | ###############################################################################
14 | # Dummy variable to allow Ansible to accept this file.
15 | workaround_ansible_issue_8743: yes
16 |
--------------------------------------------------------------------------------
/etc/kayobe/inventory/group_vars/infra-vms/ansible-python-interpreter:
--------------------------------------------------------------------------------
1 | ---
2 | # Use a virtual environment for remote operations.
3 | ansible_python_interpreter: "{{ virtualenv_path }}/kayobe/bin/python"
4 |
--------------------------------------------------------------------------------
/etc/kayobe/inventory/group_vars/infra-vms/network-interfaces:
--------------------------------------------------------------------------------
1 | ---
2 | ###############################################################################
3 | # Network interface definitions for the infra-vms group.
4 |
5 | # Overcloud provisioning network IP information.
6 | # provision_oc_net_interface:
7 | # provision_oc_net_bridge_ports:
8 | # provision_oc_net_bond_slaves:
9 |
10 | ###############################################################################
11 | # Dummy variable to allow Ansible to accept this file.
12 | workaround_ansible_issue_8743: yes
13 |
--------------------------------------------------------------------------------
/etc/kayobe/inventory/group_vars/overcloud/ansible-python-interpreter:
--------------------------------------------------------------------------------
1 | ---
2 | # Use a virtual environment for remote operations.
3 | ansible_python_interpreter: "{{ virtualenv_path }}/kayobe/bin/python"
4 |
--------------------------------------------------------------------------------
/etc/kayobe/inventory/group_vars/seed-hypervisor/ansible-python-interpreter:
--------------------------------------------------------------------------------
1 | ---
2 | # Use a virtual environment for remote operations.
3 | ansible_python_interpreter: "{{ virtualenv_path }}/kayobe/bin/python"
4 |
--------------------------------------------------------------------------------
/etc/kayobe/inventory/group_vars/seed-hypervisor/network-interfaces:
--------------------------------------------------------------------------------
1 | ---
2 | ###############################################################################
3 | # Network interface definitions for the seed-hypervisor group.
4 |
5 | aio_interface: braio
6 | aio_bridge_ports:
7 | - dummy1
8 |
9 | ###############################################################################
10 | # Dummy variable to allow Ansible to accept this file.
11 | workaround_ansible_issue_8743: yes
12 |
--------------------------------------------------------------------------------
/etc/kayobe/inventory/group_vars/seed/ansible-python-interpreter:
--------------------------------------------------------------------------------
1 | ---
2 | # Use a virtual environment for remote operations.
3 | ansible_python_interpreter: "{{ virtualenv_path }}/kayobe/bin/python"
4 |
--------------------------------------------------------------------------------
/etc/kayobe/inventory/group_vars/seed/network-interfaces:
--------------------------------------------------------------------------------
1 | ---
2 | ###############################################################################
3 | # Network interface definitions for the seed group.
4 |
5 | aio_interface: "{{ 'ens2' if os_distribution == 'ubuntu' else 'eth0' }}"
6 |
7 | # Route via the seed-hypervisor to the outside world.
8 | # FIXME: Circular reference between seed & seed-hypervisor?
9 | #aio_gateway: "{{ 'aio' | net_ip('localhost') }}"
10 | aio_gateway: 192.168.33.4
11 |
12 | ###############################################################################
13 | # Dummy variable to allow Ansible to accept this file.
14 | workaround_ansible_issue_8743: yes
15 |
--------------------------------------------------------------------------------
/etc/kayobe/inventory/group_vars/storage/network-interfaces:
--------------------------------------------------------------------------------
1 | ---
2 | ###############################################################################
3 | # Network interface definitions for the storage group.
4 |
5 | # Overcloud provisioning network IP information.
6 | # provision_oc_net_interface:
7 | # provision_oc_net_bridge_ports:
8 | # provision_oc_net_bond_slaves:
9 |
10 | # External network IP information.
11 | # external_net_interface:
12 | # external_net_bridge_ports:
13 | # external_net_bond_slaves:
14 |
15 | # Storage network IP information.
16 | # storage_net_interface:
17 | # storage_net_bridge_ports:
18 | # storage_net_bond_slaves:
19 |
20 | # Storage management network IP information.
21 | # storage_mgmt_net_interface:
22 | # storage_mgmt_net_bridge_ports:
23 | # storage_mgmt_net_bond_slaves:
24 |
25 | # Swift storage network IP information.
26 | # swift_storage_net_interface:
27 | # swift_storage_net_bridge_ports:
28 | # swift_storage_net_bond_slaves:
29 |
30 | # Swift storage management network IP information.
31 | # swift_storage_replication_net_interface:
32 | # swift_storage_replication_net_bridge_ports:
33 | # swift_storage_replication_net_bond_slaves:
34 |
35 | ###############################################################################
36 | # Dummy variable to allow Ansible to accept this file.
37 | workaround_ansible_issue_8743: yes
38 |
--------------------------------------------------------------------------------
/etc/kayobe/inventory/groups:
--------------------------------------------------------------------------------
1 | # Kayobe groups inventory file. This file should generally not be modified.
2 | # It declares the top-level groups and sub-groups.
3 |
4 | ###############################################################################
5 | # Seed groups.
6 |
7 | [seed]
8 | # Empty group to provide declaration of seed group.
9 |
10 | [seed-hypervisor]
11 | # Empty group to provide declaration of seed-hypervisor group.
12 |
13 | [container-image-builders:children]
14 | # Build container images on the seed by default.
15 | seed
16 |
17 | ###############################################################################
18 | # Infra VM groups.
19 |
20 | [hypervisors:children]
21 | # Group that contains all hypervisors used for infra VMs
22 | seed-hypervisor
23 |
24 | [infra-vms]
25 | # Empty group to provide declaration of infra-vms group.
26 |
27 | ###############################################################################
28 | # Overcloud groups.
29 |
30 | [controllers]
31 | # Empty group to provide declaration of controllers group.
32 |
33 | [network:children]
34 | # Add controllers to network group by default for backwards compatibility,
35 | # although they could be separate hosts.
36 | controllers
37 |
38 | [monitoring]
39 | # Empty group to provide declaration of monitoring group.
40 |
41 | [storage]
42 | # Empty group to provide declaration of storage group.
43 |
44 | [compute-vgpu]
45 | # Empty group to provide declaration of compute-vgpu group.
46 |
47 | [compute:children]
48 | compute-vgpu
49 |
50 | [overcloud:children]
51 | controllers
52 | network
53 | monitoring
54 | storage
55 | compute
56 |
57 | ###############################################################################
58 | # Feature control groups
59 | [vgpu:children]
60 | compute-vgpu
61 |
62 | [iommu:children]
63 | vgpu
64 |
65 | ###############################################################################
66 | # Service groups.
67 |
68 | [container-engine:children]
69 | # Hosts in this group will have Docker/Podman installed.
70 | seed
71 | controllers
72 | network
73 | monitoring
74 | storage
75 | compute
76 |
77 | [docker-registry:children]
78 | # Hosts in this group will have a Docker Registry deployed. This group should
79 | # generally contain only a single host, to avoid deploying multiple independent
80 | # registries which may become unsynchronized.
81 | seed
82 |
83 | [ntp:children]
84 | # Kayobe will configure Chrony on members of this group.
85 | seed
86 | seed-hypervisor
87 | overcloud
88 |
89 | ###############################################################################
90 | # Baremetal compute node groups.
91 |
92 | [baremetal-compute]
93 | # Empty group to provide declaration of baremetal-compute group.
94 |
95 | ###############################################################################
96 | # Networking groups.
97 |
98 | [mgmt-switches]
99 | # Empty group to provide declaration of mgmt-switches group.
100 |
101 | [ctl-switches]
102 | # Empty group to provide declaration of ctl-switches group.
103 |
104 | [hs-switches]
105 | # Empty group to provide declaration of hs-switches group.
106 |
107 | [switches:children]
108 | mgmt-switches
109 | ctl-switches
110 | hs-switches
111 |
--------------------------------------------------------------------------------
/etc/kayobe/inventory/hosts:
--------------------------------------------------------------------------------
1 | # This host acts as the configuration management Ansible control host. This must be
2 | # localhost.
3 | localhost ansible_connection=local
4 |
5 | [seed-hypervisor]
6 | seed-hypervisor
7 |
8 | [seed]
9 | seed
10 |
11 | [controllers]
12 | #controller0
13 |
14 | [compute:children]
15 | #controllers
16 |
17 | [baremetal-compute]
18 | # Add baremetal compute nodes here if required.
19 |
20 | [mgmt-switches]
21 | # Add management network switches here if required.
22 |
23 | [ctl-switches]
24 | # Add control and provisioning switches here if required.
25 |
26 | [hs-switches]
27 | # Add high speed switches here if required.
28 |
--------------------------------------------------------------------------------
/etc/kayobe/ipa.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Ironic Python Agent (IPA) configuration.
3 |
4 | ###############################################################################
5 | # Ironic Python Agent (IPA) image build configuration.
6 |
7 | # Whether to build IPA images from source.
8 | #ipa_build_images:
9 |
10 | # URL of IPA source repository.
11 | #ipa_build_source_url:
12 |
13 | # Version of IPA source repository. Default is {{ openstack_branch }}.
14 | #ipa_build_source_version:
15 |
16 | # URL of IPA builder source repository.
17 | #ipa_builder_source_url:
18 |
19 | # Version of IPA builder source repository. Default is {{ openstack_branch }}.
20 | #ipa_builder_source_version:
21 |
22 | # List of additional build host packages to install. Default is an empty list.
23 | #ipa_build_dib_host_packages_extra:
24 |
25 | # List of default Diskimage Builder (DIB) elements to use when building IPA
26 | # images. Default is ["centos", "dynamic-login", "enable-serial-console",
27 | # "ironic-python-agent-ramdisk"] when os_distribution is "rocky", and
28 | # ["ubuntu", "dynamic-login", "enable-serial-console",
29 | # "ironic-python-agent-ramdisk"] otherwise.
30 | #ipa_build_dib_elements_default:
31 |
32 | # List of additional Diskimage Builder (DIB) elements to use when building IPA
33 | # images. Default is none.
34 | #ipa_build_dib_elements_extra:
35 |
36 | # List of Diskimage Builder (DIB) elements to use when building IPA images.
37 | # Default is combination of ipa_build_dib_elements_default and
38 | # ipa_build_dib_elements_extra.
39 | #ipa_build_dib_elements:
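#
# As an untested sketch, enabling source image builds with one extra DIB
# element might look as follows ("devuser" is illustrative; any DIB element
# could be used):
# ipa_build_images: true
# ipa_build_dib_elements_extra:
#   - "devuser"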
40 |
41 | # Dictionary of default environment variables to provide to Diskimage Builder
42 | # (DIB) during IPA image build.
43 | #ipa_build_dib_env_default:
44 |
45 | # Dictionary of additional environment variables to provide to Diskimage
46 | # Builder (DIB) during IPA image build.
47 | #ipa_build_dib_env_extra:
48 |
49 | # Dictionary of environment variables to provide to Diskimage Builder (DIB)
50 | # during IPA image build.
51 | #ipa_build_dib_env:
52 |
53 | # List of default git repositories containing Diskimage Builder (DIB) elements.
54 | # See stackhpc.openstack.os_images role for usage.
55 | # Default is one item for IPA builder.
56 | #ipa_build_dib_git_elements_default:
57 |
58 | # List of additional git repositories containing Diskimage Builder (DIB)
59 | # elements. See stackhpc.openstack.os_images role for usage. Default is empty.
60 | #ipa_build_dib_git_elements_extra:
61 |
62 | # List of git repositories containing Diskimage Builder (DIB) elements. See
63 | # stackhpc.openstack.os_images role for usage. Default is a combination of
64 | # ipa_build_dib_git_elements_default and ipa_build_dib_git_elements_extra.
65 | #ipa_build_dib_git_elements:
66 |
67 | # List of DIB packages to install. Default is none.
68 | #ipa_build_dib_packages:
69 |
70 | # Upper constraints file for installing packages in the virtual environment
71 | # used for building IPA images. Default is {{ pip_upper_constraints_file }}.
72 | #ipa_build_upper_constraints_file:
73 |
74 | # Upper constraints file for installation of DIB to build IPA images.
75 | # Default is empty string.
76 | #ipa_build_dib_upper_constraints_file:
77 |
78 | ###############################################################################
79 | # Ironic Python Agent (IPA) images configuration.
80 |
81 | # Suffix of upstream Ironic deployment image files. Default is based on
82 | # {{ openstack_branch }}.
83 | #ipa_images_upstream_url_suffix:
84 |
85 | # Name of Ironic deployment kernel image to register in Glance.
86 | #ipa_images_kernel_name:
87 |
88 | # URL of Ironic deployment kernel image to download.
89 | #ipa_kernel_upstream_url:
90 |
91 | # URL of checksum of Ironic deployment kernel image.
92 | #ipa_kernel_checksum_url:
93 |
94 | # Algorithm of checksum of Ironic deployment kernel image.
95 | #ipa_kernel_checksum_algorithm:
96 |
97 | # Name of Ironic deployment ramdisk image to register in Glance.
98 | #ipa_images_ramdisk_name:
99 |
100 | # URL of Ironic deployment ramdisk image to download.
101 | #ipa_ramdisk_upstream_url:
102 |
103 | # URL of checksum of Ironic deployment ramdisk image.
104 | #ipa_ramdisk_checksum_url:
105 |
106 | # Algorithm of checksum of Ironic deployment ramdisk image.
107 | #ipa_ramdisk_checksum_algorithm:
108 |
109 | ###############################################################################
110 | # Ironic Python Agent (IPA) deployment configuration.
111 |
112 | # Whether to enable collection of LLDP TLVs.
113 | #ipa_collect_lldp:
114 |
115 | # List of default inspection collectors to run.
116 | # NOTE: extra-hardware is not currently included as it requires a ramdisk
117 | # with the hardware python module installed.
118 | #ipa_collectors_default:
119 |
120 | # List of additional inspection collectors to run.
121 | #ipa_collectors_extra:
122 |
123 | # List of inspection collectors to run.
124 | #ipa_collectors:
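#
# For example, to add the extra-hardware collector to the defaults (per the
# note above, this assumes a ramdisk with the hardware python module
# installed):
# ipa_collectors_extra:
#   - extra-hardware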
125 |
126 | # List of default inspection benchmarks to run.
127 | #ipa_benchmarks_default:
128 |
129 | # List of extra inspection benchmarks to run.
130 | #ipa_benchmarks_extra:
131 |
132 | # List of inspection benchmarks to run.
133 | #ipa_benchmarks:
134 |
135 | # List of default kernel parameters for Ironic python agent.
136 | #ipa_kernel_options_default:
137 |
138 | # List of additional kernel parameters for Ironic python agent.
139 | #ipa_kernel_options_extra:
140 |
141 | # List of kernel parameters for Ironic python agent.
142 | #ipa_kernel_options:
143 |
144 | ###############################################################################
145 | # Dummy variable to allow Ansible to accept this file.
146 | workaround_ansible_issue_8743: yes
147 |
--------------------------------------------------------------------------------
/etc/kayobe/ironic.yml:
--------------------------------------------------------------------------------
1 | ---
2 | ###############################################################################
3 | # Ironic configuration.
4 |
5 | # Specify the list of hardware types to load during service initialization.
6 | #kolla_ironic_enabled_hardware_types:
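#
# Example (an illustrative selection, not a recommendation):
# kolla_ironic_enabled_hardware_types:
#   - ipmi
#   - redfish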
7 |
8 | # Specify the list of bios interfaces to load during service initialization.
9 | #kolla_ironic_enabled_bios_interfaces:
10 |
11 | # Default bios interface to be used for nodes that do not have bios_interface
12 | # field set.
13 | #kolla_ironic_default_bios_interface:
14 |
15 | # Specify the list of boot interfaces to load during service initialization.
16 | #kolla_ironic_enabled_boot_interfaces:
17 |
18 | # Default boot interface to be used for nodes that do not have boot_interface
19 | # field set.
20 | #kolla_ironic_default_boot_interface:
21 |
22 | # Specify the list of console interfaces to load during service initialization.
23 | #kolla_ironic_enabled_console_interfaces:
24 |
25 | # Default console interface to be used for nodes that do not have
26 | # console_interface field set.
27 | #kolla_ironic_default_console_interface:
28 |
29 | # Specify the list of deploy interfaces to load during service initialization.
30 | #kolla_ironic_enabled_deploy_interfaces:
31 |
32 | # Default deploy interface to be used for nodes that do not have
33 | # deploy_interface field set.
34 | #kolla_ironic_default_deploy_interface:
35 |
36 | # Specify the list of inspect interfaces to load during service initialization.
37 | #kolla_ironic_enabled_inspect_interfaces:
38 |
39 | # Default inspect interface to be used for nodes that do not have
40 | # inspect_interface field set.
41 | #kolla_ironic_default_inspect_interface:
42 |
43 | # Specify the list of management interfaces to load during service
44 | # initialization.
45 | #kolla_ironic_enabled_management_interfaces:
46 |
47 | # Default management interface to be used for nodes that do not have
48 | # management_interface field set.
49 | #kolla_ironic_default_management_interface:
50 |
51 | # Specify the list of network interfaces to load during service initialization.
52 | #kolla_ironic_enabled_network_interfaces:
53 |
54 | # Default network interface to be used for nodes that do not have
55 | # network_interface field set.
56 | #kolla_ironic_default_network_interface:
57 |
58 | # Specify the list of power interfaces to load during service initialization.
59 | #kolla_ironic_enabled_power_interfaces:
60 |
61 | # Default power interface to be used for nodes that do not have power_interface
62 | # field set.
63 | #kolla_ironic_default_power_interface:
64 |
65 | # Specify the list of raid interfaces to load during service initialization.
66 | #kolla_ironic_enabled_raid_interfaces:
67 |
68 | # Default raid interface to be used for nodes that do not have
69 | # raid_interface field set.
70 | #kolla_ironic_default_raid_interface:
71 |
72 | # Specify the list of rescue interfaces to load during service initialization.
73 | #kolla_ironic_enabled_rescue_interfaces:
74 |
75 | # Default rescue interface to be used for nodes that do not have
76 | # rescue_interface field set.
77 | #kolla_ironic_default_rescue_interface:
78 |
79 | # Specify the list of storage interfaces to load during
80 | # service initialization.
81 | #kolla_ironic_enabled_storage_interfaces:
82 |
83 | # Default storage interface to be used for nodes that do not
84 | # have storage_interface field set.
85 | #kolla_ironic_default_storage_interface:
86 |
87 | # Specify the list of vendor interfaces to load during service initialization.
88 | #kolla_ironic_enabled_vendor_interfaces:
89 |
90 | # Default vendor interface to be used for nodes that do not have
91 | # vendor_interface field set.
92 | #kolla_ironic_default_vendor_interface:
93 |
94 | # Name of the Neutron network to use for cleaning.
95 | #kolla_ironic_cleaning_network:
96 |
97 | # Name of the Neutron network to use for provisioning.
98 | #kolla_ironic_provisioning_network:
99 |
100 | # List of default kernel parameters to append for baremetal PXE boot.
101 | #kolla_ironic_pxe_append_params_default:
102 |
103 | # List of additional kernel parameters to append for baremetal PXE boot.
104 | #kolla_ironic_pxe_append_params_extra:
105 |
106 | # List of kernel parameters to append for baremetal PXE boot.
107 | #kolla_ironic_pxe_append_params:
108 |
109 | ###############################################################################
110 | # Ironic Node Configuration
111 |
112 | # Whether or not to enable the serial consoles on post configure
113 | #ironic_serial_console_autoenable:
114 |
115 | # This defines the start of the range of TCP ports to be used for the IPMI socat
116 | # serial consoles
117 | #ironic_serial_console_tcp_pool_start:
118 |
119 | # This defines the end of the range of TCP ports to be used for the IPMI socat
120 | # serial consoles
121 | #ironic_serial_console_tcp_pool_end:
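#
# Example (an arbitrary illustrative port range):
# ironic_serial_console_tcp_pool_start: 10000
# ironic_serial_console_tcp_pool_end: 10100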
122 |
123 | ###############################################################################
124 | # Dummy variable to allow Ansible to accept this file.
125 | workaround_ansible_issue_8743: yes
126 |
--------------------------------------------------------------------------------
/etc/kayobe/kolla.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Kayobe Kolla configuration.
3 |
4 | ###############################################################################
5 | # Kolla installation.
6 |
7 | # Type of Kolla control installation. One of 'binary' or 'source'.
8 | #kolla_ctl_install_type:
9 |
10 | # Path to directory for kolla source code checkout.
11 | #kolla_source_path:
12 |
13 | # URL of Kolla source code repository if type is 'source'.
14 | #kolla_source_url:
15 |
16 | # Version (branch, tag, etc.) of Kolla source code repository if type is
17 | # 'source'. Default is {{ openstack_branch }}.
18 | #kolla_source_version:
19 |
20 | # Path to virtualenv in which to install kolla.
21 | #kolla_venv:
22 |
23 | # Path in which to generate kolla configuration.
24 | #kolla_build_config_path:
25 |
26 | ###############################################################################
27 | # Kolla-ansible installation.
28 |
29 | # Type of Kolla-ansible control installation. One of 'binary' or 'source'.
30 | # Default is 'source'.
31 | #kolla_ansible_ctl_install_type:
32 |
33 | # Path to directory for kolla-ansible source code checkout.
34 | # Default is $KOLLA_SOURCE_PATH, or $PWD/src/kolla-ansible if
35 | # $KOLLA_SOURCE_PATH is not set.
36 | #kolla_ansible_source_path:
37 |
38 | # URL of Kolla Ansible source code repository if type is 'source'. Default is
39 | # https://opendev.org/openstack/kolla-ansible.
40 | #kolla_ansible_source_url:
41 |
42 | # Version (branch, tag, etc.) of Kolla Ansible source code repository if type
43 | # is 'source'. Default is {{ openstack_branch }}.
44 | #kolla_ansible_source_version:
45 |
46 | # Path to virtualenv in which to install kolla-ansible. Default is
47 | # $KOLLA_VENV_PATH or $PWD/venvs/kolla-ansible if $KOLLA_VENV_PATH is not set.
48 | #kolla_ansible_venv:
49 |
50 | # Extra requirements to install inside the kolla-ansible virtualenv.
51 | #kolla_ansible_venv_extra_requirements:
52 |
53 | # Pip requirement specifier for the ansible package. NOTE: This limits the
54 | # version of ansible used by kolla-ansible to avoid new releases from breaking
55 | # tested code. Changes to this limit should be tested.
56 | #kolla_ansible_venv_ansible:
57 |
58 | # Path to Kolla-ansible configuration directory. Default is $KOLLA_CONFIG_PATH
59 | # or /etc/kolla if $KOLLA_CONFIG_PATH is not set.
60 | #kolla_config_path:
61 |
62 | # Path to Kolla-ansible node custom configuration directory. Default is
63 | # {{ kolla_config_path }}/config.
64 | #kolla_node_custom_config_path:
65 |
66 | ###############################################################################
67 | # Kolla configuration.
68 |
69 | # Kolla base container image architecture. Options are "x86_64", "aarch64".
70 | # Default is "{{ ansible_facts.architecture }}"
71 | #kolla_base_arch:
72 |
73 | # Kolla base container image distribution. Options are "centos", "debian",
74 | # "rocky", "ubuntu". Default is {{ os_distribution }}.
75 | #kolla_base_distro:
76 |
77 | # Kolla base container image distribution version default map.
78 | # Defines default versions for each distribution.
79 | #kolla_base_distro_version_default_map:
80 |
81 | # Kolla base container image distribution version.
82 | # Default is kolla_base_distro_version_default_map[kolla_base_distro].
83 | #kolla_base_distro_version:
84 |
85 | # URL of docker registry to use for Kolla images. Default is not set, in which
86 | # case Quay.io will be used.
87 | #kolla_docker_registry:
88 |
89 | # Docker namespace to use for Kolla images. Default is 'kolla'.
90 | #kolla_docker_namespace:
91 |
92 | # Username to use to access a docker registry. Default is not set, in which
93 | # case the registry will be used without authentication.
94 | #kolla_docker_registry_username:
95 |
96 | # Password to use to access a docker registry. Default is not set, in which
97 | # case the registry will be used without authentication.
98 | #kolla_docker_registry_password:
99 |
100 | # Kolla OpenStack release version. This should be a Docker image tag.
101 | # Default is {{ openstack_release }}.
102 | #kolla_openstack_release:
103 |
104 | # Docker tag applied to built container images. Default is {{
105 | # kolla_openstack_release }}-{{ kolla_base_distro }}-{{
106 | # kolla_base_distro_version }}.
107 | #kolla_tag:
108 |
109 | # Dict mapping names of sources to their definitions.
110 | # See kolla.common.config for details.
111 | # Example:
112 | # kolla_sources:
113 | # ironic-base:
114 | # type: git
115 | # location: https://github.com/openstack/ironic
116 | # reference: master
117 | #kolla_sources:
118 |
119 | ###############################################################################
120 | # Kolla image build configuration.
121 |
122 | # Dict mapping Jinja2 block names in kolla's Docker images to their contents.
123 | #kolla_build_blocks:
124 |
125 | # Dict mapping image customization variable names to their values.
126 | # Each variable takes the form:
127 | # <image name>_<customization>_<operation>
128 | # Hyphens in the image name must be replaced with underscores. The
129 | # customization is most commonly packages. The operation should be one of
130 | # override, append or remove. The value should be a list.
131 | #kolla_build_customizations:
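#
# Example (hypothetical): append a package to the ironic-base image. Note
# that the hyphen in the image name is replaced with an underscore:
# kolla_build_customizations:
#   ironic_base_packages_append:
#     - vim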
132 |
133 | ###############################################################################
134 | # Kolla-ansible inventory configuration.
135 |
136 | # Full custom seed inventory contents.
137 | #kolla_seed_inventory_custom:
138 |
139 | # List of names of default host variables to pass through from kayobe hosts to
140 | # the kolla-ansible seed host, if set. See also
141 | # kolla_seed_inventory_pass_through_host_vars_map.
142 | #kolla_seed_inventory_pass_through_host_vars_default:
143 |
144 | # List of names of additional host variables to pass through from kayobe hosts
145 | # to the kolla-ansible seed host, if set. See also
146 | # kolla_seed_inventory_pass_through_host_vars_map.
147 | #kolla_seed_inventory_pass_through_host_vars_extra:
148 |
149 | # List of names of host variables to pass through from kayobe hosts to
150 | # the kolla-ansible seed host, if set. See also
151 | # kolla_seed_inventory_pass_through_host_vars_map.
152 | #kolla_seed_inventory_pass_through_host_vars:
153 |
154 | # Dict mapping names of default variables in
155 | # kolla_seed_inventory_pass_through_host_vars to the variable to use in
156 | # kolla-ansible. If a variable name is not in this mapping the kayobe name is
157 | # used.
158 | #kolla_seed_inventory_pass_through_host_vars_map_default:
159 |
160 | # Dict mapping names of extra variables in
161 | # kolla_seed_inventory_pass_through_host_vars to the variable to use in
162 | # kolla-ansible. If a variable name is not in this mapping the kayobe name is
163 | # used.
164 | #kolla_seed_inventory_pass_through_host_vars_map_extra:
165 |
166 | # Dict mapping names of variables in
167 | # kolla_seed_inventory_pass_through_host_vars to the variable to use in
168 | # kolla-ansible. If a variable name is not in this mapping the kayobe name is
169 | # used.
170 | #kolla_seed_inventory_pass_through_host_vars_map:
171 |
172 | # Custom overcloud inventory containing a mapping from top level groups to
173 | # hosts.
174 | #kolla_overcloud_inventory_custom_top_level:
175 |
176 | # Custom overcloud inventory containing a mapping from components to top level
177 | # groups.
178 | #kolla_overcloud_inventory_custom_components:
179 |
180 | # Custom overcloud inventory containing a mapping from services to components.
181 | #kolla_overcloud_inventory_custom_services:
182 |
183 | # Full custom overcloud inventory contents. By default this will be the
184 | # concatenation of the top level, component, and service inventories.
185 | #kolla_overcloud_inventory_custom:
186 |
187 | # Dict mapping from kolla-ansible groups to kayobe groups and variables. Each
188 | # item is a dict with the following items:
189 | # * groups: A list of kayobe ansible groups to map to this kolla-ansible group.
190 | # * vars: A dict mapping variable names to values for hosts in this
191 | # kolla-ansible group.
192 | #kolla_overcloud_inventory_top_level_group_map:
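#
# Example (sketch; 'somevar' is illustrative): map the kolla-ansible
# 'control' group to the kayobe 'controllers' group and set a variable on
# hosts in that group:
# kolla_overcloud_inventory_top_level_group_map:
#   control:
#     groups:
#       - controllers
#     vars:
#       somevar: somevalue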
193 |
194 | # List of names of top level kolla-ansible groups. Any of these groups which
195 | # have no hosts mapped to them will be provided with an empty group definition.
196 | #kolla_overcloud_inventory_kolla_top_level_groups:
197 |
198 | # List of names of default host variables to pass through from kayobe hosts to
199 | # kolla-ansible hosts, if set. See also
200 | # kolla_overcloud_inventory_pass_through_host_vars_map.
201 | #kolla_overcloud_inventory_pass_through_host_vars_default:
202 |
203 | # List of names of additional host variables to pass through from kayobe hosts
204 | # to kolla-ansible hosts, if set. See also
205 | # kolla_overcloud_inventory_pass_through_host_vars_map.
206 | #kolla_overcloud_inventory_pass_through_host_vars_extra:
207 |
208 | # List of names of host variables to pass through from kayobe hosts to
209 | # kolla-ansible hosts, if set. See also
210 | # kolla_overcloud_inventory_pass_through_host_vars_map.
211 | #kolla_overcloud_inventory_pass_through_host_vars:
212 |
213 | # Dict mapping names of default variables in
214 | # kolla_overcloud_inventory_pass_through_host_vars to the variable to use in
215 | # kolla-ansible. If a variable name is not in this mapping the kayobe name is
216 | # used.
217 | #kolla_overcloud_inventory_pass_through_host_vars_map_default:
218 |
219 | # Dict mapping names of additional variables in
220 | # kolla_overcloud_inventory_pass_through_host_vars to the variable to use in
221 | # kolla-ansible. If a variable name is not in this mapping the kayobe name is
222 | # used.
223 | #kolla_overcloud_inventory_pass_through_host_vars_map_extra:
224 |
225 | # Dict mapping names of variables in
226 | # kolla_overcloud_inventory_pass_through_host_vars to the variable to use in
227 | # kolla-ansible. If a variable name is not in this mapping the kayobe name is
228 | # used.
229 | #kolla_overcloud_inventory_pass_through_host_vars_map:
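#
# Example (hypothetical variable names): pass a kayobe host variable through
# to kolla-ansible under a different name:
# kolla_overcloud_inventory_pass_through_host_vars_map_extra:
#   my_kayobe_var: my_kolla_var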
230 |
231 | ###############################################################################
232 | # Kolla-ansible configuration.
233 |
234 | # Virtualenv directory where Kolla-ansible's ansible modules will execute
235 | # remotely on the target nodes. If None, no virtualenv will be used.
236 | #kolla_ansible_target_venv:
237 |
238 | # Password to use to encrypt the kolla-ansible passwords.yml file.
239 | #kolla_ansible_vault_password:
240 |
241 | # HashiCorp Vault configuration.
242 | #kolla_ansible_vault_addr:
243 | #kolla_ansible_vault_mount_point:
244 | #kolla_ansible_vault_kv_path:
245 | #kolla_ansible_vault_namespace:
246 | #kolla_ansible_vault_role_id:
247 | #kolla_ansible_vault_secret_id:
248 | #kolla_ansible_vault_token:
249 | #kolla_ansible_vault_cacert:
250 |
251 | # Whether TLS is enabled for the external API endpoints. Default is 'no'.
252 | #kolla_enable_tls_external:
253 |
254 | # Whether TLS is enabled for the internal API endpoints. Default is 'no'.
255 | #kolla_enable_tls_internal:
256 |
257 | # Whether debug logging is enabled. Default is 'false'.
258 | #kolla_openstack_logging_debug:
259 |
260 | # Upper constraints file for installation of Kolla.
261 | # Default value is {{ pip_upper_constraints_file }}.
262 | #kolla_upper_constraints_file:
263 |
264 | # User account to use for Kolla SSH access. Default is 'kolla'.
265 | #kolla_ansible_user:
266 |
267 | # Primary group of Kolla SSH user. Default is 'kolla'.
268 | #kolla_ansible_group:
269 |
270 | # Whether to use privilege escalation for operations on the control host.
271 | # Default is {{ kayobe_control_host_become }}.
272 | #kolla_ansible_control_host_become:
273 |
274 | # Whether to use privilege escalation for all operations performed via Kolla
275 | # Ansible. Default is 'false'.
276 | #kolla_ansible_become:
277 |
278 | # Whether to create a user account, configure passwordless sudo and authorise
279 | # an SSH key for Kolla Ansible. Default is 'true'.
280 | #kolla_ansible_create_user:
281 |
282 | ###############################################################################
283 | # Kolla feature flag configuration.
284 |
285 | #kolla_enable_aodh:
286 | #kolla_enable_barbican:
287 | #kolla_enable_blazar:
288 | #kolla_enable_ceilometer:
289 | #kolla_enable_ceilometer_horizon_policy_file:
290 | #kolla_enable_ceilometer_ipmi:
291 | #kolla_enable_ceilometer_prometheus_pushgateway:
292 | #kolla_enable_cells:
293 | #kolla_enable_central_logging:
294 | #kolla_enable_ceph_rgw:
295 | #kolla_enable_ceph_rgw_loadbalancer:
296 | #kolla_enable_cinder:
297 | #kolla_enable_cinder_backend_iscsi:
298 | #kolla_enable_cinder_backend_lightbits:
299 | #kolla_enable_cinder_backend_lvm:
300 | #kolla_enable_cinder_backend_nfs:
301 | #kolla_enable_cinder_backend_pure_fc:
302 | #kolla_enable_cinder_backend_pure_iscsi:
303 | #kolla_enable_cinder_backend_pure_nvme_tcp:
304 | #kolla_enable_cinder_backend_pure_roce:
305 | #kolla_enable_cinder_backend_quobyte:
306 | #kolla_enable_cinder_backup:
307 | #kolla_enable_cinder_horizon_policy_file:
308 | #kolla_enable_cloudkitty:
309 | #kolla_enable_collectd:
310 | #kolla_enable_container_healthchecks:
311 | #kolla_enable_cyborg:
312 | #kolla_enable_designate:
313 | #kolla_enable_destroy_images:
314 | #kolla_enable_etcd:
315 | #kolla_enable_external_api_firewalld:
316 | #kolla_enable_external_mariadb_load_balancer:
317 | #kolla_enable_fluentd:
318 | #kolla_enable_fluentd_systemd:
319 | #kolla_enable_glance:
320 | #kolla_enable_glance_horizon_policy_file:
321 | #kolla_enable_glance_image_cache:
322 | #kolla_enable_gnocchi:
323 | #kolla_enable_gnocchi_statsd:
324 | #kolla_enable_grafana:
325 | #kolla_enable_grafana_external:
326 | #kolla_enable_hacluster:
327 | #kolla_enable_haproxy:
328 | #kolla_enable_haproxy_memcached:
329 | #kolla_enable_heat:
330 | #kolla_enable_heat_horizon_policy_file:
331 | #kolla_enable_horizon:
332 | #kolla_enable_horizon_blazar:
333 | #kolla_enable_horizon_cloudkitty:
334 | #kolla_enable_horizon_designate:
335 | #kolla_enable_horizon_fwaas:
336 | #kolla_enable_horizon_heat:
337 | #kolla_enable_horizon_ironic:
338 | #kolla_enable_horizon_magnum:
339 | #kolla_enable_horizon_manila:
340 | #kolla_enable_horizon_masakari:
341 | #kolla_enable_horizon_mistral:
342 | #kolla_enable_horizon_neutron_vpnaas:
343 | #kolla_enable_horizon_octavia:
344 | #kolla_enable_horizon_tacker:
345 | #kolla_enable_horizon_trove:
346 | #kolla_enable_horizon_venus:
347 | #kolla_enable_horizon_watcher:
348 | #kolla_enable_horizon_zun:
349 | #kolla_enable_influxdb:
350 | #kolla_enable_ironic:
351 | #kolla_enable_ironic_dnsmasq:
352 | #kolla_enable_ironic_inspector:
353 | #kolla_enable_ironic_neutron_agent:
354 | #kolla_enable_ironic_prometheus_exporter:
355 | #kolla_enable_iscsid:
356 | #kolla_enable_keepalived:
357 | #kolla_enable_keystone:
358 | #kolla_enable_keystone_federation:
359 | #kolla_enable_keystone_horizon_policy_file:
360 | #kolla_enable_kuryr:
361 | #kolla_enable_letsencrypt:
362 | #kolla_enable_loadbalancer:
363 | #kolla_enable_magnum:
364 | #kolla_enable_manila:
365 | #kolla_enable_manila_backend_cephfs_native:
366 | #kolla_enable_manila_backend_cephfs_nfs:
367 | #kolla_enable_manila_backend_flashblade:
368 | #kolla_enable_manila_backend_generic:
369 | #kolla_enable_manila_backend_glusterfs_nfs:
370 | #kolla_enable_manila_backend_hnas:
371 | #kolla_enable_mariabackup:
372 | #kolla_enable_mariadb:
373 | #kolla_enable_masakari:
374 | #kolla_enable_masakari_hostmonitor:
375 | #kolla_enable_masakari_instancemonitor:
376 | #kolla_enable_memcached:
377 | #kolla_enable_mistral:
378 | #kolla_enable_multipathd:
379 | #kolla_enable_neutron:
380 | #kolla_enable_neutron_agent_ha:
381 | #kolla_enable_neutron_bgp_dragent:
382 | #kolla_enable_neutron_dvr:
383 | #kolla_enable_neutron_fwaas:
384 | #kolla_enable_neutron_horizon_policy_file:
385 | #kolla_enable_neutron_infoblox_ipam_agent:
386 | #kolla_enable_neutron_metering:
387 | #kolla_enable_neutron_mlnx:
388 | #kolla_enable_neutron_packet_logging:
389 | #kolla_enable_neutron_port_forwarding:
390 | #kolla_enable_neutron_provider_networks:
391 | #kolla_enable_neutron_qos:
392 | #kolla_enable_neutron_segments:
393 | #kolla_enable_neutron_sfc:
394 | #kolla_enable_neutron_sriov:
395 | #kolla_enable_neutron_taas:
396 | #kolla_enable_neutron_trunk:
397 | #kolla_enable_neutron_vpnaas:
398 | #kolla_enable_nova:
399 | #kolla_enable_nova_fake:
400 | #kolla_enable_nova_horizon_policy_file:
401 | #kolla_enable_nova_libvirt_container:
402 | #kolla_enable_nova_serialconsole_proxy:
403 | #kolla_enable_nova_ssh:
404 | #kolla_enable_octavia:
405 | #kolla_enable_octavia_driver_agent:
406 | #kolla_enable_octavia_jobboard:
407 | #kolla_enable_opensearch:
408 | #kolla_enable_opensearch_dashboards:
409 | #kolla_enable_opensearch_dashboards_external:
410 | #kolla_enable_openstack_core:
411 | #kolla_enable_openvswitch:
412 | #kolla_enable_osprofiler:
413 | #kolla_enable_ovn:
414 | #kolla_enable_ovn_sb_db_relay:
415 | #kolla_enable_ovs_dpdk:
416 | #kolla_enable_placement:
417 | #kolla_enable_prometheus:
418 | #kolla_enable_prometheus_alertmanager:
419 | #kolla_enable_prometheus_alertmanager_external:
420 | #kolla_enable_prometheus_blackbox_exporter:
421 | #kolla_enable_prometheus_cadvisor:
422 | #kolla_enable_prometheus_ceph_mgr_exporter:
423 | #kolla_enable_prometheus_elasticsearch_exporter:
424 | #kolla_enable_prometheus_etcd_integration:
425 | #kolla_enable_prometheus_fluentd_integration:
426 | #kolla_enable_prometheus_haproxy_exporter:
427 | #kolla_enable_prometheus_libvirt_exporter:
428 | #kolla_enable_prometheus_memcached_exporter:
429 | #kolla_enable_prometheus_mysqld_exporter:
430 | #kolla_enable_prometheus_node_exporter:
431 | #kolla_enable_prometheus_openstack_exporter:
432 | #kolla_enable_prometheus_openstack_exporter_external:
433 | #kolla_enable_prometheus_proxysql_exporter:
434 | #kolla_enable_prometheus_rabbitmq_exporter:
435 | #kolla_enable_prometheus_server:
436 | #kolla_enable_proxysql:
437 | #kolla_enable_rabbitmq:
438 | #kolla_enable_redis:
439 | #kolla_enable_skyline:
440 | #kolla_enable_tacker:
441 | #kolla_enable_telegraf:
442 | #kolla_enable_trove:
443 | #kolla_enable_trove_singletenant:
444 | #kolla_enable_venus:
445 | #kolla_enable_watcher:
446 | #kolla_enable_zun:
447 |
448 | ###############################################################################
449 | # Kolla custom config generation.
450 |
451 | # Feature flag to add $KAYOBE_CONFIG_PATH to the list of search paths used
452 | # when searching for Kolla custom service configuration. Only has an effect in
453 | # a multiple environments setup. This allows you to configure merging between
454 | # your environment and the base layer. Defaults to true. Set to false for
455 | # backwards compatibility.
456 | #kolla_openstack_custom_config_environment_merging_enabled:
457 |
458 | # Default value for kolla_openstack_custom_config_include_globs.
459 | #kolla_openstack_custom_config_include_globs_default:
460 |
461 | # Extra items to add to kolla_openstack_custom_config_include_globs_default
462 | # to produce kolla_openstack_custom_config_include_globs.
463 | #kolla_openstack_custom_config_include_globs_extra:
464 |
465 | # List of dictionaries with the following keys:
466 | # glob: a glob pattern. Any files matching this pattern will be copied to the
467 | # the kolla custom config directory
468 | # enabled: boolean to disable the glob.
469 | # This determines the list of files to copy to the generated kolla config
470 | # directory.
471 | #kolla_openstack_custom_config_include_globs:
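#
# Example (hypothetical glob):
# kolla_openstack_custom_config_include_globs_extra:
#   - glob: "myservice/*.json"
#     enabled: true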
472 |
473 | # Kolla config generation rules. These operate on the list of files produced by
474 | # applying kolla_openstack_custom_config_include_globs. Each of the paths in
475 | # kolla_openstack_custom_config_paths is searched for files matching one of the
476 | # globs. If a match is found, any files with the same relative path are grouped
477 | # together. The rules determine what to do with these matching files, e.g. copy
478 | # the most specific file without templating, merge the files with
479 | # merge_configs, etc.
480 | # List of dictionaries with the following keys:
481 | # glob: A glob matching files for this rule to match on (relative to the
482 | # search path)
483 | # priority: The rules are processed in increasing priority order with the
484 | # first rule matching taking effect.
485 | # strategy: How to process the matched file. One of copy, concat, template,
486 | # merge_configs, merge_yaml
487 | # params: List of params to pass to the module enacting the strategy
488 | # Strategies:
489 | # copy: Copy most specific file to kolla config without templating
490 | # template: Template most specific file to kolla config
491 | # concat: Concatenate files and copy the result to generated kolla config
492 | # merge_configs: Use the merge_configs module to merge an ini file, before
493 | # copying to the generated kolla-config.
494 | # merge_yaml: Use the merge_yaml module to merge a file, before copying to
495 | # the generated kolla-config.
496 | #kolla_openstack_custom_config_rules:
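#
# Example (hypothetical rule; the glob and priority are illustrative): force
# templating of one service's config files:
# kolla_openstack_custom_config_rules_extra:
#   - glob: "myservice/*.conf"
#     priority: 500
#     strategy: template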
497 |
498 | # Whether to enable ini merging rules in
499 | # kolla_openstack_custom_config_rules_default. Default is true.
500 | #kolla_openstack_custom_config_merge_configs_enabled:
501 |
502 | # Whether to enable yaml merging rules in
503 | # kolla_openstack_custom_config_rules_default. Default is true.
504 | #kolla_openstack_custom_config_merge_yaml_enabled:
505 |
506 | # Default merge strategy for ini files in
507 | # kolla_openstack_custom_config_rules_default. Default is concat.
508 | #kolla_openstack_custom_config_ini_merge_strategy_default:
509 |
510 | # Default value for kolla_openstack_custom_config_rules.
511 | #kolla_openstack_custom_config_rules_default:
512 |
513 | # List of globs to filter from kolla_openstack_custom_config_rules_default.
514 | # Default is an empty list.
515 | #kolla_openstack_custom_config_rules_default_remove:
516 |
517 | # Extra items to add to kolla_openstack_custom_config_rules_default
518 | # to produce kolla_openstack_custom_config_rules.
519 | #kolla_openstack_custom_config_rules_extra:
520 |
521 | ###############################################################################
522 | # Passwords and credentials.
523 |
524 | # Dictionary containing default custom passwords to add or override in the
525 | # Kolla passwords file.
526 | #kolla_ansible_default_custom_passwords:
527 |
528 | # Dictionary containing extra custom passwords to add or override in the Kolla
529 | # passwords file.
530 | #kolla_ansible_extra_custom_passwords:
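#
# Example (hypothetical password name; real values should normally be
# vault-encrypted):
# kolla_ansible_extra_custom_passwords:
#   my_service_password: "{{ secrets_my_service_password }}"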
531 |
532 | # Dictionary containing custom passwords to add or override in the Kolla
533 | # passwords file.
534 | #kolla_ansible_custom_passwords:
535 |
536 | ###############################################################################
537 | # OpenStack API addresses.
538 |
539 | # Virtual IP address of OpenStack internal API. Default is the vip_address
540 | # attribute of the internal network.
541 | kolla_internal_vip_address: 192.168.33.2
542 |
543 | # Fully Qualified Domain Name (FQDN) of OpenStack internal API. Default is the
544 | # fqdn attribute of the internal network if set, otherwise
545 | # kolla_internal_vip_address.
546 | #kolla_internal_fqdn:
547 |
548 | # Virtual IP address of OpenStack external API. Default is the vip_address
549 | # attribute of the external network.
550 | kolla_external_vip_address: 192.168.33.2
551 |
552 | # Fully Qualified Domain Name (FQDN) of OpenStack external API. Default is the
553 | # fqdn attribute of the external network if set, otherwise
554 | # kolla_external_vip_address.
555 | #kolla_external_fqdn:
556 |
557 | ###############################################################################
558 | # TLS certificate bundle management
559 |
560 | # External API certificate bundle.
561 | #
562 | # When kolla_enable_tls_external is true, this should contain an X.509
563 | # certificate bundle for the external API.
564 | #
565 | # Note that this should be formatted as a literal style block scalar.
566 | #kolla_external_tls_cert:
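#
# Example (certificate and key contents elided; note the literal style block
# scalar):
# kolla_external_tls_cert: |
#   -----BEGIN CERTIFICATE-----
#   ...
#   -----END CERTIFICATE-----
#   -----BEGIN PRIVATE KEY-----
#   ...
#   -----END PRIVATE KEY-----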
567 |
568 | # Path to a CA certificate file to use for the OS_CACERT environment variable
569 | # in public-openrc.sh file when TLS is enabled, instead of Kolla-Ansible's
570 | # default.
571 | #kolla_public_openrc_cacert:
572 |
573 | # Internal API certificate bundle.
574 | #
575 | # When kolla_enable_tls_internal is true, this should contain an X.509
576 | # certificate bundle for the internal API.
577 | #
578 | # Note that this should be formatted as a literal style block scalar.
579 | #kolla_internal_tls_cert:
580 |
581 | # Path to a CA certificate file to use for the OS_CACERT environment variable
582 | # in admin-openrc.sh file when TLS is enabled, instead of Kolla-Ansible's
583 | # default.
584 | #kolla_admin_openrc_cacert:
585 |
586 | ###############################################################################
587 | # Proxy configuration
588 |
589 | # HTTP proxy URL (format: http(s)://[user:password@]proxy_name:port) used by
590 | # Kolla. Default value is "{{ http_proxy }}".
591 | #kolla_http_proxy:
592 |
593 | # HTTPS proxy URL (format: http(s)://[user:password@]proxy_name:port) used by
594 | # Kolla. Default value is "{{ https_proxy }}".
595 | #kolla_https_proxy:
596 |
597 | # List of domains, hostnames, IP addresses and networks for which no proxy is
598 | # used. Default value is "{{ no_proxy }}".
599 | #kolla_no_proxy:
600 |
601 | ###############################################################################
602 | # Dummy variable to allow Ansible to accept this file.
603 | workaround_ansible_issue_8743: yes
604 |
--------------------------------------------------------------------------------
/etc/kayobe/kolla/config/bifrost/bifrost.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Don't build an IPA deployment image, instead download upstream images.
3 | create_ipa_image: false
4 | download_ipa: true
5 |
6 | # Use a locally hosted cloud image.
7 | download_custom_deploy_image: true
8 | upstream_deploy_image_distribution: "{{ os_distribution }}"
9 | upstream_deploy_image_release: "{{ os_release }}"
10 |
--------------------------------------------------------------------------------
/etc/kayobe/kolla/config/ironic-inspector.conf:
--------------------------------------------------------------------------------
1 | [processing]
2 | store_data = database
3 |
--------------------------------------------------------------------------------
/etc/kayobe/kolla/config/neutron.conf:
--------------------------------------------------------------------------------
1 | [DEFAULT]
2 | global_physnet_mtu = {{ aio_mtu }}
3 |
--------------------------------------------------------------------------------
/etc/kayobe/kolla/config/neutron/ml2_conf.ini:
--------------------------------------------------------------------------------
1 | [ml2]
2 | path_mtu = {{ aio_mtu }}
3 |
--------------------------------------------------------------------------------
/etc/kayobe/kolla/globals.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Most development environments will use nested virtualisation, and we can't
3 | # guarantee that nested KVM support is available. Use QEMU as a lowest common
4 | # denominator.
5 | nova_compute_virt_type: qemu
6 |
7 | # Reduce the control plane's memory footprint by limiting the number of worker
8 | # processes to one per-service.
9 | openstack_service_workers: "1"
10 | openstack_service_rpc_workers: "1"
11 |
--------------------------------------------------------------------------------
/etc/kayobe/kolla/kolla-build.conf:
--------------------------------------------------------------------------------
1 | [DEFAULT]
2 | # This is necessary for network connectivity of kolla-build, when Docker
3 | # default iptables rules are disabled.
4 | network_mode = host
5 |
--------------------------------------------------------------------------------
/etc/kayobe/libvirt.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Hosts used for development, test or CI may not have Virtualization Technology
3 | # (VT) enabled. Don't fail if it's disabled.
4 | libvirt_host_require_vt: false
5 |
--------------------------------------------------------------------------------
/etc/kayobe/logging.yml:
--------------------------------------------------------------------------------
1 | ---
2 | ###############################################################################
3 | # Logging configuration
4 |
5 | # Journald storage. One of: volatile, persistent, auto, or none. Defaults to
6 | # `persistent`.
7 | #journald_storage:
8 |
9 | ###############################################################################
10 | # Dummy variable to allow Ansible to accept this file.
11 | workaround_ansible_issue_8743: yes
12 |
--------------------------------------------------------------------------------
/etc/kayobe/monitoring.yml:
--------------------------------------------------------------------------------
1 | ---
2 | ###############################################################################
3 | # Monitoring node configuration.
4 |
5 | # User with which to access the monitoring nodes via SSH during bootstrap, in
6 | # order to setup the Kayobe user account.
7 | #monitoring_bootstrap_user:
8 |
9 | ###############################################################################
10 | # Monitoring node network interface configuration.
11 |
12 | # List of networks to which monitoring nodes are attached.
13 | #monitoring_network_interfaces:
14 |
15 | # List of default networks to which monitoring nodes are attached.
16 | #monitoring_default_network_interfaces:
17 |
18 | # List of extra networks to which monitoring nodes are attached.
19 | #monitoring_extra_network_interfaces:
20 |
21 | ###############################################################################
22 | # Monitoring node BIOS configuration.
23 |
24 | # Dict of monitoring node BIOS options. Format is same as that used by
25 | # stackhpc.drac role.
26 | #monitoring_bios_config:
27 |
28 | # Dict of default monitoring node BIOS options. Format is same as that used by
29 | # stackhpc.drac role.
30 | #monitoring_bios_config_default:
31 |
32 | # Dict of additional monitoring node BIOS options. Format is same as that used
33 | # by stackhpc.drac role.
34 | #monitoring_bios_config_extra:
35 |
36 | ###############################################################################
37 | # Monitoring node RAID configuration.
38 |
39 | # List of monitoring node RAID volumes. Format is same as that used by
40 | # stackhpc.drac role.
41 | #monitoring_raid_config:
42 |
43 | # List of default monitoring node RAID volumes. Format is same as that used by
44 | # stackhpc.drac role.
45 | #monitoring_raid_config_default:
46 |
47 | # List of additional monitoring node RAID volumes. Format is same as that used
48 | # by stackhpc.drac role.
49 | #monitoring_raid_config_extra:
50 |
51 | ###############################################################################
52 | # Monitoring node software RAID configuration.
53 |
54 | # List of software RAID arrays. See mrlesmithjr.mdadm role for format.
55 | #monitoring_mdadm_arrays:
56 |
57 | ###############################################################################
58 | # Monitoring node encryption configuration.
59 |
60 | # List of block devices to encrypt. See stackhpc.luks role for format.
61 | #monitoring_luks_devices:
62 |
63 | ###############################################################################
64 | # Monitoring node LVM configuration.
65 |
66 | # List of monitoring node volume groups. See mrlesmithjr.manage_lvm role for
67 | # format.
68 | #monitoring_lvm_groups:
69 |
70 | # Default list of monitoring node volume groups. See mrlesmithjr.manage_lvm
71 | # role for format.
72 | #monitoring_lvm_groups_default:
73 |
74 | # Additional list of monitoring node volume groups. See mrlesmithjr.manage_lvm
75 | # role for format.
76 | #monitoring_lvm_groups_extra:
77 |
78 | ###############################################################################
79 | # Monitoring node sysctl configuration.
80 |
81 | # Dict of sysctl parameters to set.
82 | #monitoring_sysctl_parameters:
83 |
84 | ###############################################################################
85 | # Monitoring node tuned configuration.
86 |
87 | # Builtin tuned profile to use. Format is same as that used by giovtorres.tuned
88 | # role. Default is throughput-performance.
89 | #monitoring_tuned_active_builtin_profile:
90 |
91 | ###############################################################################
92 | # Monitoring node user configuration.
93 |
94 | # List of users to create. This should be in a format accepted by the
95 | # singleplatform-eng.users role.
96 | #monitoring_users:
97 |
98 | ###############################################################################
99 | # Monitoring node firewalld configuration.
100 |
101 | # Whether to install and enable firewalld.
102 | #monitoring_firewalld_enabled:
103 |
104 | # A list of zones to create. Each item is a dict containing a 'zone' item.
105 | #monitoring_firewalld_zones:
106 |
107 | # A firewalld zone to set as the default. Default is unset, in which case the
108 | # default zone will not be changed.
109 | #monitoring_firewalld_default_zone:
110 |
111 | # A list of firewall rules to apply. Each item is a dict containing arguments
112 | # to pass to the firewalld module. Arguments are omitted if not provided, with
113 | # the following exceptions:
114 | # - offline: true
115 | # - permanent: true
116 | # - state: enabled
117 | #monitoring_firewalld_rules:
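#
# Example (hypothetical rule opening the Prometheus node exporter port in
# the public zone):
# monitoring_firewalld_rules:
#   - port: 9100/tcp
#     zone: public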
118 |
119 | ###############################################################################
120 | # Dummy variable to allow Ansible to accept this file.
121 | workaround_ansible_issue_8743: yes
122 |
--------------------------------------------------------------------------------
/etc/kayobe/network-allocation.yml:
--------------------------------------------------------------------------------
1 | aio_ips:
2 | compute0: 192.168.33.6
3 | controller0: 192.168.33.3
4 | seed: 192.168.33.5
5 | seed-hypervisor: 192.168.33.4
6 |
--------------------------------------------------------------------------------
/etc/kayobe/networks.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Kayobe network configuration.
3 |
4 | ###############################################################################
5 | # Network role to network mappings.
6 |
7 | # Map all networks to the all-in-one network.
8 |
9 | # Name of the network used for admin access to the overcloud
10 | #admin_oc_net_name:
11 | admin_oc_net_name: aio
12 |
13 | # Name of the network used by the seed to manage the bare metal overcloud
14 | # hosts via their out-of-band management controllers.
15 | #oob_oc_net_name:
16 | oob_oc_net_name: aio
17 |
18 | # Name of the network used by the seed to provision the bare metal overcloud
19 | # hosts.
20 | #provision_oc_net_name:
21 | provision_oc_net_name: aio
22 |
23 | # Name of the network used by the overcloud hosts to manage the bare metal
24 | # compute hosts via their out-of-band management controllers.
25 | #oob_wl_net_name:
26 | oob_wl_net_name: aio
27 |
28 | # Name of the network used by the overcloud hosts to provision the bare metal
29 | # workload hosts.
30 | #provision_wl_net_name:
31 | provision_wl_net_name: aio
32 |
33 | # Name of the network used to expose the internal OpenStack API endpoints.
34 | #internal_net_name:
35 | internal_net_name: aio
36 |
37 | # List of names of networks used to provide external network access via
38 | # Neutron.
39 | # Deprecated name: external_net_name
40 | # If external_net_name is defined, external_net_names will default to a list
41 | # containing one item, external_net_name.
42 | #external_net_names:
43 | external_net_names:
44 | - aio
45 |
46 | # Name of the network used to expose the public OpenStack API endpoints.
47 | #public_net_name:
48 | public_net_name: aio
49 |
50 | # Name of the network used by Neutron to carry tenant overlay network traffic.
51 | #tunnel_net_name:
52 | tunnel_net_name: aio
53 |
54 | # Name of the network used to carry storage data traffic.
55 | #storage_net_name:
56 | storage_net_name: aio
57 |
58 | # Name of the network used to carry storage management traffic.
59 | #storage_mgmt_net_name:
60 | storage_mgmt_net_name: aio
61 |
62 | # Name of the network used to carry swift storage data traffic.
63 | #swift_storage_net_name:
64 | swift_storage_net_name: aio
65 |
66 | # Name of the network used to carry swift storage replication traffic.
67 | #swift_storage_replication_net_name:
68 | swift_storage_replication_net_name: aio
69 |
70 | # Name of the network used to perform hardware introspection on the bare metal
71 | # workload hosts.
72 | #inspection_net_name:
73 | inspection_net_name: aio
74 |
75 | # Name of the network used to perform cleaning on the bare metal workload
76 | # hosts
77 | #cleaning_net_name:
78 | cleaning_net_name: aio
79 |
80 | ###############################################################################
81 | # Network definitions.
82 |
83 | # All-in-one network.
84 | aio_cidr: 192.168.33.0/24
85 | aio_allocation_pool_start: 192.168.33.3
86 | aio_allocation_pool_end: 192.168.33.30
87 | aio_neutron_allocation_pool_start: 192.168.33.31
88 | aio_neutron_allocation_pool_end: 192.168.33.127
89 | aio_inspection_allocation_pool_start: 192.168.33.128
90 | aio_inspection_allocation_pool_end: 192.168.33.254
91 | aio_mtu: 1442
92 |
93 | ###############################################################################
94 | # Network virtual patch link configuration.
95 |
96 | # Suffix for Open vSwitch bridge names.
97 | #network_bridge_suffix_ovs:
98 |
99 | # Prefix for virtual patch interface names.
100 | #network_patch_prefix:
101 |
102 | # Suffix for virtual patch link interface names when connected towards the
103 | # physical interface.
104 | #network_patch_suffix_phy:
105 |
106 | # Suffix for virtual patch link interface names when connected towards the
107 | # OVS bridge.
108 | #network_patch_suffix_ovs:
109 |
110 | ###############################################################################
111 | # Network routing table configuration.
112 |
113 | # List of IP routing tables. Each item should be a dict containing 'id' and
114 | # 'name' items. These tables will be added to /etc/iproute2/rt_tables.
115 | #network_route_tables:
116 |
117 | ###############################################################################
118 | # Network connectivity check configuration.
119 |
120 | # External IP address to check. Default is 8.8.8.8.
121 | #nc_external_ip:
122 |
123 | # External hostname to check. Default is google.com.
124 | #nc_external_hostname:
125 |
126 | ###############################################################################
127 | # Dummy variable to allow Ansible to accept this file.
128 | workaround_ansible_issue_8743: yes
129 |
--------------------------------------------------------------------------------
/etc/kayobe/neutron.yml:
--------------------------------------------------------------------------------
1 | ---
2 | ###############################################################################
3 | # Neutron configuration.
4 |
5 | # List of Neutron ML2 mechanism drivers to use. If unset the kolla-ansible
6 | # defaults will be used.
7 | #kolla_neutron_ml2_mechanism_drivers:
8 |
9 | # List of Neutron ML2 type drivers to use.
10 | #kolla_neutron_ml2_type_drivers:
11 |
12 | # List of Neutron ML2 tenant network types to use.
13 | #kolla_neutron_ml2_tenant_network_types:
14 |
15 | # List of Neutron ML2 network VLAN ranges to use. Each item should be a dict
16 | # containing the following items:
17 | # physical_network: The physical network
18 | # range: Range of allowed VLANs on this physical network (format <min>:<max>,
19 | # optional)
20 | #kolla_neutron_ml2_network_vlan_ranges:
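#
# Example (the physical network name and range are illustrative):
# kolla_neutron_ml2_network_vlan_ranges:
#   - physical_network: physnet1
#     range: "100:200"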
21 |
22 | # List of Neutron ML2 extension drivers to use.
23 | #kolla_neutron_ml2_extension_drivers:
24 |
25 | ###############################################################################
26 | # Neutron ML2 generic switch driver configuration.
27 |
28 | # List of switches to configure for use by genericswitch ML2 mechanism driver.
29 | # Each item should be a dict containing the following items:
30 | # name: Hostname of the switch
31 | # ip: IP address on which to reach the switch
32 | # username: SSH username
33 | # password: SSH password (optional)
34 | # key_file: SSH key file (optional)
35 | # secret: SSH secret (optional)
36 | #kolla_neutron_ml2_generic_switches:
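#
# Example (hypothetical switch; the address and credentials are
# placeholders):
# kolla_neutron_ml2_generic_switches:
#   - name: switch1
#     ip: 192.0.2.10
#     username: admin
#     password: "{{ secrets_switch1_password }}"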
37 |
38 | # List of Ansible hosts representing switches to configure for use by
39 | # genericswitch ML2 mechanism driver. These switches will be appended to
40 | # kolla_neutron_ml2_generic_switches and their configuration will be determined
41 | # by the following host variables:
42 | # name: inventory_hostname
43 | # ip: ansible_host
44 | # username: ansible_user
45 | # password: ansible_ssh_pass
46 | # key_file: not currently supported
47 | # secret: not currently supported
48 | #kolla_neutron_ml2_generic_switch_hosts:
49 |
50 | # List of Ansible hosts whose switch interfaces are to be configured as tagged
51 | # members of all networks managed by the genericswitch ML2 mechanism driver.
52 | # These hosts will be matched against the description fields in the
53 | # switch_interface_config variable for each switch to determine which
54 | # interfaces should be configured.
55 | #kolla_neutron_ml2_generic_switch_trunk_port_hosts:
56 |
57 | # Dict containing additional configuration for switches managed by the
58 | # genericswitch ML2 mechanism driver. For per-switch configuration of switches
59 | # in kolla_neutron_ml2_generic_switch_hosts, this may be set as a group or
60 | # host variable for the switch host.
61 | #kolla_neutron_ml2_generic_switch_extra:
62 |
63 | ###############################################################################
64 | # Dummy variable to allow Ansible to accept this file.
65 | workaround_ansible_issue_8743: yes
66 |
--------------------------------------------------------------------------------
/etc/kayobe/nova.yml:
--------------------------------------------------------------------------------
1 | ---
2 | ###############################################################################
3 | # Nova configuration.
4 |
5 | # Which host to use to deploy the nova-compute services for ironic. By default
6 | # this is none and all hosts in the nova group are used instead - typically the
7 | # controllers.
8 | #kolla_nova_compute_ironic_host:
9 |
10 | ###############################################################################
11 | # Dummy variable to allow Ansible to accept this file.
12 | workaround_ansible_issue_8743: yes
13 |
--------------------------------------------------------------------------------
/etc/kayobe/opensm.yml:
--------------------------------------------------------------------------------
1 | ---
2 | ###############################################################################
3 | # OpenSM Infiniband subnet manager configuration.
4 |
5 | # Whether OpenSM is enabled.
6 | #opensm_enabled:
7 |
8 | ###############################################################################
9 | # Dummy variable to allow Ansible to accept this file.
10 | workaround_ansible_issue_8743: yes
11 |
--------------------------------------------------------------------------------
/etc/kayobe/openstack.yml:
--------------------------------------------------------------------------------
1 | ---
2 | ###############################################################################
3 | # OpenStack release configuration.
4 |
5 | # Name of the current OpenStack release. Default is "2025.1".
6 | #openstack_release:
7 |
8 | # Name of the current OpenStack branch. Default is "stable/2025.1".
9 | #openstack_branch:
10 |
11 | ###############################################################################
12 | # Dummy variable to allow Ansible to accept this file.
13 | workaround_ansible_issue_8743: yes
14 |
--------------------------------------------------------------------------------
/etc/kayobe/overcloud-dib.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Overcloud host disk image configuration.
3 |
4 | ###############################################################################
5 | # Diskimage-builder configuration for overcloud host disk images.
6 |
7 | # Whether to build host disk images with DIB directly instead of through
8 | # Bifrost. Setting it to true disables Bifrost image build and allows images to
9 | # be built with the `kayobe overcloud host image build` command. Default value
10 | # is true.
11 | #overcloud_dib_build_host_images:
12 |
13 | # List of additional build host packages to install. Default is an empty list.
14 | #overcloud_dib_host_packages_extra:
15 |
16 | # List of overcloud host disk images to build. Each element is a dict defining
17 | # an image in a format accepted by the stackhpc.openstack.os_images role.
18 | # Default is to build an image named "deployment_image" configured with the
19 | # overcloud_dib_* variables defined below:
20 | # {"name": "deployment_image", "elements": "{{
21 | # overcloud_dib_elements }}", "env": "{{ overcloud_dib_env_vars }}",
22 | # "packages": "{{ overcloud_dib_packages }}"}.
23 | #overcloud_dib_host_images:
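# The default described above, restated as YAML for reference:
# overcloud_dib_host_images:
#   - name: deployment_image
#     elements: "{{ overcloud_dib_elements }}"
#     env: "{{ overcloud_dib_env_vars }}"
#     packages: "{{ overcloud_dib_packages }}"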
24 |
25 | # DIB base OS element. Default is {{ 'rocky-container' if os_distribution ==
26 | # 'rocky' else os_distribution }}.
27 | #overcloud_dib_os_element:
28 |
29 | # DIB image OS release. Default is {{ os_release }}.
30 | #overcloud_dib_os_release:
31 |
32 | # List of default DIB elements. Default is ["{{ overcloud_dib_os_element }}",
33 | # "cloud-init", "cloud-init-datasources", "enable-serial-console", "vm"].
34 | #overcloud_dib_elements_default:
35 |
36 | # List of additional DIB elements. Default is none.
37 | #overcloud_dib_elements_extra:
38 |
39 | # List of DIB elements. Default is a combination of
40 | # overcloud_dib_elements_default and overcloud_dib_elements_extra.
41 | #overcloud_dib_elements:
42 |
43 | # DIB default environment variables. Default is
44 | # {"DIB_BOOTLOADER_DEFAULT_CMDLINE": "nofb nomodeset gfxpayload=text
45 | # net.ifnames=1", "DIB_CLOUD_INIT_DATASOURCES": "ConfigDrive",
46 | # "DIB_CONTAINERFILE_RUNTIME": "docker", "DIB_CONTAINERFILE_NETWORK_DRIVER":
47 | # "host", "DIB_RELEASE": "{{ overcloud_dib_os_release }}"}.
48 | #overcloud_dib_env_vars_default:
49 |
50 | # DIB additional environment variables. Default is none.
51 | #overcloud_dib_env_vars_extra:
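# A sketch of adding an extra environment variable; DIB_DEV_USER_USERNAME
# belongs to the devuser element and is shown here as a hypothetical example:
# overcloud_dib_env_vars_extra:
#   DIB_DEV_USER_USERNAME: devuser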
52 |
53 | # DIB environment variables. Default is a combination of
54 | # overcloud_dib_env_vars_default and overcloud_dib_env_vars_extra.
55 | #overcloud_dib_env_vars:
56 |
57 | # List of DIB packages to install. Default is to install no extra packages.
58 | #overcloud_dib_packages:
59 |
60 | # List of default git repositories containing Diskimage Builder (DIB) elements.
61 | # See stackhpc.openstack.os_images role for usage. Default is empty.
62 | #overcloud_dib_git_elements_default:
63 |
64 | # List of additional git repositories containing Diskimage Builder (DIB)
65 | # elements. See stackhpc.openstack.os_images role for usage. Default is empty.
66 | #overcloud_dib_git_elements_extra:
67 |
68 | # List of git repositories containing Diskimage Builder (DIB) elements. See
69 | # stackhpc.openstack.os_images role for usage. Default is a combination of
70 | # overcloud_dib_git_elements_default and overcloud_dib_git_elements_extra.
71 | #overcloud_dib_git_elements:
72 |
73 | # Upper constraints file for installing packages in the virtual environment
74 | # used for building overcloud host disk images. Default is {{
75 | # pip_upper_constraints_file }}.
76 | #overcloud_dib_upper_constraints_file:
77 |
78 | # Upper constraints file for installation of DIB to build overcloud host
79 | # disk images. Default is an empty string.
80 | #overcloud_dib_dib_upper_constraints_file:
81 |
82 | ###############################################################################
83 | # Dummy variable to allow Ansible to accept this file.
84 | workaround_ansible_issue_8743: yes
85 |
--------------------------------------------------------------------------------
/etc/kayobe/overcloud.yml:
--------------------------------------------------------------------------------
1 | ---
2 | ###############################################################################
3 | # Overcloud configuration.
4 |
5 | # Default Ansible group for overcloud hosts.
6 | #overcloud_group_default:
7 |
8 | # List of names of Ansible groups for overcloud hosts.
9 | #overcloud_groups:
10 |
11 | # Dict mapping overcloud Ansible group names to lists of hosts in the group.
12 | # As a special case, the group 'ignore' can be used to specify hosts that
13 | # should not be added to the inventory.
14 | #overcloud_group_hosts_map:
15 | overcloud_group_hosts_map:
16 | controllers:
17 | - controller0
18 | compute:
19 | - compute0
20 |
21 | # To prevent some network issues, you can choose to disable cloud-init.
22 | #disable_cloud_init:
23 |
24 | ###############################################################################
25 | # Dummy variable to allow Ansible to accept this file.
26 | workaround_ansible_issue_8743: yes
27 |
--------------------------------------------------------------------------------
/etc/kayobe/pip.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | # Upper constraints file for installation of Python packages.
4 | # Default value is
5 | # "https://releases.openstack.org/constraints/upper/{{ openstack_release }}"
6 | #pip_upper_constraints_file:
7 |
8 | # Use a local PyPI mirror for installing pip packages.
9 | #pip_local_mirror: false
10 |
11 | # Users for which the necessary configuration will be put in place in order to
12 | # install PyPI packages from a mirror.
13 | # NB: the Kolla user will be automatically added to this list if
14 | # pip_local_mirror is set to true.
15 | #pip_applicable_users:
16 | # - "{{ kayobe_ansible_user }}"
17 | # - root
18 |
19 | # PyPI local package mirror URL
20 | #pip_index_url: ""
21 |
22 | # Optional: a list of 'trusted' hosts for which SSL verification will be
23 | # disabled
24 | #pip_trusted_hosts: []
25 |
26 | # PyPI proxy URL (format: http(s)://[user:password@]proxy_name:port)
27 | #pip_proxy: ""
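# A minimal sketch combining the options above to use a local mirror
# (mirror.example.com is a hypothetical host):
# pip_local_mirror: true
# pip_index_url: "http://mirror.example.com/pypi/simple"
# pip_trusted_hosts:
#   - "mirror.example.com"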
28 |
29 | ###############################################################################
30 | # Dummy variable to allow Ansible to accept this file.
31 | workaround_ansible_issue_8743: yes
32 |
--------------------------------------------------------------------------------
/etc/kayobe/proxy.yml:
--------------------------------------------------------------------------------
1 | ---
2 | ###############################################################################
3 | # Configuration of HTTP(S) proxies.
4 |
5 | # HTTP proxy URL (format: http(s)://[user:password@]proxy_name:port). By
6 | # default no proxy is used.
7 | #http_proxy:
8 |
9 | # HTTPS proxy URL (format: http(s)://[user:password@]proxy_name:port). By
10 | # default no proxy is used.
11 | #https_proxy:
12 |
13 | # List of domains, hostnames, IP addresses and networks for which no proxy is
14 | # used. Defaults to ["127.0.0.1", "localhost", "{{ ('http://' ~
15 | # docker_registry) | urlsplit('hostname') }}","{{ kolla_internal_vip_address
16 | # }}"] if docker_registry is set, or ["127.0.0.1", "localhost","{{
17 | # kolla_internal_vip_address }}"] otherwise. This is configured only if either
18 | # http_proxy or https_proxy is set.
19 | #no_proxy:
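# A minimal sketch, assuming a hypothetical proxy at proxy.example.com:
# http_proxy: "http://proxy.example.com:3128"
# https_proxy: "{{ http_proxy }}"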
20 |
21 | ###############################################################################
22 | # Dummy variable to allow Ansible to accept this file.
23 | workaround_ansible_issue_8743: yes
24 |
--------------------------------------------------------------------------------
/etc/kayobe/seed-hypervisor.yml:
--------------------------------------------------------------------------------
1 | ---
2 | ###############################################################################
3 | # Seed hypervisor node configuration.
4 |
5 | # User with which to access the seed hypervisor via SSH during bootstrap, in
6 | # order to set up the Kayobe user account. Default is {{ os_distribution }}.
7 | seed_hypervisor_bootstrap_user: "{{ lookup('env', 'USER') }}"
8 |
9 | ###############################################################################
10 | # Seed hypervisor network interface configuration.
11 |
12 | # List of networks to which seed hypervisor nodes are attached.
13 | #seed_hypervisor_network_interfaces:
14 |
15 | # List of default networks to which seed hypervisor nodes are attached.
16 | #seed_hypervisor_default_network_interfaces:
17 |
18 | # List of extra networks to which seed hypervisor nodes are attached.
19 | #seed_hypervisor_extra_network_interfaces:
20 |
21 | # Whether to enable SNAT on seed hypervisor node. Default is false.
22 | #seed_hypervisor_enable_snat:
23 |
24 | ###############################################################################
25 | # Seed hypervisor node software RAID configuration.
26 |
27 | # List of software RAID arrays. See mrlesmithjr.mdadm role for format.
28 | #seed_hypervisor_mdadm_arrays:
29 |
30 | ###############################################################################
31 | # Seed hypervisor node encryption configuration.
32 |
33 | # List of block devices to encrypt. See stackhpc.luks role for format.
34 | #seed_hypervisor_luks_devices:
35 |
36 | ###############################################################################
37 | # Seed hypervisor node LVM configuration.
38 |
39 | # List of seed hypervisor volume groups. See mrlesmithjr.manage_lvm role for
40 | # format. Set to "{{ seed_hypervisor_lvm_groups_with_data }}" to create a
41 | # volume group for libvirt storage.
42 | #seed_hypervisor_lvm_groups:
43 |
44 | # Suggested list of seed hypervisor volume groups for libvirt. Not used by
45 | # default.
46 | #seed_hypervisor_lvm_groups_with_data:
47 |
48 | # Seed LVM volume group for data. See mrlesmithjr.manage_lvm role for format.
49 | #seed_hypervisor_lvm_group_data:
50 |
51 | # List of disks for use by seed hypervisor LVM data volume group. Defaults to
52 | # an invalid value to require configuration.
53 | #seed_hypervisor_lvm_group_data_disks:
54 |
55 | # List of LVM logical volumes for the data volume group.
56 | #seed_hypervisor_lvm_group_data_lvs:
57 |
58 | # Libvirt storage LVM backing volume.
59 | #seed_hypervisor_lvm_group_data_lv_libvirt_storage:
60 |
61 | # Size of libvirt storage LVM backing volume.
62 | #seed_hypervisor_lvm_group_data_lv_libvirt_storage_size:
63 |
64 | # Filesystem for libvirt storage LVM backing volume. ext4 allows for shrinking.
65 | #seed_hypervisor_lvm_group_data_lv_libvirt_storage_fs:
66 |
67 | ###############################################################################
68 | # Seed hypervisor libvirt storage pool configuration.
69 |
70 | # List of libvirt storage pools for the seed hypervisor.
71 | #seed_hypervisor_libvirt_pools:
72 |
73 | # Libvirt storage pool for the seed VM.
74 | #seed_hypervisor_libvirt_pool:
75 |
76 | # Name of the libvirt storage pool for the seed VM.
77 | #seed_hypervisor_libvirt_pool_name:
78 |
79 | # Directory path of the libvirt storage pool for the seed VM.
80 | #seed_hypervisor_libvirt_pool_path:
81 |
82 | # Directory mode of the libvirt storage pool for the seed VM.
83 | #seed_hypervisor_libvirt_pool_mode:
84 |
85 | # Directory owner of the libvirt storage pool for the seed VM.
86 | #seed_hypervisor_libvirt_pool_owner:
87 |
88 | # Directory group of the libvirt storage pool for the seed VM.
89 | #seed_hypervisor_libvirt_pool_group:
90 |
91 | ###############################################################################
92 | # Seed hypervisor libvirt network configuration.
93 |
94 | # List of libvirt networks for the seed hypervisor.
95 | #seed_hypervisor_libvirt_networks:
96 |
97 | ###############################################################################
98 | # Seed hypervisor sysctl configuration.
99 |
100 | # Dict of sysctl parameters to set.
101 | #seed_hypervisor_sysctl_parameters:
102 |
103 | ###############################################################################
104 | # Seed hypervisor tuned configuration.
105 |
106 | # Builtin tuned profile to use. Format is the same as that used by the
107 | # giovtorres.tuned role. Default is virtual-host.
108 | #seed_hypervisor_tuned_active_builtin_profile:
109 |
110 | ###############################################################################
111 | # Seed hypervisor user configuration.
112 |
113 | # List of users to create. This should be in a format accepted by the
114 | # singleplatform-eng.users role.
115 | #seed_hypervisor_users:
116 |
117 | ###############################################################################
118 | # Seed hypervisor node firewalld configuration.
119 |
120 | # Whether to install and enable firewalld.
121 | #seed_hypervisor_firewalld_enabled:
122 |
123 | # A list of zones to create. Each item is a dict containing a 'zone' item.
124 | #seed_hypervisor_firewalld_zones:
125 |
126 | # A firewalld zone to set as the default. Default is unset, in which case the
127 | # default zone will not be changed.
128 | #seed_hypervisor_firewalld_default_zone:
129 |
130 | # A list of firewall rules to apply. Each item is a dict containing arguments
131 | # to pass to the firewalld module. Arguments are omitted if not provided, with
132 | # the following exceptions:
133 | # - offline: true
134 | # - permanent: true
135 | # - state: enabled
136 | #seed_hypervisor_firewalld_rules:
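# For example, a single hypothetical rule permitting SSH in the public zone
# (arguments map directly to the firewalld module):
# seed_hypervisor_firewalld_rules:
#   - service: ssh
#     zone: public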
137 |
138 | ###############################################################################
139 | # Dummy variable to allow Ansible to accept this file.
140 | workaround_ansible_issue_8743: yes
141 |
--------------------------------------------------------------------------------
/etc/kayobe/seed-vm.yml:
--------------------------------------------------------------------------------
1 | ---
2 | ###############################################################################
3 | # Seed node VM configuration.
4 |
5 | # Name of the seed VM.
6 | #seed_vm_name:
7 |
8 | # Memory in MB.
9 | #seed_vm_memory_mb:
10 | seed_vm_memory_mb: "{{ 4 * 1024 }}"
11 |
12 | # Number of vCPUs.
13 | #seed_vm_vcpus:
14 | seed_vm_vcpus: 1
15 |
16 | # List of volumes.
17 | #seed_vm_volumes:
18 |
19 | # Root volume.
20 | #seed_vm_root_volume:
21 |
22 | # Data volume.
23 | #seed_vm_data_volume:
24 |
25 | # Name of the storage pool for the seed VM volumes.
26 | #seed_vm_pool:
27 |
28 | # Capacity of the seed VM root volume.
29 | #seed_vm_root_capacity:
30 |
31 | # Format of the seed VM root volume.
32 | #seed_vm_root_format:
33 |
34 | # Base image for the seed VM root volume. Default is
35 | # "https://cloud-images.ubuntu.com/noble/current/noble-server-cloudimg-amd64.img"
36 | # when os_distribution is "ubuntu",
37 | # "https://dl.rockylinux.org/pub/rocky/9/images/x86_64/Rocky-9-GenericCloud.latest.x86_64.qcow2"
38 | # when os_distribution is "rocky",
39 | # or
40 | # "https://cloud.centos.org/centos/9-stream/x86_64/images/CentOS-Stream-GenericCloud-9-20221206.0.x86_64.qcow2"
41 | # otherwise.
42 | #seed_vm_root_image:
43 |
44 | # Capacity of the seed VM data volume.
45 | #seed_vm_data_capacity:
46 |
47 | # Format of the seed VM data volume.
48 | #seed_vm_data_format:
49 |
50 | # List of network interfaces to attach to the seed VM. Format is as accepted by
51 | # the stackhpc.libvirt-vm role's libvirt_vms.interfaces variable. Default is
52 | # one interface for each network in 'network_interfaces'.
53 | #
54 | # Example with one interface connected to a libvirt network called
55 | # 'libvirt-net', and another interface directly plugged into a host device
56 | # called 'eth1':
57 | #
58 | # seed_vm_interfaces:
59 | # - network: libvirt-net
60 | # - type: direct
61 | # source:
62 | # dev: eth1
63 | #
64 | #seed_vm_interfaces:
65 |
66 | # Boot firmware. Possible values are 'bios' or 'efi'. Default is 'efi'.
67 | #seed_vm_boot_firmware:
68 |
69 | # Machine type. Libvirt default configuration is used.
70 | #seed_vm_machine:
71 |
72 | ###############################################################################
73 | # Dummy variable to allow Ansible to accept this file.
74 | workaround_ansible_issue_8743: yes
75 |
--------------------------------------------------------------------------------
/etc/kayobe/seed.yml:
--------------------------------------------------------------------------------
1 | ---
2 | ###############################################################################
3 | # Seed node configuration.
4 |
5 | # User with which to access the seed via SSH during bootstrap, in order to
6 | # set up the Kayobe user account. Default is {{ os_distribution }}.
7 | #seed_bootstrap_user:
8 |
9 | ###############################################################################
10 | # Seed network interface configuration.
11 |
12 | # List of networks to which seed nodes are attached.
13 | #seed_network_interfaces:
14 |
15 | # List of default networks to which seed nodes are attached.
16 | #seed_default_network_interfaces:
17 |
18 | # List of extra networks to which seed nodes are attached.
19 | #seed_extra_network_interfaces:
20 |
21 | # Whether to enable SNAT on seed nodes. Default is false.
22 | #seed_enable_snat:
23 |
24 | ###############################################################################
25 | # Seed node software RAID configuration.
26 |
27 | # List of software RAID arrays. See mrlesmithjr.mdadm role for format.
28 | #seed_mdadm_arrays:
29 |
30 | ###############################################################################
31 | # Seed node encryption configuration.
32 |
33 | # List of block devices to encrypt. See stackhpc.luks role for format.
34 | #seed_luks_devices:
35 |
36 | ###############################################################################
37 | # Seed node LVM configuration.
38 |
39 | # List of seed volume groups. See mrlesmithjr.manage_lvm role for format.
40 | #seed_lvm_groups:
41 |
42 | # Default list of seed volume groups. See mrlesmithjr.manage_lvm role for
43 | # format.
44 | #seed_lvm_groups_default:
45 |
46 | # Additional list of seed volume groups. See mrlesmithjr.manage_lvm role for
47 | # format.
48 | #seed_lvm_groups_extra:
49 |
50 | # Whether a 'data' LVM volume group should exist on the seed. By default this
51 | # contains a 'docker-volumes' logical volume for Docker volume storage.
52 | # Default is false.
53 | #seed_lvm_group_data_enabled:
54 |
55 | # Seed LVM volume group for data. See mrlesmithjr.manage_lvm role for format.
56 | #seed_lvm_group_data:
57 |
58 | # List of disks for use by seed LVM data volume group. Defaults to an invalid
59 | # value to require configuration.
60 | #seed_lvm_group_data_disks:
61 |
62 | # List of LVM logical volumes for the data volume group.
63 | #seed_lvm_group_data_lvs:
64 |
65 | # Docker volumes LVM backing volume.
66 | #seed_lvm_group_data_lv_docker_volumes:
67 |
68 | # Size of docker volumes LVM backing volume.
69 | #seed_lvm_group_data_lv_docker_volumes_size:
70 |
71 | # Filesystem for docker volumes LVM backing volume. ext4 allows for shrinking.
72 | #seed_lvm_group_data_lv_docker_volumes_fs:
73 |
74 | ###############################################################################
75 | # Seed node sysctl configuration.
76 |
77 | # Dict of sysctl parameters to set.
78 | #seed_sysctl_parameters:
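# For example, to enable IPv4 forwarding on the seed (a hypothetical setting):
# seed_sysctl_parameters:
#   net.ipv4.ip_forward: 1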
79 |
80 | ###############################################################################
81 | # Seed node tuned configuration.
82 |
83 | # Builtin tuned profile to use. Format is the same as that used by the
84 | # giovtorres.tuned role. Default is virtual-guest.
85 | #seed_tuned_active_builtin_profile:
86 |
87 | ###############################################################################
88 | # Seed node user configuration.
89 |
90 | # List of users to create. This should be in a format accepted by the
91 | # singleplatform-eng.users role.
92 | #seed_users:
93 |
94 | ###############################################################################
95 | # Seed node additional containers configuration.
96 |
97 | # Dict of container images to start.
98 | # Example:
99 | # seed_containers:
100 | # squid:
101 | # image: "stackhpc/squid:3.5.20-1"
102 | # pre: "{{ kayobe_env_config_path }}/containers/squid/pre.yml"
103 | # post: "{{ kayobe_env_config_path }}/containers/squid/post.yml"
104 | #
105 | #seed_containers:
106 |
107 | # Whether to attempt a basic authentication login to a registry when
108 | # deploying seed containers.
109 | #seed_deploy_containers_registry_attempt_login:
110 |
111 | ###############################################################################
112 | # Seed node firewalld configuration.
113 |
114 | # Whether to install and enable firewalld.
115 | #seed_firewalld_enabled:
116 |
117 | # A list of zones to create. Each item is a dict containing a 'zone' item.
118 | #seed_firewalld_zones:
119 |
120 | # A firewalld zone to set as the default. Default is unset, in which case the
121 | # default zone will not be changed.
122 | #seed_firewalld_default_zone:
123 |
124 | # A list of firewall rules to apply. Each item is a dict containing arguments
125 | # to pass to the firewalld module. Arguments are omitted if not provided, with
126 | # the following exceptions:
127 | # - offline: true
128 | # - permanent: true
129 | # - state: enabled
130 | #seed_firewalld_rules:
131 |
132 | ###############################################################################
133 | # Dummy variable to allow Ansible to accept this file.
134 | workaround_ansible_issue_8743: yes
135 |
--------------------------------------------------------------------------------
/etc/kayobe/ssh.yml:
--------------------------------------------------------------------------------
1 | ---
2 | ###############################################################################
3 | # SSH configuration.
4 |
5 | # Type of SSH key. Default is "rsa".
6 | #ssh_key_type:
7 |
8 | # Name of SSH key.
9 | #ssh_key_name:
10 |
11 | # Path to SSH private key on the Ansible control host.
12 | #ssh_private_key_path:
13 |
14 | # Path to SSH public key on the Ansible control host.
15 | #ssh_public_key_path:
16 |
17 | ###############################################################################
18 | # Dummy variable to allow Ansible to accept this file.
19 | workaround_ansible_issue_8743: yes
20 |
--------------------------------------------------------------------------------
/etc/kayobe/storage.yml:
--------------------------------------------------------------------------------
1 | ---
2 | ###############################################################################
3 | # Storage node configuration.
4 |
5 | # User with which to access the storage hosts via SSH during bootstrap, in
6 | # order to set up the Kayobe user account. Default is {{ os_distribution }}.
7 | #storage_bootstrap_user:
8 |
9 | ###############################################################################
10 | # Storage network interface configuration.
11 |
12 | # List of networks to which storage nodes are attached.
13 | #storage_network_interfaces:
14 |
15 | # List of default networks to which storage nodes are attached.
16 | #storage_default_network_interfaces:
17 |
18 | # List of extra networks to which storage nodes are attached.
19 | #storage_extra_network_interfaces:
20 |
21 | # Whether this host requires access to Swift networks.
22 | #storage_needs_swift_network:
23 |
# Whether this host requires access to the Swift replication network.
24 | #storage_needs_swift_replication_network:
25 |
26 | ###############################################################################
27 | # Storage node BIOS configuration.
28 |
29 | # Dict of storage BIOS options. Format is the same as that used by the
30 | # stackhpc.drac role.
31 | #storage_bios_config:
32 |
33 | # Dict of default storage BIOS options. Format is the same as that used by the
34 | # stackhpc.drac role.
35 | #storage_bios_config_default:
36 |
37 | # Dict of additional storage BIOS options. Format is the same as that used by
38 | # the stackhpc.drac role.
39 | #storage_bios_config_extra:
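# A sketch of an additional BIOS option ('NumLock' is a hypothetical Dell BIOS
# attribute, used only for illustration):
# storage_bios_config_extra:
#   NumLock: "On"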
40 |
41 | ###############################################################################
42 | # Storage node RAID configuration.
43 |
44 | # List of storage RAID volumes. Format is the same as that used by the
45 | # stackhpc.drac role.
46 | #storage_raid_config:
47 |
48 | # List of default storage RAID volumes. Format is the same as that used by the
49 | # stackhpc.drac role.
50 | #storage_raid_config_default:
51 |
52 | # List of additional storage RAID volumes. Format is the same as that used by
53 | # the stackhpc.drac role.
54 | #storage_raid_config_extra:
55 |
56 | ###############################################################################
57 | # Storage node software RAID configuration.
58 |
59 | # List of software RAID arrays. See mrlesmithjr.mdadm role for format.
60 | #storage_mdadm_arrays:
61 |
62 | ###############################################################################
63 | # Storage node encryption configuration.
64 |
65 | # List of block devices to encrypt. See stackhpc.luks role for format.
66 | #storage_luks_devices:
67 |
68 | ###############################################################################
69 | # Storage node LVM configuration.
70 |
71 | # List of storage volume groups. See mrlesmithjr.manage_lvm role for
72 | # format.
73 | #storage_lvm_groups:
74 |
75 | # Default list of storage volume groups. See mrlesmithjr.manage_lvm role for
76 | # format.
77 | #storage_lvm_groups_default:
78 |
79 | # Additional list of storage volume groups. See mrlesmithjr.manage_lvm role
80 | # for format.
81 | #storage_lvm_groups_extra:
82 |
83 | # Whether a 'data' LVM volume group should exist on storage hosts. By default
84 | # this contains a 'docker-volumes' logical volume for Docker volume storage.
85 | # Default is false.
86 | #storage_lvm_group_data_enabled:
87 |
88 | # Storage LVM volume group for data. See mrlesmithjr.manage_lvm role for
89 | # format.
90 | #storage_lvm_group_data:
91 |
92 | # List of disks for use by storage LVM data volume group. Defaults to an
93 | # invalid value to require configuration.
94 | #storage_lvm_group_data_disks:
95 |
96 | # List of LVM logical volumes for the data volume group.
97 | #storage_lvm_group_data_lvs:
98 |
99 | # Docker volumes LVM backing volume.
100 | #storage_lvm_group_data_lv_docker_volumes:
101 |
102 | # Size of docker volumes LVM backing volume.
103 | #storage_lvm_group_data_lv_docker_volumes_size:
104 |
105 | # Filesystem for docker volumes LVM backing volume. ext4 allows for shrinking.
106 | #storage_lvm_group_data_lv_docker_volumes_fs:
107 |
108 | ###############################################################################
109 | # Storage node sysctl configuration.
110 |
111 | # Dict of sysctl parameters to set.
112 | #storage_sysctl_parameters:
113 |
114 | ###############################################################################
115 | # Storage node tuned configuration.
116 |
117 | # Builtin tuned profile to use. Format is the same as that used by the
118 | # giovtorres.tuned role. Default is throughput-performance.
119 | #storage_tuned_active_builtin_profile:
120 |
121 | ###############################################################################
122 | # Storage node user configuration.
123 |
124 | # List of users to create. This should be in a format accepted by the
125 | # singleplatform-eng.users role.
126 | #storage_users:
127 |
128 | ###############################################################################
129 | # Storage node firewalld configuration.
130 |
131 | # Whether to install and enable firewalld.
132 | #storage_firewalld_enabled:
133 |
134 | # A list of zones to create. Each item is a dict containing a 'zone' item.
135 | #storage_firewalld_zones:
136 |
137 | # A firewalld zone to set as the default. Default is unset, in which case the
138 | # default zone will not be changed.
139 | #storage_firewalld_default_zone:
140 |
141 | # A list of firewall rules to apply. Each item is a dict containing arguments
142 | # to pass to the firewalld module. Arguments are omitted if not provided, with
143 | # the following exceptions:
144 | # - offline: true
145 | # - permanent: true
146 | # - state: enabled
147 | #storage_firewalld_rules:
148 |
149 | ###############################################################################
150 | # Dummy variable to allow Ansible to accept this file.
151 | workaround_ansible_issue_8743: yes
152 |
--------------------------------------------------------------------------------
/etc/kayobe/swift.yml:
--------------------------------------------------------------------------------
1 | ---
2 | ###############################################################################
3 | # OpenStack Swift configuration.
4 |
5 | # Short name of the kolla container image used to build rings. Default is the
6 | # swift-object image.
7 | #swift_ring_build_image_name:
8 |
9 | # Full name of the kolla container image used to build rings.
10 | #swift_ring_build_image:
11 |
12 | # Ansible host pattern matching hosts on which Swift object storage services
13 | # are deployed. The default is to use hosts in the 'storage' group.
14 | #swift_hosts:
15 |
16 | # Name of the host used to build Swift rings. Default is the first host of
17 | # 'swift_hosts'.
18 | #swift_ring_build_host:
19 |
20 | # ID of the Swift region for this host. Default is 1.
21 | #swift_region:
22 |
23 | # ID of the Swift zone. This can be set to different values for different hosts
24 | # to place them in different zones. Default is 0.
25 | #swift_zone:
26 |
27 | # Base-2 logarithm of the number of partitions, i.e.
28 | # num_partitions = 2 ^ swift_part_power. Default is 10 (1024 partitions).
29 | #swift_part_power:
30 |
31 | # Object replication count. Default is the smaller of the number of Swift
32 | # hosts and 3.
33 | #swift_replication_count:
34 |
35 | # Minimum time in hours between moving a given partition. Default is 1.
36 | #swift_min_part_hours:
37 |
38 | # Ports on which Swift services listen. Default is:
39 | # object: 6000
40 | # account: 6001
41 | # container: 6002
42 | #swift_service_ports:
43 |
44 | # List of block devices to use for Swift. Each item is a dict with the
45 | # following items:
46 | # - 'device': Block device path. Required.
47 | # - 'fs_label': Name of the label used to create the file system on the device.
48 | # Optional. Default is to use the basename of the device.
49 | # - 'services': List of services that will use this block device. Optional.
50 | # Default is 'swift_block_device_default_services'. Allowed items are
51 | # 'account', 'container', and 'object'.
52 | # - 'weight': Weight of the block device. Optional. Default is
53 | # 'swift_block_device_default_weight'.
54 | #swift_block_devices:
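# A sketch using two hypothetical devices, one restricted to the object
# service:
# swift_block_devices:
#   - device: /dev/sdb
#   - device: /dev/sdc
#     services:
#       - object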
55 |
56 | # Default weight to assign to block devices in the ring. Default is 100.
57 | #swift_block_device_default_weight:
58 |
59 | # Default list of services to assign block devices to. Allowed items are
60 | # 'account', 'container', and 'object'. Default value is all of these.
61 | #swift_block_device_default_services:
62 |
63 | ###############################################################################
64 | # Dummy variable to allow Ansible to accept this file.
65 | workaround_ansible_issue_8743: yes
66 |
--------------------------------------------------------------------------------
/etc/kayobe/time.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Kayobe time configuration.
3 |
4 | ###############################################################################
5 | # Timezone.
6 |
7 | # Name of the local timezone.
8 | #timezone:
9 |
10 | ###############################################################################
11 | # Network Time Protocol (NTP).
12 |
13 | # List of NTP time sources to configure. Format is a list of dictionaries with
14 | # the following keys:
15 | # server: host or pool
16 | # type: (Optional) Defaults to server. Maps to a time source in the
17 | # configuration file. Can be one of server, peer, pool.
18 | # options: (Optional) List of options that depend on the type, see Chrony
19 | # documentation for details.
20 | # See: https://chrony.tuxfamily.org/doc/4.0/chrony.conf.html
21 | #
22 | # Example of configuring a pool and customising the pool specific maxsources
23 | # option:
24 | # chrony_ntp_servers:
25 | # - server: pool.ntp.org
26 | # type: pool
27 | # options:
28 | # - option: maxsources
29 | # val: 3
30 | #
31 | #chrony_ntp_servers:
32 |
33 | # Synchronise hardware clock with system time. Default is true.
34 | #chrony_rtcsync_enabled:
35 |
36 | # Force synchronisation from NTP sources. This method may jump the clock by a
37 | # large value, which can cause issues with some software. Disabled by default.
38 | #ntp_force_sync:
39 |
40 | # Maximum number of tries used by the `chronyc waitsync` command. Only used
41 | # when ntp_force_sync is true. Default is 60, which waits for a maximum of 10
42 | # minutes (60 times 10 seconds).
43 | #chrony_waitsync_max_tries:
44 |
45 | # Maximum correction used by the `chronyc waitsync` command. Only used when
46 | # ntp_force_sync is true. Default is 0.01, which waits for the remaining
47 | # correction to be less than 10 milliseconds.
48 | #chrony_waitsync_max_correction:
49 |
50 | ###############################################################################
51 | # Dummy variable to allow Ansible to accept this file.
52 | workaround_ansible_issue_8743: yes
53 |
--------------------------------------------------------------------------------
/etc/kayobe/users.yml:
--------------------------------------------------------------------------------
1 | ---
2 | ################################################################################
3 | # User configuration.
4 |
5 | # List of users to create. This should be in a format accepted by the
6 | # singleplatform-eng.users role.
7 | #users_default:
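# A sketch of a single user in the singleplatform-eng.users format; 'alice'
# and the group membership are hypothetical:
# users_default:
#   - username: alice
#     name: Alice Example
#     groups:
#       - wheel
#     append: true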
8 |
9 | ###############################################################################
10 | # Dummy variable to allow Ansible to accept this file.
11 | workaround_ansible_issue_8743: yes
12 |
--------------------------------------------------------------------------------
/etc/kayobe/vgpu.yml:
--------------------------------------------------------------------------------
1 | ---
2 | ################################################################################
3 | # VGPU configuration.
4 |
5 | # URL pointing to the location of the GRID driver. Examples are:
6 | # "file://path/on/ansible/control/host"
7 | # "http://webserver/NVIDIA-GRID-Linux-KVM-525.105.14-525.105.17-528.89.zip"
8 | # Default is none.
9 | #vgpu_driver_url:
10 |
11 | # Flag to control whether the vGPU playbook should automatically reboot the
12 | # hypervisor. Note: this is necessary for the driver to be loaded correctly.
13 | # Caution should be used when changing this option. Default is true.
14 | #vgpu_do_reboot:
15 |
16 | # Time to wait when rebooting the host before failing.
17 | # Default is 600 (seconds).
18 | #vgpu_reboot_timeout:
19 |
20 | ###############################################################################
21 | # Dummy variable to allow Ansible to accept this file.
22 | workaround_ansible_issue_8743: yes
23 |
--------------------------------------------------------------------------------
/init-runonce.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | set -e
4 |
5 | if [[ ! -d ~/deployment/venvs/os-venv ]]; then
6 | /usr/bin/python3 -m venv ~/deployment/venvs/os-venv
7 | fi
8 | ~/deployment/venvs/os-venv/bin/pip install -U pip
9 | ~/deployment/venvs/os-venv/bin/pip install python-openstackclient -c https://releases.openstack.org/constraints/upper/2025.1
10 |
11 | parent="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
12 | init_runonce=$parent/../kolla-ansible/tools/init-runonce
13 | if [[ ! -f $init_runonce ]]; then
14 | echo "Unable to find kolla-ansible repo"
15 | exit 1
16 | fi
17 |
18 | source ~/deployment/venvs/os-venv/bin/activate
19 | $init_runonce
20 |
--------------------------------------------------------------------------------
/kayobe-env:
--------------------------------------------------------------------------------
1 | # Copyright (c) 2017 StackHPC Ltd.
2 | #
3 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
4 | # not use this file except in compliance with the License. You may obtain
5 | # a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
11 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
12 | # License for the specific language governing permissions and limitations
13 | # under the License.
14 |
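# Source this file to set KAYOBE_CONFIG_PATH and related variables, optionally
# selecting a Kayobe environment:
#   source kayobe-env [--environment <environment>]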
15 | KAYOBE_CONFIG_ROOT=$(dirname $(realpath ${BASH_SOURCE[0]:-${(%):-%x}}))
16 | echo "Using Kayobe config from $KAYOBE_CONFIG_ROOT"
17 |
18 | export KAYOBE_CONFIG_PATH=$KAYOBE_CONFIG_ROOT/etc/kayobe
19 | export KOLLA_CONFIG_PATH=$KAYOBE_CONFIG_ROOT/etc/kolla
20 |
21 | # NOTE: These paths can be modified depending on the local environment.
22 | # These defaults are set based on the following directory structure:
23 | #
24 | # ${base_path}/
25 | # src/
26 | # kayobe/
27 | # kayobe-config/
28 | # kolla-ansible/
29 | # venvs/
30 | # kayobe/
31 | # kolla-ansible/
32 | base_path=$(realpath $KAYOBE_CONFIG_ROOT/../../)
33 | export KOLLA_SOURCE_PATH=${KOLLA_SOURCE_PATH:-${base_path}/src/kolla-ansible}
34 | export KOLLA_VENV_PATH=${KOLLA_VENV_PATH:-${base_path}/venvs/kolla-ansible}
35 |
36 | function check_and_export_env {
37 | # Look for existing Kayobe environments
38 | if [ -d "${KAYOBE_CONFIG_PATH}/environments" ]; then
39 | if [ -d "${KAYOBE_CONFIG_PATH}/environments/${kayobe_env}" ]; then
40 | export KAYOBE_ENVIRONMENT="${kayobe_env}"
41 | echo "Using Kayobe environment ${KAYOBE_ENVIRONMENT}"
42 | return 0
43 | else
44 | echo "Unable to find Kayobe environment ${kayobe_env} in ${KAYOBE_CONFIG_PATH}/environments"
45 | return 1
46 | fi
47 | else
48 | echo "Cannot find environments folder in ${KAYOBE_CONFIG_PATH}"
49 | return 1
50 | fi
51 | }
52 |
53 | function usage {
54 | echo "usage: ${BASH_SOURCE[0]:-${(%):-%x}} [--environment <environment>]"
55 | return 1
56 | }
57 |
58 | if [ "$#" -ge 1 ]; then
59 | if [ "$1" = "--environment" -a "$#" -eq 2 ]; then
60 | kayobe_env="$2"
61 | check_and_export_env
62 | else
63 | usage
64 | fi
65 | return $?
66 | fi
67 |
68 | if [[ -f "$KAYOBE_CONFIG_ROOT/.environment" ]]; then
69 | kayobe_env=$(cat "$KAYOBE_CONFIG_ROOT/.environment")
70 | check_and_export_env
71 | fi
72 |
--------------------------------------------------------------------------------
/pull-retag-push-images.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
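# Usage: pull-retag-push-images.sh [regex]...
# Any arguments are passed to the pull-retag-push.yml playbook as container
# image regexes, e.g. (hypothetical regexes):
#   ./pull-retag-push-images.sh ^nova ^neutron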
3 | set -e
4 |
5 | PARENT="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
6 | KAYOBE_PATH="$PARENT/../../../"
7 |
8 | if [[ -n $1 ]]; then
9 | KAYOBE_EXTRA_ARGS="-e container_image_regexes=\"$@\""
10 | fi
11 |
12 | # Shift arguments so they are not passed to environment-setup.sh when sourced,
13 | # which would break kayobe-env. See https://unix.stackexchange.com/a/151896 for
14 | # details.
15 | shift $#
16 |
17 | cd ${KAYOBE_PATH}
18 | source ~/deployment/env-vars.sh
19 | kayobe playbook run ${KAYOBE_CONFIG_PATH}/ansible/pull-retag-push.yml ${KAYOBE_EXTRA_ARGS:+"$KAYOBE_EXTRA_ARGS"}
20 |
--------------------------------------------------------------------------------
/setup.cfg:
--------------------------------------------------------------------------------
1 | [metadata]
2 | name = kayobe-config-dev
3 | summary = Development configuration for Kayobe
4 | description_file =
5 | README.rst
6 | author = OpenStack
7 | author_email = openstack-discuss@lists.openstack.org
8 | home_page = https://docs.openstack.org/kayobe/latest/
9 | classifier =
10 | Environment :: OpenStack
11 | Intended Audience :: Information Technology
12 | Intended Audience :: System Administrators
13 | License :: OSI Approved :: Apache Software License
14 | Operating System :: POSIX :: Linux
15 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
3 | #
4 | # Licensed under the Apache License, Version 2.0 (the "License");
5 | # you may not use this file except in compliance with the License.
6 | # You may obtain a copy of the License at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS,
12 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
13 | # implied.
14 | # See the License for the specific language governing permissions and
15 | # limitations under the License.
16 |
17 | # THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT
18 | import setuptools
19 |
20 | setuptools.setup(
21 | setup_requires=['pbr'],
22 | pbr=True)
23 |
--------------------------------------------------------------------------------
/tenks-compute.yml:
--------------------------------------------------------------------------------
1 | ---
2 | node_types:
3 | baremetal-compute:
4 | memory_mb: 4096
5 | vcpus: 1
6 | volumes:
7 | # There is a minimum disk space capacity requirement of 4GiB when using Ironic Python Agent:
8 | # https://github.com/openstack/ironic-python-agent/blob/master/ironic_python_agent/utils.py#L290
9 | - capacity: 10GiB
10 | physical_networks:
11 | - physnet1
12 | console_log_enabled: true
13 |
14 | specs:
15 | - type: baremetal-compute
16 | count: 2
17 | node_name_prefix: bm
18 | ironic_config:
19 | resource_class: test-rc
20 | network_interface: flat
21 |
22 | ipmi_address: 192.168.33.4
23 |
24 | ipmi_port_range_start: 6238
25 |
26 | nova_flavors:
27 | - resource_class: test-rc
28 | node_type: baremetal-compute
29 |
30 | physnet_mappings:
31 | physnet1: braio
32 |
33 | bridge_type: linuxbridge
34 |
35 | deploy_kernel: ipa.kernel
36 | deploy_ramdisk: ipa.initramfs
37 |
38 | # NOTE(priteau): Disable libvirt_vm_trust_guest_rx_filters, which when enabled
39 | # triggers the following errors when booting baremetal instances with Tenks on
40 | # Libvirt 9: Cannot set interface flags on 'macvtap1': Value too large for
41 | # defined data type
42 | libvirt_vm_trust_guest_rx_filters: false
43 |
44 | # Use a different state file, to avoid deleting the overcloud VMs.
45 | state_file_path: >-
46 | {{ '/'.join([(playbook_dir | dirname), 'state-compute.yml']) }}
47 |
--------------------------------------------------------------------------------
/tenks-storage.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # This file holds the config given to Tenks when running `tenks-deploy.sh`. It
3 | # assumes the existence of the bridge `braio`.
4 |
5 | node_types:
6 | storage:
7 | memory_mb: 8192
8 | vcpus: 1
9 | volumes:
10 | # There is a minimum disk space capacity requirement of 4GiB when using Ironic Python Agent:
11 | # https://github.com/openstack/ironic-python-agent/blob/master/ironic_python_agent/utils.py#L290
12 | - capacity: 20GiB
13 | - capacity: 20GiB
14 | physical_networks:
15 | - physnet1
16 | console_log_enabled: true
17 |
18 | specs:
19 | - type: storage
20 | count: 3
21 | node_name_prefix: storage
22 | ironic_config:
23 | resource_class: test-rc
24 | network_interface: noop
25 |
26 | ipmi_address: 192.168.33.4
27 |
28 | nova_flavors: []
29 |
30 | physnet_mappings:
31 | physnet1: braio
32 |
33 | ipmi_port_range_start: 6235
34 |
35 | bridge_type: linuxbridge
36 |
37 | # No placement service.
38 | wait_for_placement: false
39 |
40 | # NOTE(priteau): Disable libvirt_vm_trust_guest_rx_filters, which when enabled
41 | # triggers the following errors when booting baremetal instances with Tenks on
42 | # Libvirt 9: Cannot set interface flags on 'macvtap1': Value too large for
43 | # defined data type
44 | libvirt_vm_trust_guest_rx_filters: false
45 |
46 | # Use a different state file, to avoid deleting the overcloud VMs.
47 | state_file_path: >-
48 | {{ '/'.join([(playbook_dir | dirname), 'state-storage.yml']) }}
49 |
--------------------------------------------------------------------------------
/tenks.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # This file holds the config given to Tenks when running `tenks-deploy.sh`. It
3 | # assumes the existence of the bridge `braio`.
4 |
5 | node_types:
6 | controller:
7 | memory_mb: 16384
8 | vcpus: 4
9 | volumes:
10 | # There is a minimum disk space capacity requirement of 4GiB when using Ironic Python Agent:
11 | # https://github.com/openstack/ironic-python-agent/blob/master/ironic_python_agent/utils.py#L290
12 | - capacity: 40GiB
13 | physical_networks:
14 | - physnet1
15 | console_log_enabled: true
16 | compute:
17 | memory_mb: 8192
18 | vcpus: 4
19 | volumes:
20 | # There is a minimum disk space capacity requirement of 4GiB when using Ironic Python Agent:
21 | # https://github.com/openstack/ironic-python-agent/blob/master/ironic_python_agent/utils.py#L290
22 | - capacity: 20GiB
23 | physical_networks:
24 | - physnet1
25 | console_log_enabled: true
26 |
27 | specs:
28 | - type: controller
29 | count: 1
30 | node_name_prefix: controller
31 | ironic_config:
32 | resource_class: test-rc
33 | network_interface: noop
34 | - type: compute
35 | count: 1
36 | node_name_prefix: compute
37 | ironic_config:
38 | resource_class: test-rc
39 | network_interface: noop
40 |
41 | ipmi_address: 192.168.33.4
42 |
43 | nova_flavors: []
44 |
45 | physnet_mappings:
46 | physnet1: braio
47 |
48 | bridge_type: linuxbridge
49 |
50 | # No placement service.
51 | wait_for_placement: false
52 |
53 | # NOTE(priteau): Disable libvirt_vm_trust_guest_rx_filters, which when enabled
54 | # triggers the following errors when booting baremetal instances with Tenks on
55 | # Libvirt 9: Cannot set interface flags on 'macvtap1': Value too large for
56 | # defined data type
57 | libvirt_vm_trust_guest_rx_filters: false
58 |
--------------------------------------------------------------------------------
/test-requirements.txt:
--------------------------------------------------------------------------------
1 | # The order of packages is significant, because pip processes them in the order
2 | # of appearance. Changing the order has an impact on the overall integration
3 | # process, which may cause wedges in the gate later.
4 |
5 | yamllint # GPLv3
6 |
--------------------------------------------------------------------------------
/tox.ini:
--------------------------------------------------------------------------------
1 | [tox]
2 | minversion = 2.0
3 | envlist = pep8
4 | skipsdist = True
5 |
6 | [testenv]
7 | install_command = pip install {opts} {packages}
8 | deps =
9 | -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/2025.1}
10 | -r{toxinidir}/test-requirements.txt
11 |
12 | [testenv:pep8]
13 | commands =
14 | yamllint etc/kayobe
15 |
--------------------------------------------------------------------------------
/zuul.d/project.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | - project:
3 | queue: kayobe
4 | check:
5 | jobs:
6 | - openstack-tox-pep8
7 | - kayobe-overcloud-rocky9
8 | - kayobe-overcloud-centos9s
9 | - kayobe-overcloud-ubuntu-noble
10 | - kayobe-overcloud-tls-rocky9
11 | - kayobe-overcloud-host-configure-rocky9
12 | - kayobe-overcloud-host-configure-centos9s
13 | - kayobe-overcloud-host-configure-ubuntu-noble
14 | - kayobe-overcloud-upgrade-rocky9
15 | - kayobe-overcloud-upgrade-ubuntu-jammy
16 | - kayobe-seed-rocky9
17 | - kayobe-seed-ubuntu-noble
18 | - kayobe-seed-images-rocky9
19 | - kayobe-seed-upgrade-rocky9
20 | - kayobe-seed-upgrade-ubuntu-jammy
21 | - kayobe-seed-vm-rocky9
22 | - kayobe-seed-vm-rocky9-efi
23 | - kayobe-seed-vm-ubuntu-noble
24 | - kayobe-seed-vm-ubuntu-noble-efi
25 | - kayobe-infra-vm-rocky9
26 | - kayobe-infra-vm-ubuntu-noble
27 | gate:
28 | jobs:
29 | - openstack-tox-pep8
30 | - kayobe-overcloud-rocky9
31 | - kayobe-overcloud-ubuntu-noble
32 | - kayobe-overcloud-tls-rocky9
33 | - kayobe-overcloud-host-configure-rocky9
34 | - kayobe-overcloud-host-configure-ubuntu-noble
35 | - kayobe-overcloud-upgrade-rocky9
36 | - kayobe-overcloud-upgrade-ubuntu-jammy
37 | - kayobe-seed-rocky9
38 | - kayobe-seed-ubuntu-noble
39 | - kayobe-seed-upgrade-rocky9
40 | - kayobe-seed-upgrade-ubuntu-jammy
41 | - kayobe-seed-vm-rocky9
42 | - kayobe-seed-vm-ubuntu-noble
43 | - kayobe-infra-vm-rocky9
44 | - kayobe-infra-vm-ubuntu-noble
45 |
--------------------------------------------------------------------------------