├── .gitignore ├── .yamllint ├── LICENSE ├── README.md ├── automation.yaml ├── automation ├── README.md ├── build-artifacts-manual.sh ├── build-artifacts.packages ├── build-artifacts.repos.el8 ├── build-artifacts.sh ├── check-patch.packages ├── check-patch.repos.el8 └── check-patch.sh ├── build.sh ├── defaults └── main.yml ├── examples ├── hosted_engine_deploy_localhost.yml ├── hosted_engine_deploy_remotehost.yml ├── iscsi_deployment_remote.json ├── nfs_deployment.json └── passwords.yml ├── handlers └── main.yml ├── hooks ├── after_setup │ ├── README.md │ └── add_host_storage_domain.yml ├── enginevm_after_engine_setup │ └── README.md └── enginevm_before_engine_setup │ └── README.md ├── library └── ovirt_disk_28.py ├── meta └── main.yml ├── ovirt-ansible-hosted-engine-setup.spec.in ├── tasks ├── alter_libvirt_default_net_configuration.yml ├── apply_openscap_profile.yml ├── auth_revoke.yml ├── auth_sso.yml ├── bootstrap_local_vm │ ├── 01_prepare_routing_rules.yml │ ├── 02_create_local_vm.yml │ ├── 03_engine_initial_tasks.yml │ ├── 04_engine_final_tasks.yml │ └── 05_add_host.yml ├── clean_local_storage_pools.yml ├── clean_localvm_dir.yml ├── create_storage_domain.yml ├── create_target_vm │ ├── 01_create_target_hosted_engine_vm.yml │ ├── 02_engine_vm_configuration.yml │ └── 03_hosted_engine_final_tasks.yml ├── fc_getdevices.yml ├── fetch_engine_logs.yml ├── fetch_host_ip.yml ├── filter_team_devices.yml ├── final_clean.yml ├── full_execution.yml ├── get_local_vm_disk_path.yml ├── initial_clean.yml ├── install_appliance.yml ├── install_packages.yml ├── ipv_switch.yml ├── iscsi_discover.yml ├── iscsi_getdevices.yml ├── main.yml ├── partial_execution.yml ├── pause_execution.yml ├── pre_checks │ ├── 001_validate_network_interfaces.yml │ ├── 002_validate_hostname_tasks.yml │ ├── define_variables.yml │ ├── validate_data_center_name.yml │ ├── validate_firewalld.yml │ ├── validate_gateway.yml │ ├── validate_mac_address.yml │ ├── validate_memory_size.yml │ ├── 
validate_network_test.yml │ ├── validate_services_status.yml │ └── validate_vcpus_count.yml ├── restore_backup.yml ├── search_available_network_subnet.yaml └── validate_ip_prefix.yml ├── templates ├── broker.conf.j2 ├── fhanswers.conf.j2 ├── hosted-engine.conf.j2 ├── ifcfg-eth0-dhcp.j2 ├── ifcfg-eth0-static-ipv6.j2 ├── ifcfg-eth0-static.j2 ├── meta-data.j2 ├── network-config-dhcp.j2 ├── user-data.j2 ├── version.j2 └── vm.conf.j2 └── test_plugins └── ovirt_proxied_check.py /.gitignore: -------------------------------------------------------------------------------- 1 | .tox 2 | *.retry 3 | roles/provision_docker 4 | *.tar.gz 5 | output/ 6 | ovirt-ansible-engine-setup.spec 7 | exported-artifacts/ -------------------------------------------------------------------------------- /.yamllint: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | rules: 4 | braces: 5 | min-spaces-inside: 0 6 | max-spaces-inside: 0 7 | min-spaces-inside-empty: -1 8 | max-spaces-inside-empty: -1 9 | brackets: 10 | min-spaces-inside: 0 11 | max-spaces-inside: 0 12 | min-spaces-inside-empty: -1 13 | max-spaces-inside-empty: -1 14 | colons: 15 | max-spaces-before: 0 16 | max-spaces-after: 1 17 | commas: 18 | max-spaces-before: 0 19 | min-spaces-after: 1 20 | max-spaces-after: 1 21 | comments: 22 | level: warning 23 | require-starting-space: true 24 | min-spaces-from-content: 2 25 | comments-indentation: 26 | level: warning 27 | document-end: disable 28 | document-start: 29 | level: warning 30 | present: true 31 | empty-lines: 32 | max: 2 33 | max-start: 0 34 | max-end: 0 35 | quoted-strings: disable 36 | empty-values: 37 | forbid-in-block-mappings: false 38 | forbid-in-flow-mappings: false 39 | hyphens: 40 | max-spaces-after: 1 41 | indentation: 42 | spaces: consistent 43 | indent-sequences: true 44 | check-multi-line-strings: false 45 | key-duplicates: enable 46 | key-ordering: disable 47 | line-length: 48 | max: 120 49 | allow-non-breakable-words: 
true 50 | allow-non-breakable-inline-mappings: false 51 | new-line-at-end-of-file: enable 52 | new-lines: 53 | type: unix 54 | octal-values: 55 | forbid-implicit-octal: false 56 | forbid-explicit-octal: false 57 | trailing-spaces: enable 58 | truthy: enable 59 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 
35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. 
Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. 
You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 
123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. 
In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | 179 | Copyright 2014 Red Hat, Inc. 180 | 181 | Licensed under the Apache License, Version 2.0 (the "License"); 182 | you may not use this file except in compliance with the License. 
183 | You may obtain a copy of the License at 184 | 185 | http://www.apache.org/licenses/LICENSE-2.0 186 | 187 | Unless required by applicable law or agreed to in writing, software 188 | distributed under the License is distributed on an "AS IS" BASIS, 189 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 190 | See the License for the specific language governing permissions and 191 | limitations under the License. -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # ovirt-ansible-hosted-engine-setup 2 | 3 | ## This role has been migrated to [oVirt Ansible Collection](https://github.com/ovirt/ovirt-ansible-collection), please use latest version from there. This repository is now readonly and no longer used for active development. 4 | 5 | Ansible role for deploying oVirt Hosted-Engine 6 | 7 | # Requirements 8 | 9 | * Ansible version 2.9.11 or higher 10 | * Python SDK version 4.2 or higher 11 | * Python netaddr library on the ansible controller node 12 | 13 | # Dependencies 14 | 15 | No. 16 | 17 | # Prerequisites 18 | 19 | * A fully qualified domain name prepared for your Engine and the host. Forward and reverse lookup records must both be set in the DNS. 20 | * `/var/tmp` has at least 5 GB of free space. 
21 | * Unless you are using Gluster, you must have prepared storage for your Hosted-Engine environment (choose one): 22 | * [Prepare NFS Storage](https://ovirt.org/documentation/admin-guide/chap-Storage/#preparing-nfs-storage) 23 | * [Prepare ISCSI Storage](https://ovirt.org/documentation/admin-guide/chap-Storage/#preparing-iscsi-storage) 24 | * Install additional oVirt ansible role: 25 | 26 | ```bash 27 | $ ansible-galaxy install ovirt.engine-setup # case-sensitive 28 | ``` 29 | # Role variables 30 | 31 | ## General Variables 32 | 33 | | Name | Default value | Description | 34 | |---------------------------------|-----------------------|-----------------------------------------------------------| 35 | | he_bridge_if | eth0 | The network interface ovirt management bridge will be configured on | 36 | | he_fqdn | null | The engine FQDN as it configured on the DNS | 37 | | he_mem_size_MB | max | The amount of memory used on the engine VM | 38 | | he_reserved_memory_MB | 512 | The amount of memory reserved for the host | 39 | | he_vcpus | max | The amount of CPUs used on the engine VM | 40 | | he_disk_size_GB | 61 | Disk size of the engine VM | 41 | | he_vm_mac_addr | null | MAC address of the engine vm network interface. | 42 | | he_domain_type | null | Storage domain type. 
available options: *nfs*, *iscsi*, *glusterfs*, *fc* | 43 | | he_storage_domain_addr | null | Storage domain IP/DNS address | 44 | | he_ansible_host_name | localhost | hostname in use on the first HE host (not necessarily the Ansible controller one) | 45 | | he_restore_from_file | null | a backup file created with engine-backup to be restored on the fly | 46 | | he_pki_renew_on_restore | false | Renew engine PKI on restore if needed | 47 | | he_cluster | Default | name of the cluster with hosted-engine hosts | 48 | | he_cluster_cpu_type | null | cluster CPU type to be used in hosted-engine cluster (the same as HE host or lower) | 49 | | he_cluster_comp_version | null | Compatibility version of the hosted-engine cluster. Default value is the latest compatibility version | 50 | | he_data_center | Default | name of the datacenter with hosted-engine hosts | 51 | | he_data_center_comp_version | null | Compatibility version of the hosted-engine data center. Default value is the latest compatibility version | 52 | | he_host_name | $(hostname -f) | name used by the engine for the first host | 53 | | he_host_address | $(hostname -f) | address used by the engine for the first host | 54 | | he_bridge_if | null | interface used for the management bridge | 55 | | he_apply_openscap_profile | false | apply a default OpenSCAP security profile on HE VM | 56 | | he_network_test | dns | the way of the network connectivity check performed by ovirt-hosted-engine-ha and ovirt-hosted-engine-setup, available options: *dns*, *ping*, *tcp* or *none*. 
| 57 | | he_tcp_t_address | null | hostname to connect if he_network_test is *tcp* | 58 | | he_tcp_t_port | null | port to connect if he_network_test is *tcp* | 59 | | he_pause_host | false | Pause the execution to let the user interactively fix host configuration | 60 | | he_offline_deployment | false | If `True`, updates for all packages will be disabled | 61 | | he_additional_package_list | [] | List of additional packages to be installed on engine VM apart from ovirt-engine package | 62 | | he_debug_mode | false | If `True`, HE deployment will execute additional tasks for debug | 63 | | he_db_password | UNDEF | Engine database password | 64 | | he_dwh_db_password | UNDEF | DWH database password | 65 | 66 | ## NFS / Gluster Variables 67 | 68 | | Name | Default value | Description | 69 | |---------------------------------|-----------------------|-----------------------------------------------------------| 70 | | he_mount_options | '' | NFS mount options 71 | | he_storage_domain_path | null | shared folder path on NFS server | 72 | | he_nfs_version | auto | NFS version. available options: *auto*, *v4*, *v3*, *v4_0*, *v4_1*, *v4_2* 73 | | he_storage_if | null | the network interface name that is connected to the storage network, assumed to be pre-configured| 74 | 75 | 76 | ## ISCSI Variables 77 | 78 | | Name | Default value | Description | 79 | |---------------------------------|-----------------------|-----------------------------------------------------------| 80 | | he_iscsi_username | null | iscsi username | 81 | | he_iscsi_password | null | iscsi password | 82 | | he_iscsi_target | null | iscsi target | 83 | | he_lun_id | null | Lun ID | 84 | | he_iscsi_portal_port | null | iscsi portal port | 85 | | he_iscsi_portal_addr | null | iscsi portal address (just for interactive iSCSI discovery, use he_storage_domain_addr for the deployment) | 86 | | he_iscsi_tpgt | null | iscsi tpgt | 87 | | he_discard | false | Discard the whole disk space when removed. 
more info [here](https://ovirt.org/develop/release-management/features/storage/discard-after-delete/) 88 | 89 | ## Static IP configuration Variables 90 | 91 | DHCP configuration is used on the engine VM by default. However, if you would like to use static ip instead, 92 | define the following variables: 93 | 94 | | Name | Default value | Description | 95 | |---------------------------------|-----------------------|-----------------------------------------------------------| 96 | | he_vm_ip_addr | null | engine VM ip address | 97 | | he_vm_ip_prefix | null | engine VM ip prefix | 98 | | he_dns_addr | null | engine VM DNS server | 99 | | he_default_gateway | null | engine VM default gateway | 100 | | he_vm_etc_hosts | false | Add engine VM ip and fqdn to /etc/hosts on the host | 101 | 102 | # Example Playbook 103 | This is a simple example for deploying Hosted-Engine with NFS storage domain. 104 | 105 | This role can be used to deploy on localhost (the ansible controller one) or on a remote host (please correctly set he_ansible_host_name). 106 | All the playbooks can be found inside the `examples/` folder. 107 | 108 | ## hosted_engine_deploy_localhost.yml 109 | 110 | ```yml 111 | --- 112 | - name: Deploy oVirt hosted engine 113 | hosts: localhost 114 | connection: local 115 | roles: 116 | - role: ovirt.hosted_engine_setup 117 | ``` 118 | 119 | ## hosted_engine_deploy_remotehost.yml 120 | 121 | ```yml 122 | --- 123 | - name: Deploy oVirt hosted engine 124 | hosts: host123.localdomain 125 | roles: 126 | - role: ovirt.hosted_engine_setup 127 | ``` 128 | 129 | ## passwords.yml 130 | 131 | ```yml 132 | --- 133 | # As an example this file is keep in plaintext, if you want to 134 | # encrypt this file, please execute following command: 135 | # 136 | # $ ansible-vault encrypt passwords.yml 137 | # 138 | # It will ask you for a password, which you must then pass to 139 | # ansible interactively when executing the playbook. 
140 | # 141 | # $ ansible-playbook myplaybook.yml --ask-vault-pass 142 | # 143 | he_appliance_password: 123456 144 | he_admin_password: 123456 145 | ``` 146 | 147 | ## Example 1: extra vars for NFS deployment with DHCP - he_deployment.json 148 | 149 | ```json 150 | { 151 | "he_bridge_if": "eth0", 152 | "he_fqdn": "he-engine.example.com", 153 | "he_vm_mac_addr": "00:a5:3f:66:ba:12", 154 | "he_domain_type": "nfs", 155 | "he_storage_domain_addr": "192.168.100.50", 156 | "he_storage_domain_path": "/var/nfs_folder" 157 | } 158 | ``` 159 | 160 | ## Example 2: extra vars for iSCSI deployment with static IP, remote host - he_deployment_remote.json 161 | 162 | ```json 163 | { 164 | "he_bridge_if": "eth0", 165 | "he_fqdn": "he-engine.example.com", 166 | "he_vm_ip_addr": "192.168.1.214", 167 | "he_vm_ip_prefix": "24", 168 | "he_gateway": "192.168.1.1", 169 | "he_dns_addr": "192.168.1.1", 170 | "he_vm_etc_hosts": true, 171 | "he_vm_mac_addr": "00:a5:3f:66:ba:12", 172 | "he_domain_type": "iscsi", 173 | "he_storage_domain_addr": "192.168.1.125", 174 | "he_iscsi_portal_port": "3260", 175 | "he_iscsi_tpgt": "1", 176 | "he_iscsi_target": "iqn.2017-10.com.redhat.stirabos:he", 177 | "he_lun_id": "36589cfc000000e8a909165bdfb47b3d9", 178 | "he_mem_size_MB": "4096", 179 | "he_ansible_host_name": "host123.localdomain" 180 | } 181 | ``` 182 | 183 | ### Test iSCSI connectivity and get LUN WWID before deploying 184 | 185 | ``` 186 | [root@c75he20180820h1 ~]# iscsiadm -m node --targetname iqn.2017-10.com.redhat.stirabos:he -p 192.168.1.125:3260 -l 187 | [root@c75he20180820h1 ~]# iscsiadm -m session -P3 188 | iSCSI Transport Class version 2.0-870 189 | version 6.2.0.874-7 190 | Target: iqn.2017-10.com.redhat.stirabos:data (non-flash) 191 | Current Portal: 192.168.1.125:3260,1 192 | Persistent Portal: 192.168.1.125:3260,1 193 | ********** 194 | Interface: 195 | ********** 196 | Iface Name: default 197 | Iface Transport: tcp 198 | Iface Initiatorname: iqn.1994-05.com.redhat:6a4517b3773a 199 | 
Iface IPaddress: 192.168.1.14 200 | Iface HWaddress: 201 | Iface Netdev: 202 | SID: 1 203 | iSCSI Connection State: LOGGED IN 204 | iSCSI Session State: LOGGED_IN 205 | Internal iscsid Session State: NO CHANGE 206 | ********* 207 | Timeouts: 208 | ********* 209 | Recovery Timeout: 5 210 | Target Reset Timeout: 30 211 | LUN Reset Timeout: 30 212 | Abort Timeout: 15 213 | ***** 214 | CHAP: 215 | ***** 216 | username: 217 | password: ******** 218 | username_in: 219 | password_in: ******** 220 | ************************ 221 | Negotiated iSCSI params: 222 | ************************ 223 | HeaderDigest: None 224 | DataDigest: None 225 | MaxRecvDataSegmentLength: 262144 226 | MaxXmitDataSegmentLength: 131072 227 | FirstBurstLength: 131072 228 | MaxBurstLength: 16776192 229 | ImmediateData: Yes 230 | InitialR2T: Yes 231 | MaxOutstandingR2T: 1 232 | ************************ 233 | Attached SCSI devices: 234 | ************************ 235 | Host Number: 3 State: running 236 | scsi3 Channel 00 Id 0 Lun: 2 237 | Attached scsi disk sdb State: running 238 | scsi3 Channel 00 Id 0 Lun: 3 239 | Attached scsi disk sdc State: running 240 | Target: iqn.2017-10.com.redhat.stirabos:he (non-flash) 241 | Current Portal: 192.168.1.125:3260,1 242 | Persistent Portal: 192.168.1.125:3260,1 243 | ********** 244 | Interface: 245 | ********** 246 | Iface Name: default 247 | Iface Transport: tcp 248 | Iface Initiatorname: iqn.1994-05.com.redhat:6a4517b3773a 249 | Iface IPaddress: 192.168.1.14 250 | Iface HWaddress: 251 | Iface Netdev: 252 | SID: 4 253 | iSCSI Connection State: LOGGED IN 254 | iSCSI Session State: LOGGED_IN 255 | Internal iscsid Session State: NO CHANGE 256 | ********* 257 | Timeouts: 258 | ********* 259 | Recovery Timeout: 5 260 | Target Reset Timeout: 30 261 | LUN Reset Timeout: 30 262 | Abort Timeout: 15 263 | ***** 264 | CHAP: 265 | ***** 266 | username: 267 | password: ******** 268 | username_in: 269 | password_in: ******** 270 | ************************ 271 | Negotiated iSCSI 
params: 272 | ************************ 273 | HeaderDigest: None 274 | DataDigest: None 275 | MaxRecvDataSegmentLength: 262144 276 | MaxXmitDataSegmentLength: 131072 277 | FirstBurstLength: 131072 278 | MaxBurstLength: 16776192 279 | ImmediateData: Yes 280 | InitialR2T: Yes 281 | MaxOutstandingR2T: 1 282 | ************************ 283 | Attached SCSI devices: 284 | ************************ 285 | Host Number: 6 State: running 286 | scsi6 Channel 00 Id 0 Lun: 0 287 | Attached scsi disk sdd State: running 288 | scsi6 Channel 00 Id 0 Lun: 1 289 | Attached scsi disk sde State: running 290 | [root@c75he20180820h1 ~]# lsblk /dev/sdd 291 | NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT 292 | sdd 8:48 0 100G 0 disk 293 | └─36589cfc000000e8a909165bdfb47b3d9 253:10 0 100G 0 mpath 294 | [root@c75he20180820h1 ~]# lsblk /dev/sde 295 | NAME MAJ:MIN RM SIZE RO TYPE MOUNTPOINT 296 | sde 8:64 0 10G 0 disk 297 | └─36589cfc000000ab67ee1427370d68436 253:0 0 10G 0 mpath 298 | [root@c75he20180820h1 ~]# /lib/udev/scsi_id --page=0x83 --whitelisted --device=/dev/sdd 299 | 36589cfc000000e8a909165bdfb47b3d9 300 | [root@c75he20180820h1 ~]# iscsiadm -m node --targetname iqn.2017-10.com.redhat.stirabos:he -p 192.168.1.125:3260 -u 301 | Logging out of session [sid: 4, target: iqn.2017-10.com.redhat.stirabos:he, portal: 192.168.1.125,3260] 302 | Logout of [sid: 4, target: iqn.2017-10.com.redhat.stirabos:he, portal: 192.168.1.125,3260] successful. 303 | ``` 304 | 305 | # Usage 306 | 1. Check all the prerequisites and requirements are met. 307 | 2. Encrypt passwords.yml 308 | ```sh 309 | $ ansible-vault encrypt passwords.yml 310 | ``` 311 | 312 | 3. 
Execute the playbook 313 | 314 | Local deployment: 315 | ```sh 316 | $ ansible-playbook hosted_engine_deploy.yml --extra-vars='@he_deployment.json' --extra-vars='@passwords.yml' --ask-vault-pass 317 | ``` 318 | 319 | Deployment over a remote host: 320 | ```sh 321 | ansible-playbook -i host123.localdomain, hosted_engine_deploy.yml --extra-vars='@he_deployment.json' --extra-vars='@passwords.yml' --ask-vault-pass 322 | ``` 323 | 324 | Deploy over a remote host from Ansible AWX/Tower 325 | --- 326 | 327 | The flow creates a temporary VM with a running engine to use for configuring and bootstrapping the whole environment. 328 | The bootstrap engine VM runs over libvirt natted network so, in that stage, is not reachable from outside the host where it's running on. 329 | 330 | When the role dynamically adds the freshly created engine VM to the inventory, it also configures the host to be used as an ssh proxy and this perfectly works directly running the playbook with ansible-playbook. 331 | On the other side, Ansible AWX/Tower by defaults relies on PRoot to isolate jobs and so the credentials supplied by AWX/Tower will not flow to the jump host configured with ProxyCommand. 332 | 333 | [This can be avoided disabling job isolation in AWX/Tower](https://docs.ansible.com/ansible-tower/latest/html/administration/tipsandtricks.html#setting-up-a-jump-host-to-use-with-tower) 334 | 335 | Please notice that *job isolation* can be configured system wise but not only for the HE deploy job and so it's not a recommended practice on production environments. 
336 | 337 | Deployment time improvements 338 | --- 339 | 340 | To significantly reduce the amount of time it takes to deploy a hosted engine __over a remote host__, add the following lines to `/etc/ansible/ansible.cfg` under the `[ssh_connection]` section: 341 | 342 | ``` 343 | ssh_args = -C -o ControlMaster=auto -o ControlPersist=30m 344 | control_path_dir = /root/cp 345 | control_path = %(directory)s/%%h-%%r 346 | pipelining = True 347 | ``` 348 | 349 | Make changes in the engine VM during the deployment 350 | --- 351 | In some cases, a user may want to make adjustments to the engine VM 352 | during the deployment process. There are 2 ways to do that: 353 | 354 | **Automatic:** 355 | 356 | Write ansible playbooks that will run on the engine VM before or after the engine VM installation. 357 | 358 | Add the playbooks that will run __before__ the engine setup to 359 | ```hooks/enginevm_before_engine_setup``` and the playbooks that will run __after__ the engine setup to ```hooks/enginevm_after_engine_setup```. 360 | 361 | These playbooks will be consumed automatically by the role when you execute it. 362 | 363 | **Manual:** 364 | 365 | To make manual adjustments you can set the variable ```he_pause_host``` to true. This will pause the deployment after the engine has been setup and create a lock-file at /tmp that ends with ```_he_setup_lock``` on the machine the role was executed on. The deployment will continue after deleting the lock-file, or after 24 hours ( if the lock-file hasn't been removed ). 366 | 367 | In order to proceed with the deployment, before deleting the lock-file, make sure that the host is on 'up' state at the engine's URL. 368 | 369 | Both of the lock-file path and the engine's URL will be presented during the role execution. 370 | 371 | Demo 372 | ---- 373 | Here a demo showing a deployment on NFS configuring the engine VM with static IP. 
374 | [![asciicast](https://asciinema.org/a/205639.png)](https://asciinema.org/a/205639) 375 | 376 | # License 377 | 378 | Apache License 2.0 379 | -------------------------------------------------------------------------------- /automation.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | distros: 3 | - fc30 4 | - el8 5 | release_branches: 6 | master: ["ovirt-master"] 7 | -------------------------------------------------------------------------------- /automation/README.md: -------------------------------------------------------------------------------- 1 | Continuous Integration Scripts 2 | ============================== 3 | 4 | This directory contains scripts for Continuous Integration provided by 5 | [oVirt Jenkins](http://jenkins.ovirt.org/) 6 | system and follows the standard defined in 7 | [Build and test standards](http://www.ovirt.org/CI/Build_and_test_standards) 8 | wiki page. 9 | -------------------------------------------------------------------------------- /automation/build-artifacts-manual.sh: -------------------------------------------------------------------------------- 1 | build-artifacts.sh -------------------------------------------------------------------------------- /automation/build-artifacts.packages: -------------------------------------------------------------------------------- 1 | yum-utils 2 | ansible 3 | git 4 | -------------------------------------------------------------------------------- /automation/build-artifacts.repos.el8: -------------------------------------------------------------------------------- 1 | # Provides ansible, ansible-lint, yamllint 2 | el8collection,https://copr-be.cloud.fedoraproject.org/results/sbonazzo/EL8_collection/epel-8-x86_64/ 3 | -------------------------------------------------------------------------------- /automation/build-artifacts.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -xe 2 | 3 | # 
Remove any previous artifacts 4 | rm -rf output 5 | rm -f ./*tar.gz 6 | 7 | # Run linters if available 8 | if [ -x "$(command -v ansible-lint)" ] ; then 9 | ansible-lint . 10 | else 11 | echo "Skipping ansible-lint because it's not available" 12 | fi 13 | if [ -x "$(command -v yamllint)" ] ; then 14 | yamllint . 15 | else 16 | echo "Skipping yamllint because it's not available" 17 | fi 18 | 19 | # Get the tarball 20 | ./build.sh dist 21 | 22 | # Create the src.rpm (note: the macro is _topdir, not _topmdir) 23 | rpmbuild \ 24 | -D "_srcrpmdir $PWD/output" \ 25 | -D "_topdir $PWD/rpmbuild" \ 26 | -ts ./*.gz 27 | 28 | # Install any build requirements 29 | yum-builddep output/*src.rpm 30 | 31 | # Create the rpms 32 | rpmbuild \ 33 | -D "_rpmdir $PWD/output" \ 34 | -D "_topdir $PWD/rpmbuild" \ 35 | --rebuild output/*.src.rpm 36 | 37 | # Store any relevant artifacts in exported-artifacts for the ci system to 38 | # archive 39 | [[ -d exported-artifacts ]] || mkdir -p exported-artifacts 40 | find output -iname \*rpm -exec mv "{}" exported-artifacts/ \; 41 | mv ./*tar.gz exported-artifacts/ 42 | -------------------------------------------------------------------------------- /automation/check-patch.packages: -------------------------------------------------------------------------------- 1 | yum-utils 2 | git 3 | ansible 4 | ansible-lint 5 | yamllint 6 | -------------------------------------------------------------------------------- /automation/check-patch.repos.el8: -------------------------------------------------------------------------------- 1 | # Provides ansible, ansible-lint, yamllint 2 | el8collection,https://copr-be.cloud.fedoraproject.org/results/sbonazzo/EL8_collection/epel-8-x86_64/ 3 | -------------------------------------------------------------------------------- /automation/check-patch.sh: -------------------------------------------------------------------------------- 1 | build-artifacts.sh -------------------------------------------------------------------------------- /build.sh:
-------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | VERSION="1.1.9" 4 | MILESTONE=master 5 | RPM_RELEASE="0.1.$MILESTONE.$(date -u +%Y%m%d%H%M%S).git$(git rev-parse --short HEAD)" 6 | 7 | ROLE_NAME="ovirt.hosted_engine_setup" 8 | PACKAGE_NAME="ovirt-ansible-hosted-engine-setup" 9 | PREFIX=/usr/local 10 | DATAROOT_DIR=$PREFIX/share 11 | ROLES_DATAROOT_DIR=$DATAROOT_DIR/ansible/roles 12 | DOC_DIR=$DATAROOT_DIR/doc 13 | PKG_DATA_DIR=${PKG_DATA_DIR:-$ROLES_DATAROOT_DIR/$PACKAGE_NAME} 14 | PKG_DOC_DIR=${PKG_DOC_DIR:-$DOC_DIR/$PACKAGE_NAME} 14 | # Resolved data dir used as the symlink target for the legacy role names below 14 | PKG_DATA_DIR_ORIG=${PKG_DATA_DIR_ORIG:-$PKG_DATA_DIR} 15 | ROLENAME_LEGACY="${ROLENAME_LEGACY:-$ROLES_DATAROOT_DIR/ovirt.hosted-engine-setup}" 16 | ROLENAME_LEGACY_UPPERCASE="${ROLENAME_LEGACY_UPPERCASE:-$ROLES_DATAROOT_DIR/oVirt.hosted-engine-setup}" 17 | 18 | RPM_VERSION=$VERSION 19 | PACKAGE_VERSION=$VERSION 20 | [ -n "$MILESTONE" ] && PACKAGE_VERSION+="_$MILESTONE" 21 | DISPLAY_VERSION=$PACKAGE_VERSION 22 | 23 | TARBALL="$PACKAGE_NAME-$PACKAGE_VERSION.tar.gz" 24 | 25 | dist() { 26 | echo "Creating tar archive '$TARBALL' ... " 27 | sed \ 28 | -e "s|@RPM_VERSION@|$RPM_VERSION|g" \ 29 | -e "s|@RPM_RELEASE@|$RPM_RELEASE|g" \ 30 | -e "s|@PACKAGE_NAME@|$PACKAGE_NAME|g" \ 31 | -e "s|@PACKAGE_VERSION@|$PACKAGE_VERSION|g" \ 32 | < ovirt-ansible-hosted-engine-setup.spec.in > ovirt-ansible-hosted-engine-setup.spec 33 | 34 | git ls-files | tar --files-from /proc/self/fd/0 -czf "$TARBALL" ovirt-ansible-hosted-engine-setup.spec 35 | echo "tar archive '$TARBALL' created." 36 | } 37 | 38 | install() { 39 | echo "Installing data..."
40 | mkdir -p $PKG_DATA_DIR 41 | mkdir -p $PKG_DOC_DIR 42 | 43 | # Create a symlink, so legacy role name does work: 44 | ln -f -s $PKG_DATA_DIR_ORIG $ROLENAME_LEGACY 45 | 46 | # Create a symlink, so legacy role name does work with upper case: 47 | ln -f -s $PKG_DATA_DIR_ORIG $ROLENAME_LEGACY_UPPERCASE 48 | 49 | cp -pR defaults/ $PKG_DATA_DIR 50 | cp -pR examples/ $PKG_DATA_DIR 51 | cp -pR handlers/ $PKG_DATA_DIR 52 | cp -pR hooks/ $PKG_DATA_DIR 53 | cp -pR library/ $PKG_DATA_DIR 54 | cp -pR meta/ $PKG_DATA_DIR 55 | cp -pR tasks/ $PKG_DATA_DIR 56 | cp -pR templates/ $PKG_DATA_DIR 57 | cp -pR test_plugins/ $PKG_DATA_DIR 58 | 59 | echo "Installation done." 60 | } 61 | 62 | $1 63 | -------------------------------------------------------------------------------- /defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Default vars 3 | # Do not change these variables 4 | # Changes in this section are NOT supported 5 | 6 | he_cmd_lang: 7 | LANGUAGE: en_US.UTF-8 8 | LANG: en_US.UTF-8 9 | LC_MESSAGES: en_US.UTF-8 10 | LC_ALL: en_US.UTF-8 11 | 12 | he_vm_name: HostedEngine 13 | he_data_center: Default 14 | he_cluster: Default 15 | he_local_vm_dir_path: /var/tmp 16 | he_local_vm_dir_prefix: localvm 17 | he_appliance_ova: '' 18 | he_root_ssh_pubkey: '' 19 | he_root_ssh_access: 'yes' 20 | he_apply_openscap_profile: false 21 | he_cdrom: '' 22 | he_console_type: vnc 23 | he_video_device: vga 24 | he_graphic_device: vnc 25 | he_emulated_machine: pc 26 | he_minimal_mem_size_MB: 4096 27 | he_minimal_disk_size_GB: 50 28 | he_mgmt_network: ovirtmgmt 29 | he_storage_domain_name: hosted_storage 30 | he_ansible_host_name: localhost 31 | he_ipv4_subnet_prefix: "192.168.222" 32 | he_ipv6_subnet_prefix: fd00:1234:5678:900 33 | he_webui_forward_port: 6900 # by default already open for VM console 34 | he_reserved_memory_MB: 512 35 | he_avail_memory_grace_MB: 200 36 | 37 | he_host_ip: null 38 | he_host_name: null 39 | he_host_address: 
null 40 | he_cloud_init_host_name: null 41 | he_cloud_init_domain_name: null 42 | 43 | he_smtp_port: 25 44 | he_smtp_server: localhost 45 | he_dest_email: root@localhost 46 | he_source_email: root@localhost 47 | 48 | he_force_ip4: false 49 | he_force_ip6: false 50 | 51 | he_pause_host: false 52 | he_debug_mode: false 53 | 54 | ## Mandatory variables: 55 | 56 | he_bridge_if: null 57 | he_fqdn: null 58 | he_mem_size_MB: max 59 | he_vcpus: max 60 | he_disk_size_GB: 61 61 | 62 | he_enable_libgfapi: false 63 | he_enable_hc_gluster_service: false 64 | he_vm_mac_addr: null 65 | he_remove_appliance_rpm: true 66 | he_pki_renew_on_restore: false 67 | 68 | ## Storage domain vars: 69 | he_domain_type: null # can be: nfs | iscsi | glusterfs | fc 70 | he_storage_domain_addr: null 71 | 72 | ## NFS vars: 73 | ## Defaults are null, user should specify if NFS is chosen 74 | he_mount_options: '' 75 | he_storage_domain_path: null 76 | he_nfs_version: auto # can be: auto, v4 or v3 77 | he_storage_if: null 78 | 79 | ## ISCSI vars: 80 | ## Defaults are null, user should specify if ISCSI is chosen 81 | he_iscsi_username: null 82 | he_iscsi_password: null 83 | he_iscsi_discover_username: null 84 | he_iscsi_discover_password: null 85 | he_iscsi_target: null 86 | he_lun_id: null 87 | he_iscsi_portal_port: null 88 | he_iscsi_portal_addr: null 89 | he_iscsi_tpgt: null 90 | he_discard: false 91 | 92 | # Define if using STATIC ip configuration 93 | he_vm_ip_addr: null 94 | he_vm_ip_prefix: null 95 | he_dns_addr: null # up to 3 DNS servers IPs can be added 96 | he_vm_etc_hosts: false # user can add lines to /etc/hosts on the engine VM 97 | he_default_gateway: null 98 | he_network_test: 'dns' # can be: 'dns', 'ping', 'tcp' or 'none' 99 | he_tcp_t_address: null 100 | he_tcp_t_port: null 101 | 102 | # ovirt-hosted-engine-setup variables 103 | he_just_collect_network_interfaces: false 104 | he_libvirt_authfile: '/etc/ovirt-hosted-engine/virsh_auth.conf' 105 | he_offline_deployment: false 106 | 
he_additional_package_list: [] 107 | 108 | # *** Do Not Use On Production Environment *** 109 | # ********** Used for testing ONLY *********** 110 | he_requirements_check_enabled: true 111 | he_memory_requirements_check_enabled: true 112 | -------------------------------------------------------------------------------- /examples/hosted_engine_deploy_localhost.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Deploy oVirt hosted engine 3 | hosts: localhost 4 | connection: local 5 | roles: 6 | - role: ovirt.hosted_engine_setup 7 | -------------------------------------------------------------------------------- /examples/hosted_engine_deploy_remotehost.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Deploy oVirt hosted engine 3 | hosts: host123.localdomain 4 | roles: 5 | - role: ovirt.hosted_engine_setup 6 | -------------------------------------------------------------------------------- /examples/iscsi_deployment_remote.json: -------------------------------------------------------------------------------- 1 | { 2 | "he_bridge_if": "eth0", 3 | "he_fqdn": "he-engine.example.com", 4 | "he_vm_ip_addr": "192.168.1.214", 5 | "he_vm_ip_prefix": "24", 6 | "he_gateway": "192.168.1.1", 7 | "he_dns_addr": "192.168.1.1", 8 | "he_vm_etc_hosts": true, 9 | "he_vm_mac_addr": "00:a5:3f:66:ba:12", 10 | "he_domain_type": "iscsi", 11 | "he_storage_domain_addr": "192.168.1.125", 12 | "he_iscsi_portal_port": "3260", 13 | "he_iscsi_tpgt": "1", 14 | "he_iscsi_target": "iqn.2017-10.com.redhat.stirabos:he", 15 | "he_lun_id": "36589cfc000000e8a909165bdfb47b3d9", 16 | "he_mem_size_MB": "4096", 17 | "he_ansible_host_name": "host123.localdomain" 18 | } 19 | -------------------------------------------------------------------------------- /examples/nfs_deployment.json: -------------------------------------------------------------------------------- 1 | { 2 | "he_bridge_if": "eth0", 3 | 
"he_fqdn": "he-engine.example.com", 4 | "he_vm_mac_addr": "00:a5:3f:66:ba:12", 5 | "he_domain_type": "nfs", 6 | "he_storage_domain_addr": "192.168.100.50", 7 | "he_storage_domain_path": "/var/nfs_folder" 8 | } -------------------------------------------------------------------------------- /examples/passwords.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # As an example this file is keep in plaintext, if you want to 3 | # encrypt this file, please execute following command: 4 | # 5 | # $ ansible-vault encrypt passwords.yml 6 | # 7 | # It will ask you for a password, which you must then pass to 8 | # ansible interactively when executing the playbook. 9 | # 10 | # $ ansible-playbook myplaybook.yml --ask-vault-pass 11 | # 12 | he_appliance_password: 123456 13 | he_admin_password: 123456 14 | -------------------------------------------------------------------------------- /handlers/main.yml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/oVirt/ovirt-ansible-hosted-engine-setup/7e18a26b9584eabf4732d4476bec4cdbcac7f24f/handlers/main.yml -------------------------------------------------------------------------------- /hooks/after_setup/README.md: -------------------------------------------------------------------------------- 1 | # USAGE 2 | 3 | Place here playbooks to be executed after hosted-engine-setup finishes. 
-------------------------------------------------------------------------------- /hooks/after_setup/add_host_storage_domain.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Include Host vars 3 | include_vars: "{{ file_item }}" 4 | with_fileglob: "/usr/share/ovirt-hosted-engine-setup/gdeploy-inventory.yml" 5 | loop_control: 6 | loop_var: file_item 7 | - debug: var=gluster 8 | 9 | - name: Set Engine public key as authorized key without validating the TLS/SSL certificates 10 | connection: ssh 11 | authorized_key: 12 | user: root 13 | state: present 14 | key: https://{{ he_fqdn }}/ovirt-engine/services/pki-resource?resource=engine-certificate&format=OPENSSH-PUBKEY 15 | validate_certs: false 16 | delegate_to: "{{ host }}" 17 | with_items: "{{ gluster.hosts }}" 18 | loop_control: 19 | loop_var: host 20 | when: "gluster is defined and 'hosts' in gluster" 21 | 22 | - name: Add additional gluster hosts to engine 23 | async: 50 24 | poll: 0 25 | ignore_errors: true 26 | ovirt_host: 27 | cluster: "{{ he_cluster }}" 28 | name: "{{ host }}" 29 | address: "{{ host }}" 30 | state: present 31 | public_key: true 32 | auth: "{{ ovirt_auth }}" 33 | hosted_engine: deploy 34 | with_items: "{{ gluster.hosts }}" 35 | loop_control: 36 | loop_var: host 37 | when: "gluster is defined and 'hosts' in gluster and gluster.hosts | length > 1" 38 | 39 | - name: "Add additional glusterfs storage domains" 40 | ignore_errors: true 41 | ovirt_storage_domain: 42 | name: "{{ sd.name }}" 43 | host: "{{ he_host_name }}" 44 | auth: "{{ ovirt_auth }}" 45 | data_center: "{{ datacenter_name }}" 46 | glusterfs: 47 | address: "{{ he_storage_domain_addr }}" 48 | mount_options: "{{ sd.mount_options }}" 49 | path: "{{ sd.path }}" 50 | with_items: "{{ gluster.vars.storage_domains }}" 51 | loop_control: 52 | loop_var: sd 53 | when: "gluster is defined and 'hosts' in gluster and 'vars' in gluster" 54 | 
-------------------------------------------------------------------------------- /hooks/enginevm_after_engine_setup/README.md: -------------------------------------------------------------------------------- 1 | # USAGE 2 | 3 | Place here playbooks to be executed on the engine VM after engine-setup finishes. -------------------------------------------------------------------------------- /hooks/enginevm_before_engine_setup/README.md: -------------------------------------------------------------------------------- 1 | # USAGE 2 | 3 | Place here playbooks to be executed on the engine VM before engine-setup starts. -------------------------------------------------------------------------------- /library/ovirt_disk_28.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # -*- coding: utf-8 -*- 3 | # 4 | # Copyright (c) 2016 Red Hat, Inc. 5 | # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) 6 | # 7 | # This is an override of the corresponding module in Ansible and should be removed once we upgrade to Ansible 2.8 8 | 9 | ANSIBLE_METADATA = {'metadata_version': '1.1', 10 | 'status': ['preview'], 11 | 'supported_by': 'community'} 12 | 13 | 14 | DOCUMENTATION = ''' 15 | --- 16 | module: ovirt_disk 17 | short_description: "Module to manage Virtual Machine and floating disks in oVirt/RHV" 18 | version_added: "2.2" 19 | author: "Ondra Machacek (@machacekondra)" 20 | description: 21 | - "Module to manage Virtual Machine and floating disks in oVirt/RHV." 22 | options: 23 | id: 24 | description: 25 | - "ID of the disk to manage. Either C(id) or C(name) is required." 26 | name: 27 | description: 28 | - "Name of the disk to manage. Either C(id) or C(name)/C(alias) is required." 29 | aliases: ['alias'] 30 | description: 31 | description: 32 | - "Description of the disk image to manage." 
33 | version_added: "2.5" 34 | vm_name: 35 | description: 36 | - "Name of the Virtual Machine to manage. Either C(vm_id) or C(vm_name) is required if C(state) is I(attached) or I(detached)." 37 | vm_id: 38 | description: 39 | - "ID of the Virtual Machine to manage. Either C(vm_id) or C(vm_name) is required if C(state) is I(attached) or I(detached)." 40 | state: 41 | description: 42 | - "Should the Virtual Machine disk be present/absent/attached/detached." 43 | choices: ['present', 'absent', 'attached', 'detached'] 44 | default: 'present' 45 | download_image_path: 46 | description: 47 | - "Path on a file system where disk should be downloaded." 48 | - "Note that you must have an valid oVirt/RHV engine CA in your system trust store 49 | or you must provide it in C(ca_file) parameter." 50 | - "Note that the disk is not downloaded when the file already exists, 51 | but you can forcibly download the disk when using C(force) I (true)." 52 | version_added: "2.3" 53 | upload_image_path: 54 | description: 55 | - "Path to disk image, which should be uploaded." 56 | - "Note that currently we support only compatibility version 0.10 of the qcow disk." 57 | - "Note that you must have an valid oVirt/RHV engine CA in your system trust store 58 | or you must provide it in C(ca_file) parameter." 59 | - "Note that there is no reliable way to achieve idempotency, so 60 | if you want to upload the disk even if the disk with C(id) or C(name) exists, 61 | then please use C(force) I(true). If you will use C(force) I(false), which 62 | is default, then the disk image won't be uploaded." 63 | version_added: "2.3" 64 | size: 65 | description: 66 | - "Size of the disk. Size should be specified using IEC standard units. 67 | For example 10GiB, 1024MiB, etc." 68 | - "Size can be only increased, not decreased." 69 | interface: 70 | description: 71 | - "Driver of the storage interface." 72 | - "It's required parameter when creating the new disk." 
73 | choices: ['virtio', 'ide', 'virtio_scsi'] 74 | default: 'virtio' 75 | format: 76 | description: 77 | - Specify format of the disk. 78 | - Note that this option isn't idempotent as it's not currently possible to change format of the disk via API. 79 | choices: ['raw', 'cow'] 80 | content_type: 81 | description: 82 | - Specify if the disk is a data disk or ISO image or a one of a the Hosted Engine disk types 83 | - The Hosted Engine disk content types are available with Engine 4.3+ and Ansible 2.8 84 | choices: ['data', 'iso', 'hosted_engine', 'hosted_engine_sanlock', 'hosted_engine_metadata', 'hosted_engine_configuration'] 85 | default: 'data' 86 | version_added: "2.8" 87 | sparse: 88 | required: False 89 | type: bool 90 | version_added: "2.5" 91 | description: 92 | - "I(True) if the disk should be sparse (also known as I(thin provision)). 93 | If the parameter is omitted, cow disks will be created as sparse and raw disks as I(preallocated)" 94 | - Note that this option isn't idempotent as it's not currently possible to change sparseness of the disk via API. 95 | storage_domain: 96 | description: 97 | - "Storage domain name where disk should be created. By default storage is chosen by oVirt/RHV engine." 98 | storage_domains: 99 | description: 100 | - "Storage domain names where disk should be copied." 101 | - "C(**IMPORTANT**)" 102 | - "There is no reliable way to achieve idempotency, so every time 103 | you specify this parameter the disks are copied, so please handle 104 | your playbook accordingly to not copy the disks all the time. This 105 | is valid only for VM and floating disks, template disks works 106 | as expected." 107 | version_added: "2.3" 108 | force: 109 | description: 110 | - "Please take a look at C(image_path) documentation to see the correct 111 | usage of this parameter." 112 | version_added: "2.3" 113 | type: bool 114 | profile: 115 | description: 116 | - "Disk profile name to be attached to disk. 
By default profile is chosen by oVirt/RHV engine." 117 | quota_id: 118 | description: 119 | - "Disk quota ID to be used for disk. By default quota is chosen by oVirt/RHV engine." 120 | version_added: "2.5" 121 | bootable: 122 | description: 123 | - "I(True) if the disk should be bootable. By default when disk is created it isn't bootable." 124 | type: bool 125 | shareable: 126 | description: 127 | - "I(True) if the disk should be shareable. By default when disk is created it isn't shareable." 128 | type: bool 129 | logical_unit: 130 | description: 131 | - "Dictionary which describes LUN to be directly attached to VM:" 132 | - "C(address) - Address of the storage server. Used by iSCSI." 133 | - "C(port) - Port of the storage server. Used by iSCSI." 134 | - "C(target) - iSCSI target." 135 | - "C(lun_id) - LUN id." 136 | - "C(username) - CHAP Username to be used to access storage server. Used by iSCSI." 137 | - "C(password) - CHAP Password of the user to be used to access storage server. Used by iSCSI." 138 | - "C(storage_type) - Storage type either I(fcp) or I(iscsi)." 139 | sparsify: 140 | description: 141 | - "I(True) if the disk should be sparsified." 142 | - "Sparsification frees space in the disk image that is not used by 143 | its filesystem. As a result, the image will occupy less space on 144 | the storage." 145 | - "Note that this parameter isn't idempotent, as it's not possible 146 | to check if the disk should be or should not be sparsified." 147 | version_added: "2.4" 148 | type: bool 149 | openstack_volume_type: 150 | description: 151 | - "Name of the openstack volume type. This is valid when working 152 | with cinder." 153 | version_added: "2.4" 154 | image_provider: 155 | description: 156 | - "When C(state) is I(exported) disk is exported to given Glance image provider." 
157 | - "C(**IMPORTANT**)" 158 | - "There is no reliable way to achieve idempotency, so every time 159 | you specify this parameter the disk is exported, so please handle 160 | your playbook accordingly to not export the disk all the time. 161 | This option is valid only for template disks." 162 | version_added: "2.4" 163 | host: 164 | description: 165 | - "When the hypervisor name is specified the newly created disk or 166 | an existing disk will refresh its information about the 167 | underlying storage( Disk size, Serial, Product ID, Vendor ID ...) 168 | The specified host will be used for gathering the storage 169 | related information. This option is only valid for passthrough 170 | disks. This option requires at least the logical_unit.id to be 171 | specified" 172 | version_added: "2.8" 173 | wipe_after_delete: 174 | description: 175 | - "If the disk's Wipe After Delete is enabled, then the disk is first wiped." 176 | type: bool 177 | activate: 178 | description: 179 | - I(True) if the disk should be activated. 
180 | version_added: "2.8" 181 | type: bool 182 | extends_documentation_fragment: ovirt 183 | ''' 184 | 185 | 186 | EXAMPLES = ''' 187 | # Examples don't contain auth parameter for simplicity, 188 | # look at ovirt_auth module to see how to reuse authentication: 189 | # Create and attach new disk to VM 190 | - ovirt_disk: 191 | name: myvm_disk 192 | vm_name: rhel7 193 | size: 10GiB 194 | format: cow 195 | interface: virtio 196 | storage_domain: data 197 | # Attach logical unit to VM rhel7 198 | - ovirt_disk: 199 | vm_name: rhel7 200 | logical_unit: 201 | target: iqn.2016-08-09.brq.str-01:omachace 202 | id: 1IET_000d0001 203 | address: 10.34.63.204 204 | interface: virtio 205 | # Detach disk from VM 206 | - ovirt_disk: 207 | state: detached 208 | name: myvm_disk 209 | vm_name: rhel7 210 | size: 10GiB 211 | format: cow 212 | interface: virtio 213 | # Change Disk Name 214 | - ovirt_disk: 215 | id: 00000000-0000-0000-0000-000000000000 216 | storage_domain: data 217 | name: "new_disk_name" 218 | vm_name: rhel7 219 | # Upload local image to disk and attach it to vm: 220 | # Since Ansible 2.3 221 | - ovirt_disk: 222 | name: mydisk 223 | vm_name: myvm 224 | interface: virtio 225 | size: 10GiB 226 | format: cow 227 | image_path: /path/to/mydisk.qcow2 228 | storage_domain: data 229 | # Download disk to local file system: 230 | # Since Ansible 2.3 231 | - ovirt_disk: 232 | id: 7de90f31-222c-436c-a1ca-7e655bd5b60c 233 | download_image_path: /home/user/mydisk.qcow2 234 | # Export disk as image to Glance domain 235 | # Since Ansible 2.4 236 | - ovirt_disks: 237 | id: 7de90f31-222c-436c-a1ca-7e655bd5b60c 238 | image_provider: myglance 239 | state: exported 240 | # Defining a specific quota while creating a disk image: 241 | # Since Ansible 2.5 242 | - ovirt_quotas_facts: 243 | data_center: Default 244 | name: myquota 245 | - ovirt_disk: 246 | name: mydisk 247 | size: 10GiB 248 | storage_domain: data 249 | description: somedescriptionhere 250 | quota_id: "{{ ovirt_quotas[0]['id'] 
}}" 251 | # Upload an ISO image 252 | # Since Ansible 2.8 253 | - ovirt_disk: 254 | name: myiso 255 | upload_image_path: /path/to/iso/image 256 | storage_domain: data 257 | size: 4 GiB 258 | wait: true 259 | bootable: true 260 | format: raw 261 | content_type: iso 262 | ''' 263 | 264 | 265 | RETURN = ''' 266 | id: 267 | description: "ID of the managed disk" 268 | returned: "On success if disk is found." 269 | type: str 270 | sample: 7de90f31-222c-436c-a1ca-7e655bd5b60c 271 | disk: 272 | description: "Dictionary of all the disk attributes. Disk attributes can be found on your oVirt/RHV instance 273 | at following url: http://ovirt.github.io/ovirt-engine-api-model/master/#types/disk." 274 | returned: "On success if disk is found and C(vm_id) or C(vm_name) wasn't passed." 275 | type: dict 276 | disk_attachment: 277 | description: "Dictionary of all the disk attachment attributes. Disk attachment attributes can be found 278 | on your oVirt/RHV instance at following url: 279 | http://ovirt.github.io/ovirt-engine-api-model/master/#types/disk_attachment." 280 | returned: "On success if disk is found and C(vm_id) or C(vm_name) was passed and VM was found." 281 | type: dict 282 | ''' 283 | 284 | import os 285 | import time 286 | import traceback 287 | import ssl 288 | 289 | from ansible.module_utils.six.moves.http_client import HTTPSConnection, IncompleteRead 290 | from ansible.module_utils.six.moves.urllib.parse import urlparse 291 | try: 292 | import ovirtsdk4.types as otypes 293 | except ImportError: 294 | pass 295 | from ansible.module_utils.basic import AnsibleModule 296 | from ansible.module_utils.ovirt import ( 297 | BaseModule, 298 | check_sdk, 299 | check_params, 300 | create_connection, 301 | convert_to_bytes, 302 | equal, 303 | follow_link, 304 | get_id_by_name, 305 | ovirt_full_argument_spec, 306 | search_by_name, 307 | wait, 308 | ) 309 | 310 | 311 | def _search_by_lun(disks_service, lun_id): 312 | """ 313 | Find disk by LUN ID. 
314 | """ 315 | res = [ 316 | disk for disk in disks_service.list(search='disk_type=lun') if ( 317 | disk.lun_storage.id == lun_id 318 | ) 319 | ] 320 | return res[0] if res else None 321 | 322 | 323 | def transfer(connection, module, direction, transfer_func): 324 | transfers_service = connection.system_service().image_transfers_service() 325 | transfer = transfers_service.add( 326 | otypes.ImageTransfer( 327 | image=otypes.Image( 328 | id=module.params['id'], 329 | ), 330 | direction=direction, 331 | ) 332 | ) 333 | transfer_service = transfers_service.image_transfer_service(transfer.id) 334 | 335 | try: 336 | # After adding a new transfer for the disk, the transfer's status will be INITIALIZING. 337 | # Wait until the init phase is over. The actual transfer can start when its status is "Transferring". 338 | while transfer.phase == otypes.ImageTransferPhase.INITIALIZING: 339 | time.sleep(module.params['poll_interval']) 340 | transfer = transfer_service.get() 341 | 342 | proxy_url = urlparse(transfer.proxy_url) 343 | context = ssl.create_default_context() 344 | auth = module.params['auth'] 345 | if auth.get('insecure'): 346 | context.check_hostname = False 347 | context.verify_mode = ssl.CERT_NONE 348 | elif auth.get('ca_file'): 349 | context.load_verify_locations(cafile=auth.get('ca_file')) 350 | 351 | proxy_connection = HTTPSConnection( 352 | proxy_url.hostname, 353 | proxy_url.port, 354 | context=context, 355 | ) 356 | 357 | transfer_func( 358 | transfer_service, 359 | proxy_connection, 360 | proxy_url, 361 | transfer.signed_ticket 362 | ) 363 | return True 364 | finally: 365 | transfer_service.finalize() 366 | while transfer.phase in [ 367 | otypes.ImageTransferPhase.TRANSFERRING, 368 | otypes.ImageTransferPhase.FINALIZING_SUCCESS, 369 | ]: 370 | time.sleep(module.params['poll_interval']) 371 | transfer = transfer_service.get() 372 | if transfer.phase in [ 373 | otypes.ImageTransferPhase.UNKNOWN, 374 | otypes.ImageTransferPhase.FINISHED_FAILURE, 375 | 
otypes.ImageTransferPhase.FINALIZING_FAILURE, 376 | otypes.ImageTransferPhase.CANCELLED, 377 | ]: 378 | raise Exception( 379 | "Error occurred while uploading image. The transfer is in %s" % transfer.phase 380 | ) 381 | if module.params.get('logical_unit'): 382 | disks_service = connection.system_service().disks_service() 383 | wait( 384 | service=disks_service.service(module.params['id']), 385 | condition=lambda d: d.status == otypes.DiskStatus.OK, 386 | wait=module.params['wait'], 387 | timeout=module.params['timeout'], 388 | ) 389 | 390 | 391 | def download_disk_image(connection, module): 392 | def _transfer(transfer_service, proxy_connection, proxy_url, transfer_ticket): 393 | BUF_SIZE = 128 * 1024 394 | transfer_headers = { 395 | 'Authorization': transfer_ticket, 396 | } 397 | proxy_connection.request( 398 | 'GET', 399 | proxy_url.path, 400 | headers=transfer_headers, 401 | ) 402 | r = proxy_connection.getresponse() 403 | path = module.params["download_image_path"] 404 | image_size = int(r.getheader('Content-Length')) 405 | with open(path, "wb") as mydisk: 406 | pos = 0 407 | while pos < image_size: 408 | to_read = min(image_size - pos, BUF_SIZE) 409 | chunk = r.read(to_read) 410 | if not chunk: 411 | raise RuntimeError("Socket disconnected") 412 | mydisk.write(chunk) 413 | pos += len(chunk) 414 | 415 | return transfer( 416 | connection, 417 | module, 418 | otypes.ImageTransferDirection.DOWNLOAD, 419 | transfer_func=_transfer, 420 | ) 421 | 422 | 423 | def upload_disk_image(connection, module): 424 | def _transfer(transfer_service, proxy_connection, proxy_url, transfer_ticket): 425 | BUF_SIZE = 128 * 1024 426 | path = module.params['upload_image_path'] 427 | 428 | image_size = os.path.getsize(path) 429 | proxy_connection.putrequest("PUT", proxy_url.path) 430 | proxy_connection.putheader('Content-Length', "%d" % (image_size,)) 431 | proxy_connection.endheaders() 432 | with open(path, "rb") as disk: 433 | pos = 0 434 | while pos < image_size: 435 | to_read = 
min(image_size - pos, BUF_SIZE) 436 | chunk = disk.read(to_read) 437 | if not chunk: 438 | transfer_service.pause() 439 | raise RuntimeError("Unexpected end of file at pos=%d" % pos) 440 | proxy_connection.send(chunk) 441 | pos += len(chunk) 442 | 443 | return transfer( 444 | connection, 445 | module, 446 | otypes.ImageTransferDirection.UPLOAD, 447 | transfer_func=_transfer, 448 | ) 449 | 450 | 451 | class DisksModule(BaseModule): 452 | 453 | def build_entity(self): 454 | logical_unit = self._module.params.get('logical_unit') 455 | disk = otypes.Disk( 456 | id=self._module.params.get('id'), 457 | name=self._module.params.get('name'), 458 | description=self._module.params.get('description'), 459 | format=otypes.DiskFormat( 460 | self._module.params.get('format') 461 | ) if self._module.params.get('format') else None, 462 | content_type=otypes.DiskContentType( 463 | self._module.params.get('content_type') 464 | ) if self._module.params.get('content_type') else None, 465 | sparse=self._module.params.get( 466 | 'sparse' 467 | ) if self._module.params.get( 468 | 'sparse' 469 | ) is not None else self._module.params.get('format') != 'raw', 470 | openstack_volume_type=otypes.OpenStackVolumeType( 471 | name=self.param('openstack_volume_type') 472 | ) if self.param('openstack_volume_type') else None, 473 | provisioned_size=convert_to_bytes( 474 | self._module.params.get('size') 475 | ), 476 | storage_domains=[ 477 | otypes.StorageDomain( 478 | name=self._module.params.get('storage_domain'), 479 | ), 480 | ], 481 | quota=otypes.Quota(id=self._module.params.get('quota_id')) if self.param('quota_id') else None, 482 | shareable=self._module.params.get('shareable'), 483 | wipe_after_delete=self.param('wipe_after_delete'), 484 | lun_storage=otypes.HostStorage( 485 | type=otypes.StorageType( 486 | logical_unit.get('storage_type', 'iscsi') 487 | ), 488 | logical_units=[ 489 | otypes.LogicalUnit( 490 | address=logical_unit.get('address'), 491 | port=logical_unit.get('port', 3260), 
492 | target=logical_unit.get('target'), 493 | id=logical_unit.get('id'), 494 | username=logical_unit.get('username'), 495 | password=logical_unit.get('password'), 496 | ) 497 | ], 498 | ) if logical_unit else None, 499 | ) 500 | if hasattr(disk, 'initial_size') and self._module.params['upload_image_path']: 501 | disk.initial_size = convert_to_bytes( 502 | self._module.params.get('size') 503 | ) 504 | 505 | return disk 506 | 507 | def update_storage_domains(self, disk_id): 508 | changed = False 509 | disk_service = self._service.service(disk_id) 510 | disk = disk_service.get() 511 | sds_service = self._connection.system_service().storage_domains_service() 512 | 513 | # We don't support move© for non file based storages: 514 | if disk.storage_type != otypes.DiskStorageType.IMAGE: 515 | return changed 516 | 517 | # Initiate move: 518 | if self._module.params['storage_domain']: 519 | new_disk_storage_id = get_id_by_name(sds_service, self._module.params['storage_domain']) 520 | changed = self.action( 521 | action='move', 522 | entity=disk, 523 | action_condition=lambda d: new_disk_storage_id != d.storage_domains[0].id, 524 | wait_condition=lambda d: d.status == otypes.DiskStatus.OK, 525 | storage_domain=otypes.StorageDomain( 526 | id=new_disk_storage_id, 527 | ), 528 | post_action=lambda _: time.sleep(self._module.params['poll_interval']), 529 | )['changed'] 530 | 531 | if self._module.params['storage_domains']: 532 | for sd in self._module.params['storage_domains']: 533 | new_disk_storage = search_by_name(sds_service, sd) 534 | changed = changed or self.action( 535 | action='copy', 536 | entity=disk, 537 | action_condition=( 538 | lambda disk: new_disk_storage.id not in [sd.id for sd in disk.storage_domains] 539 | ), 540 | wait_condition=lambda disk: disk.status == otypes.DiskStatus.OK, 541 | storage_domain=otypes.StorageDomain( 542 | id=new_disk_storage.id, 543 | ), 544 | )['changed'] 545 | 546 | return changed 547 | 548 | def _update_check(self, entity): 549 | 
return ( 550 | equal(self._module.params.get('name'), entity.name) and 551 | equal(self._module.params.get('description'), entity.description) and 552 | equal(self.param('quota_id'), getattr(entity.quota, 'id', None)) and 553 | equal(convert_to_bytes(self._module.params.get('size')), entity.provisioned_size) and 554 | equal(self._module.params.get('shareable'), entity.shareable) and 555 | equal(self.param('wipe_after_delete'), entity.wipe_after_delete) 556 | ) 557 | 558 | 559 | class DiskAttachmentsModule(DisksModule): 560 | 561 | def build_entity(self): 562 | return otypes.DiskAttachment( 563 | disk=super(DiskAttachmentsModule, self).build_entity(), 564 | interface=otypes.DiskInterface( 565 | self._module.params.get('interface') 566 | ) if self._module.params.get('interface') else None, 567 | bootable=self._module.params.get('bootable'), 568 | active=self.param('activate'), 569 | ) 570 | 571 | def update_check(self, entity): 572 | return ( 573 | super(DiskAttachmentsModule, self)._update_check(follow_link(self._connection, entity.disk)) and 574 | equal(self._module.params.get('interface'), str(entity.interface)) and 575 | equal(self._module.params.get('bootable'), entity.bootable) and 576 | equal(self.param('activate'), entity.active) 577 | ) 578 | 579 | 580 | def searchable_attributes(module): 581 | """ 582 | Return all searchable disk attributes passed to module. 
583 | """ 584 | attributes = { 585 | 'name': module.params.get('name'), 586 | 'Storage.name': module.params.get('storage_domain'), 587 | 'vm_names': module.params.get('vm_name'), 588 | } 589 | return dict((k, v) for k, v in attributes.items() if v is not None) 590 | 591 | 592 | def main(): 593 | argument_spec = ovirt_full_argument_spec( 594 | state=dict( 595 | choices=['present', 'absent', 'attached', 'detached', 'exported'], 596 | default='present' 597 | ), 598 | id=dict(default=None), 599 | name=dict(default=None, aliases=['alias']), 600 | description=dict(default=None), 601 | vm_name=dict(default=None), 602 | vm_id=dict(default=None), 603 | size=dict(default=None), 604 | interface=dict(default=None,), 605 | storage_domain=dict(default=None), 606 | storage_domains=dict(default=None, type='list'), 607 | profile=dict(default=None), 608 | quota_id=dict(default=None), 609 | format=dict(default='cow', choices=['raw', 'cow']), 610 | content_type=dict( 611 | default='data', 612 | choices=['data', 'iso', 'hosted_engine', 'hosted_engine_sanlock', 'hosted_engine_metadata', 'hosted_engine_configuration'] 613 | ), 614 | sparse=dict(default=None, type='bool'), 615 | bootable=dict(default=None, type='bool'), 616 | shareable=dict(default=None, type='bool'), 617 | logical_unit=dict(default=None, type='dict'), 618 | download_image_path=dict(default=None), 619 | upload_image_path=dict(default=None, aliases=['image_path']), 620 | force=dict(default=False, type='bool'), 621 | sparsify=dict(default=None, type='bool'), 622 | openstack_volume_type=dict(default=None), 623 | image_provider=dict(default=None), 624 | host=dict(default=None), 625 | wipe_after_delete=dict(type='bool', default=None), 626 | activate=dict(default=None, type='bool'), 627 | ) 628 | module = AnsibleModule( 629 | argument_spec=argument_spec, 630 | supports_check_mode=True, 631 | ) 632 | 633 | lun = module.params.get('logical_unit') 634 | host = module.params['host'] 635 | # Fail when host is specified with the LUN 
id. Lun id is needed to identify 636 | # an existing disk if already available in the environment. 637 | if (host and lun is None) or (host and lun.get("id") is None): 638 | module.fail_json( 639 | msg="Can not use parameter host ({0!s}) without " 640 | "specifying the logical_unit id".format(host) 641 | ) 642 | 643 | check_sdk(module) 644 | check_params(module) 645 | 646 | try: 647 | disk = None 648 | state = module.params['state'] 649 | auth = module.params.get('auth') 650 | connection = create_connection(auth) 651 | disks_service = connection.system_service().disks_service() 652 | disks_module = DisksModule( 653 | connection=connection, 654 | module=module, 655 | service=disks_service, 656 | ) 657 | 658 | if lun: 659 | disk = _search_by_lun(disks_service, lun.get('id')) 660 | 661 | ret = None 662 | # First take care of creating the disk, if needed: 663 | if state in ('present', 'detached', 'attached'): 664 | ret = disks_module.create( 665 | entity=disk, 666 | search_params=searchable_attributes(module), 667 | result_state=otypes.DiskStatus.OK if lun is None else None, 668 | fail_condition=lambda d: d.status == otypes.DiskStatus.ILLEGAL if lun is None else False, 669 | ) 670 | is_new_disk = ret['changed'] 671 | ret['changed'] = ret['changed'] or disks_module.update_storage_domains(ret['id']) 672 | # We need to pass ID to the module, so in case we want detach/attach disk 673 | # we have this ID specified to attach/detach method: 674 | module.params['id'] = ret['id'] if disk is None else disk.id 675 | 676 | # Upload disk image in case it's a new disk or the force parameter is passed: 677 | if module.params['upload_image_path'] and (is_new_disk or module.params['force']): 678 | uploaded = upload_disk_image(connection, module) 679 | ret['changed'] = ret['changed'] or uploaded 680 | # Download disk image in case the file doesn't exist or the force parameter is passed: 681 | if ( 682 | module.params['download_image_path'] and (not os.path.isfile(module.params['download_image_path']) 
or module.params['force']) 683 | ): 684 | downloaded = download_disk_image(connection, module) 685 | ret['changed'] = ret['changed'] or downloaded 686 | 687 | # Disk sparsify, only if disk is of image type: 688 | if not module.check_mode: 689 | disk = disks_service.disk_service(module.params['id']).get() 690 | if disk.storage_type == otypes.DiskStorageType.IMAGE: 691 | ret = disks_module.action( 692 | action='sparsify', 693 | action_condition=lambda d: module.params['sparsify'], 694 | wait_condition=lambda d: d.status == otypes.DiskStatus.OK, 695 | ) 696 | 697 | # Export disk as image to glance domain 698 | elif state == 'exported': 699 | disk = disks_module.search_entity() 700 | if disk is None: 701 | module.fail_json( 702 | msg="Can not export given disk '%s', it doesn't exist" % 703 | module.params.get('name') or module.params.get('id') 704 | ) 705 | if disk.storage_type == otypes.DiskStorageType.IMAGE: 706 | ret = disks_module.action( 707 | action='export', 708 | action_condition=lambda d: module.params['image_provider'], 709 | wait_condition=lambda d: d.status == otypes.DiskStatus.OK, 710 | storage_domain=otypes.StorageDomain(name=module.params['image_provider']), 711 | ) 712 | elif state == 'absent': 713 | ret = disks_module.remove() 714 | 715 | # If VM was passed attach/detach disks to/from the VM: 716 | if module.params.get('vm_id') is not None or module.params.get('vm_name') is not None and state != 'absent': 717 | vms_service = connection.system_service().vms_service() 718 | 719 | # If `vm_id` isn't specified, find VM by name: 720 | vm_id = module.params['vm_id'] 721 | if vm_id is None: 722 | vm_id = getattr(search_by_name(vms_service, module.params['vm_name']), 'id', None) 723 | 724 | if vm_id is None: 725 | module.fail_json( 726 | msg="VM don't exists, please create it first." 
727 | ) 728 | 729 | disk_attachments_service = vms_service.vm_service(vm_id).disk_attachments_service() 730 | disk_attachments_module = DiskAttachmentsModule( 731 | connection=connection, 732 | module=module, 733 | service=disk_attachments_service, 734 | changed=ret['changed'] if ret else False, 735 | ) 736 | 737 | if state == 'present' or state == 'attached': 738 | ret = disk_attachments_module.create() 739 | if lun is None: 740 | wait( 741 | service=disk_attachments_service.service(ret['id']), 742 | condition=lambda d: follow_link(connection, d.disk).status == otypes.DiskStatus.OK, 743 | wait=module.params['wait'], 744 | timeout=module.params['timeout'], 745 | ) 746 | elif state == 'detached': 747 | ret = disk_attachments_module.remove() 748 | 749 | # When the host parameter is specified and the disk is not being 750 | # removed, refresh the information about the LUN. 751 | if state != 'absent' and host: 752 | hosts_service = connection.system_service().hosts_service() 753 | host_id = get_id_by_name(hosts_service, host) 754 | disks_service.disk_service(disk.id).refresh_lun(otypes.Host(id=host_id)) 755 | 756 | module.exit_json(**ret) 757 | except Exception as e: 758 | module.fail_json(msg=str(e), exception=traceback.format_exc()) 759 | finally: 760 | connection.close(logout=auth.get('token') is None) 761 | 762 | 763 | if __name__ == "__main__": 764 | main() 765 | -------------------------------------------------------------------------------- /meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | galaxy_info: 3 | role_name: hosted_engine_setup 4 | author: Ido Rosenzwig, Simone Tiraboschi 5 | description: Role to deploy oVirt Hosted-Engine 6 | company: Red Hat, Inc. 
7 | 8 | license: Apache License 2.0 9 | 10 | min_ansible_version: 2.7 11 | 12 | platforms: 13 | - name: EL 14 | versions: 15 | - 7 16 | - name: Fedora 17 | versions: 18 | - all 19 | 20 | galaxy_tags: [ovirt, rhv, rhev, virtualization] 21 | 22 | dependencies: [] 23 | -------------------------------------------------------------------------------- /ovirt-ansible-hosted-engine-setup.spec.in: -------------------------------------------------------------------------------- 1 | %global rolename hosted_engine_setup 2 | %global roleprefix ovirt. 3 | %global legacy_roleprefix oVirt. 4 | %global legacy_rolename hosted-engine-setup 5 | %global ansible_roles_dir ansible/roles 6 | 7 | Name: @PACKAGE_NAME@ 8 | Summary: Ansible role to install required packages for oVirt Engine deployment, generate answerfile and run engine-setup with it. 9 | Version: @RPM_VERSION@ 10 | Release: @RPM_RELEASE@%{?release_suffix}%{?dist} 11 | Source0: http://resources.ovirt.org/pub/src/@PACKAGE_NAME@/@PACKAGE_NAME@-@PACKAGE_VERSION@.tar.gz 12 | License: ASL 2.0 13 | Group: Virtualization/Management 14 | BuildArch: noarch 15 | Url: http://www.ovirt.org 16 | 17 | Requires: ansible >= 2.9.11 18 | %if 0%{?fedora} >= 30 || 0%{?rhel} >= 8 19 | Requires: python3-netaddr 20 | Requires: python3-jmespath 21 | Requires: python3-passlib 22 | %else 23 | Requires: python-netaddr 24 | Requires: python-jmespath 25 | Requires: python-passlib 26 | %endif 27 | Requires: ovirt-ansible-engine-setup >= 1.1.5 28 | 29 | %description 30 | This Ansible role installs required packages for oVirt Hosted-Engine deployment. 31 | 32 | %pretrans -p 33 | -- Remove the legacy directory before installing the symlink. 
This is a known issue in RPM: 34 | -- https://fedoraproject.org/wiki/Packaging:Directory_Replacement 35 | rolename_legacy = "%{_datadir}/%{ansible_roles_dir}/%{roleprefix}%{legacy_rolename}" 36 | rolename_legacy_uppercase="%{_datadir}/%{ansible_roles_dir}/%{legacy_roleprefix}%{legacy_rolename}" 37 | 38 | st1 = posix.stat(rolename_legacy) 39 | if st1 and st1.type == "directory" then 40 | os.execute('rm -rf "'..rolename_legacy..'"') 41 | end 42 | 43 | st2 = posix.stat(rolename_legacy_uppercase) 44 | if st2 and st2.type == "directory" then 45 | os.execute('rm -rf "'..rolename_legacy_uppercase..'"') 46 | end 47 | 48 | %prep 49 | %setup -c -q 50 | 51 | %build 52 | 53 | %install 54 | export PKG_DATA_DIR_ORIG=%{_datadir}/%{ansible_roles_dir}/%{roleprefix}%{rolename} 55 | export PKG_DATA_DIR=%{buildroot}$PKG_DATA_DIR_ORIG 56 | export PKG_DOC_DIR=%{buildroot}%{_pkgdocdir} 57 | export ROLENAME_LEGACY=%{buildroot}%{_datadir}/%{ansible_roles_dir}/%{roleprefix}%{legacy_rolename} 58 | export ROLENAME_LEGACY_UPPERCASE=%{buildroot}%{_datadir}/%{ansible_roles_dir}/%{legacy_roleprefix}%{legacy_rolename} 59 | sh build.sh install 60 | 61 | %files 62 | %{_datadir}/%{ansible_roles_dir}/%{roleprefix}%{rolename} 63 | %{_datadir}/%{ansible_roles_dir}/%{legacy_roleprefix}%{legacy_rolename} 64 | %{_datadir}/%{ansible_roles_dir}/%{roleprefix}%{legacy_rolename} 65 | 66 | # Turn off the brp-python-bytecompile for python2 and python3 67 | %define __os_install_post %{___build_post} 68 | 69 | %doc README.md 70 | %doc examples/ 71 | 72 | %license LICENSE 73 | 74 | %changelog 75 | -------------------------------------------------------------------------------- /tasks/alter_libvirt_default_net_configuration.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Parse libvirt default network configuration 3 | virt_net: 4 | command: get_xml 5 | name: default 6 | register: default_net_xml 7 | - debug: var=default_net_xml.stdout 8 | 9 | - name: IPv6 
configuration 10 | block: 11 | - name: Remove IPv4 configuration 12 | xml: 13 | xmlstring: "{{ default_net_xml.get_xml }}" 14 | xpath: /network/ip 15 | state: absent 16 | register: editednet_noipv4 17 | - name: Configure it as an isolated network 18 | xml: 19 | xmlstring: "{{ editednet_noipv4.xmlstring }}" 20 | xpath: /network/forward 21 | state: absent 22 | register: editednet_isolated 23 | - name: Edit libvirt default network configuration, set IPv6 address 24 | xml: 25 | xmlstring: "{{ editednet_isolated.xmlstring }}" 26 | xpath: /network/ip[@family='ipv6'] 27 | attribute: address 28 | value: "{{ he_ipv6_subnet_prefix + '::1' }}" 29 | register: editednet1 30 | - name: Edit libvirt default network configuration, set IPv6 prefix 31 | xml: 32 | xmlstring: "{{ editednet1.xmlstring }}" 33 | xpath: /network/ip[@family='ipv6'] 34 | attribute: prefix 35 | value: "64" 36 | register: editednet2 37 | - debug: var=editednet2 38 | - name: Edit libvirt default network configuration, enable DHCPv6 39 | xml: 40 | xmlstring: "{{ editednet2.xmlstring }}" 41 | xpath: /network/ip[@family='ipv6']/dhcp/range 42 | attribute: start 43 | value: "{{ he_ipv6_subnet_prefix + '::10' }}" 44 | register: editednet3 45 | - debug: var=editednet3 46 | - name: Edit libvirt default network configuration, set DHCPv6 range 47 | xml: 48 | xmlstring: "{{ editednet3.xmlstring }}" 49 | xpath: /network/ip[@family='ipv6']/dhcp/range 50 | attribute: end 51 | value: "{{ he_ipv6_subnet_prefix + '::ff' }}" 52 | register: finaledit6 53 | - debug: var=finaledit 54 | when: ipv6_deployment|bool 55 | 56 | - name: IPv4 configuration 57 | block: 58 | - name: Edit libvirt default network configuration, change default address 59 | xml: 60 | xmlstring: "{{ default_net_xml.get_xml }}" 61 | xpath: /network/ip 62 | attribute: address 63 | value: "{{ he_ipv4_subnet_prefix + '.1' }}" 64 | register: editednet1 65 | - name: Edit libvirt default network configuration, change DHCP start range 66 | xml: 67 | xmlstring: "{{ 
editednet1.xmlstring }}" 68 | xpath: /network/ip/dhcp/range 69 | attribute: start 70 | value: "{{ he_ipv4_subnet_prefix + '.2' }}" 71 | register: editednet2 72 | - name: Edit libvirt default network configuration, change DHCP end range 73 | xml: 74 | xmlstring: "{{ editednet2.xmlstring }}" 75 | xpath: /network/ip/dhcp/range 76 | attribute: end 77 | value: "{{ he_ipv4_subnet_prefix + '.254' }}" 78 | register: finaledit4 79 | when: not ipv6_deployment|bool 80 | 81 | - name: Update libvirt default network configuration, destroy 82 | virt_net: 83 | command: destroy 84 | name: default 85 | - name: Update libvirt default network configuration, undefine 86 | virt_net: 87 | command: undefine 88 | name: default 89 | ignore_errors: true 90 | - name: Update libvirt default network configuration, define 91 | virt_net: 92 | command: define 93 | name: default 94 | xml: "{{ finaledit6.xmlstring if ipv6_deployment else finaledit4.xmlstring }}" 95 | - name: Activate default libvirt network 96 | virt_net: 97 | name: default 98 | state: active 99 | register: virt_net_out 100 | -------------------------------------------------------------------------------- /tasks/apply_openscap_profile.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Initialize OpenSCAP variables 3 | set_fact: 4 | oscap_dir: "/usr/share/xml/scap/ssg/content" 5 | oscap_dist: "{{ ansible_distribution | replace('RedHat', 'rhel') | lower }}" 6 | oscap_ver: "{{ ansible_distribution_major_version if ansible_distribution != 'Fedora' else '' }}" 7 | - name: Set OpenSCAP datastream path 8 | set_fact: 9 | oscap_datastream: "{{ oscap_dir }}/ssg-{{ oscap_dist }}{{ oscap_ver }}-ds.xml" 10 | - debug: var=oscap_datastream 11 | - name: Verify OpenSCAP datastream 12 | stat: 13 | path: "{{ oscap_datastream }}" 14 | register: oscap_ds_stat 15 | - name: Set default OpenSCAP profile 16 | shell: >- 17 | set -euo pipefail && oscap info --profiles {{ oscap_datastream }} | 18 | grep 
-Ei "(standard|disa)" | sort | tail -1 | cut -d':' -f1 19 | register: oscap_profile 20 | changed_when: true 21 | when: oscap_ds_stat.stat.exists 22 | - debug: var=oscap_profile 23 | - name: Apply OpenSCAP profile 24 | command: >- 25 | oscap xccdf eval --profile {{ oscap_profile.stdout }} --remediate 26 | --report /root/openscap-report.html {{ oscap_datastream }} 27 | failed_when: false 28 | ignore_errors: true 29 | changed_when: true 30 | - name: Reset PermitRootLogin for sshd 31 | lineinfile: dest=/etc/ssh/sshd_config 32 | regexp="^\s*PermitRootLogin" 33 | line="PermitRootLogin yes" 34 | state=present 35 | - name: Reboot the engine VM to ensure that FIPS is enabled 36 | reboot: 37 | reboot_timeout: 1200 38 | - block: 39 | - name: Check if FIPS is enabled 40 | command: sysctl -n crypto.fips_enabled 41 | changed_when: true 42 | register: he_fips_enabled 43 | - debug: var=he_fips_enabled 44 | - name: Enforce FIPS mode 45 | fail: 46 | msg: "FIPS mode is not enabled as required" 47 | when: he_fips_enabled.stdout != "1" 48 | when: ansible_distribution is search("RedHat") 49 | -------------------------------------------------------------------------------- /tasks/auth_revoke.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Always revoke the SSO token 3 | ovirt_auth: 4 | state: absent 5 | ovirt_auth: "{{ ovirt_sso_auth }}" 6 | ignore_errors: true 7 | -------------------------------------------------------------------------------- /tasks/auth_sso.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Obtain SSO token using username/password credentials 3 | # TODO: remove library/ovirt_auth.py when Ansible 2.5 is out explicitly requiring it 4 | environment: 5 | OVIRT_URL: https://{{ he_fqdn }}/ovirt-engine/api 6 | OVIRT_USERNAME: admin@internal 7 | OVIRT_PASSWORD: "{{ he_admin_password }}" 8 | ovirt_auth: 9 | insecure: true 10 | register: ovirt_sso_auth 11 | until: 
ovirt_sso_auth is succeeded 12 | retries: 50 13 | delay: 10 14 | -------------------------------------------------------------------------------- /tasks/bootstrap_local_vm/01_prepare_routing_rules.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Prepare routing rules 3 | block: 4 | - name: Check IPv6 5 | set_fact: 6 | ipv6_deployment: >- 7 | {{ true if he_host_ip not in target_address_v4.stdout_lines and 8 | he_host_ip in target_address_v6.stdout_lines 9 | else false }} 10 | - include_tasks: validate_ip_prefix.yml 11 | - include_tasks: alter_libvirt_default_net_configuration.yml 12 | # all of the next is a workaround for a network issue: 13 | # vdsm installation breaks the routing by defining separate 14 | # routing table for ovirtmgmt. But we need to enable communication 15 | # between virbr0 and ovirtmgmt 16 | - name: Start libvirt 17 | service: 18 | name: libvirtd 19 | state: started 20 | enabled: true 21 | - name: Activate default libvirt network 22 | virt_net: 23 | name: default 24 | state: active 25 | register: virt_net_out 26 | - debug: var=virt_net_out 27 | - name: Get libvirt interfaces 28 | virt_net: 29 | command: facts 30 | - name: Get routing rules, IPv4 31 | command: ip -j rule 32 | environment: "{{ he_cmd_lang }}" 33 | register: route_rules_ipv4 34 | changed_when: true 35 | - debug: var=route_rules_ipv4 36 | - name: Get routing rules, IPv6 37 | command: ip -6 rule 38 | environment: "{{ he_cmd_lang }}" 39 | register: route_rules_ipv6 40 | changed_when: true 41 | when: ipv6_deployment|bool 42 | - debug: var=route_rules_ipv6 43 | - name: Save bridge name 44 | set_fact: 45 | virbr_default: "{{ ansible_libvirt_networks['default']['bridge'] }}" 46 | - name: Wait for the bridge to appear on the host 47 | command: ip link show {{ virbr_default }} 48 | environment: "{{ he_cmd_lang }}" 49 | changed_when: true 50 | register: ip_link_show_bridge 51 | until: ip_link_show_bridge.rc == 0 52 | retries: 30 53 | 
delay: 3 54 | - name: Refresh network facts 55 | setup: 56 | tags: ['skip_ansible_lint'] 57 | - name: Fetch IPv4 CIDR for {{ virbr_default }} 58 | set_fact: 59 | virbr_cidr_ipv4: >- 60 | {{ (hostvars[inventory_hostname]['ansible_'+virbr_default]['ipv4']['address']+'/' 61 | +hostvars[inventory_hostname]['ansible_'+virbr_default]['ipv4']['netmask']) |ipv4('host/prefix') }} 62 | when: not ipv6_deployment|bool 63 | - debug: var=virbr_cidr_ipv4 64 | - name: Fetch IPv6 CIDR for {{ virbr_default }} 65 | set_fact: 66 | virbr_cidr_ipv6: >- 67 | {{ (hostvars[inventory_hostname]['ansible_'+virbr_default]['ipv6'][0]['address']+'/'+ 68 | hostvars[inventory_hostname]['ansible_'+virbr_default]['ipv6'][0]['prefix']) | 69 | ipv6('host/prefix') if 'ipv6' in hostvars[inventory_hostname]['ansible_'+virbr_default] else None }} 70 | when: ipv6_deployment|bool 71 | - debug: var=virbr_cidr_ipv6 72 | - name: Add IPv4 outbound route rules 73 | command: ip rule add from {{ virbr_cidr_ipv4 }} priority 101 table main 74 | environment: "{{ he_cmd_lang }}" 75 | register: result 76 | when: >- 77 | not ipv6_deployment|bool and 78 | route_rules_ipv4.stdout | from_json | 79 | selectattr('priority', 'equalto', 101) | 80 | selectattr('src', 'equalto', virbr_cidr_ipv4 | ipaddr('address') ) | 81 | list | length == 0 82 | changed_when: true 83 | - debug: var=result 84 | - name: Add IPv4 inbound route rules 85 | command: ip rule add from all to {{ virbr_cidr_ipv4 }} priority 100 table main 86 | environment: "{{ he_cmd_lang }}" 87 | register: result 88 | changed_when: true 89 | when: >- 90 | not ipv6_deployment|bool and 91 | route_rules_ipv4.stdout | from_json | 92 | selectattr('priority', 'equalto', 100) | 93 | selectattr('dst', 'equalto', virbr_cidr_ipv4 | ipaddr('address') ) | 94 | list | length == 0 95 | - debug: var=result 96 | - name: Add IPv6 outbound route rules 97 | command: ip -6 rule add from {{ virbr_cidr_ipv6 }} priority 101 table main 98 | environment: "{{ he_cmd_lang }}" 99 | register: 
result 100 | when: ipv6_deployment|bool and "\"101:\tfrom \"+virbr_cidr_ipv6+\" lookup main\" not in route_rules_ipv6.stdout" 101 | changed_when: true 102 | - debug: var=result 103 | - name: Add IPv6 inbound route rules 104 | command: ip -6 rule add from all to {{ virbr_cidr_ipv6 }} priority 100 table main 105 | environment: "{{ he_cmd_lang }}" 106 | register: result 107 | changed_when: true 108 | when: >- 109 | ipv6_deployment|bool and "\"100:\tfrom all to \"+virbr_cidr_ipv6+\" lookup main\" not in route_rules_ipv6.stdout" 110 | - debug: var=result 111 | -------------------------------------------------------------------------------- /tasks/bootstrap_local_vm/02_create_local_vm.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Create hosted engine local vm 3 | block: 4 | - name: Fetch the value of HOST_KEY_CHECKING 5 | set_fact: host_key_checking="{{ lookup('config', 'HOST_KEY_CHECKING') }}" 6 | - debug: var=host_key_checking 7 | - name: Get the username running the deploy 8 | become: false 9 | command: whoami 10 | register: username_on_host 11 | - name: Register the engine FQDN as a host 12 | add_host: 13 | name: "{{ he_fqdn }}" 14 | groups: engine 15 | ansible_connection: smart 16 | ansible_ssh_extra_args: >- 17 | -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null {% if he_ansible_host_name != "localhost" %} 18 | -o ProxyCommand="ssh -W %h:%p -q 19 | {% if not host_key_checking %} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null {% endif %} 20 | {{ username_on_host.stdout }}@{{ he_ansible_host_name }}" {% endif %} 21 | ansible_ssh_pass: "{{ he_appliance_password }}" 22 | ansible_user: root 23 | no_log: true 24 | - name: Initial tasks 25 | block: 26 | - name: Get host unique id 27 | shell: | 28 | if [ -e /etc/vdsm/vdsm.id ]; 29 | then cat /etc/vdsm/vdsm.id; 30 | elif [ -e /proc/device-tree/system-id ]; 31 | then cat /proc/device-tree/system-id; #ppc64le 32 | else dmidecode -s system-uuid; 
33 | fi; 34 | environment: "{{ he_cmd_lang }}" 35 | changed_when: true 36 | register: unique_id_out 37 | - name: Create directory for local VM 38 | tempfile: 39 | state: directory 40 | path: "{{ he_local_vm_dir_path }}" 41 | prefix: "{{ he_local_vm_dir_prefix }}" 42 | register: otopi_localvm_dir 43 | - name: Set local vm dir path 44 | set_fact: 45 | he_local_vm_dir: "{{ otopi_localvm_dir.path }}" 46 | - name: Fix local VM directory permission 47 | file: 48 | state: directory 49 | path: "{{ he_local_vm_dir }}" 50 | owner: vdsm 51 | group: kvm 52 | mode: 0775 53 | - include_tasks: install_appliance.yml 54 | when: he_appliance_ova is none or he_appliance_ova|length == 0 55 | - name: Register appliance PATH 56 | set_fact: 57 | he_appliance_ova_path: "{{ he_appliance_ova }}" 58 | when: he_appliance_ova is not none and he_appliance_ova|length > 0 59 | - debug: var=he_appliance_ova_path 60 | - name: Check available space on local VM directory 61 | shell: df -k --output=avail "{{ he_local_vm_dir_path }}" | grep -v Avail | cat 62 | environment: "{{ he_cmd_lang }}" 63 | changed_when: true 64 | register: local_vm_dir_space_out 65 | - name: Check appliance size 66 | shell: gzip -l "{{ he_appliance_ova_path }}" | grep -v uncompressed | awk '{print $2}' 67 | environment: "{{ he_cmd_lang }}" 68 | changed_when: true 69 | register: appliance_size 70 | - name: Ensure we have enough space to extract the appliance 71 | assert: 72 | that: 73 | - "local_vm_dir_space_out.stdout_lines[0]|int * 1024 > appliance_size.stdout_lines[0]|int * 1.1" 74 | msg: > 75 | {{ he_local_vm_dir_path }} doesn't provide enough free space to extract the 76 | engine appliance: {{ local_vm_dir_space_out.stdout_lines[0]|int / 1024 | int }} Mb 77 | are available while {{ appliance_size.stdout_lines[0]|int / 1024 / 1024 * 1.1 | int }} Mb 78 | are required. 
79 | - name: Extract appliance to local VM directory 80 | unarchive: 81 | remote_src: true 82 | src: "{{ he_appliance_ova_path }}" 83 | dest: "{{ he_local_vm_dir }}" 84 | extra_opts: ['--sparse'] 85 | - include_tasks: get_local_vm_disk_path.yml 86 | - name: Get appliance disk size 87 | command: qemu-img info --output=json {{ local_vm_disk_path }} 88 | environment: "{{ he_cmd_lang }}" 89 | changed_when: true 90 | register: qemu_img_out 91 | - debug: var=qemu_img_out 92 | - name: Parse qemu-img output 93 | set_fact: 94 | virtual_size={{ qemu_img_out.stdout|from_json|json_query('"virtual-size"') }} 95 | register: otopi_appliance_disk_size 96 | - debug: var=virtual_size 97 | - name: Hash the appliance root password 98 | set_fact: 99 | he_hashed_appliance_password: "{{ he_appliance_password | string | password_hash('sha512') }}" 100 | no_log: true 101 | - name: Create cloud init user-data and meta-data files 102 | template: 103 | src: "{{ item.src }}" 104 | dest: "{{ item.dest }}" 105 | with_items: 106 | - {src: templates/user-data.j2, dest: "{{ he_local_vm_dir }}/user-data"} 107 | - {src: templates/meta-data.j2, dest: "{{ he_local_vm_dir }}/meta-data"} 108 | - {src: templates/network-config-dhcp.j2, dest: "{{ he_local_vm_dir }}/network-config"} 109 | - name: Create ISO disk 110 | command: >- 111 | mkisofs -output {{ he_local_vm_dir }}/seed.iso -volid cidata -joliet -rock -input-charset utf-8 112 | {{ he_local_vm_dir }}/meta-data {{ he_local_vm_dir }}/user-data 113 | {{ he_local_vm_dir }}/network-config 114 | environment: "{{ he_cmd_lang }}" 115 | changed_when: true 116 | - name: Create local VM 117 | command: >- 118 | virt-install -n {{ he_vm_name }}Local --os-variant rhel8.0 --virt-type kvm --memory {{ he_mem_size_MB }} 119 | --vcpus {{ he_vcpus }} --network network=default,mac={{ he_vm_mac_addr }},model=virtio 120 | --disk {{ local_vm_disk_path }} --import --disk path={{ he_local_vm_dir }}/seed.iso,device=cdrom 121 | --noautoconsole --rng /dev/random --graphics vnc 
--video vga --sound none --controller usb,model=none 122 | --memballoon none --boot hd,menu=off --clock kvmclock_present=yes 123 | environment: "{{ he_cmd_lang }}" 124 | register: create_local_vm 125 | changed_when: true 126 | - debug: var=create_local_vm 127 | - name: Get local VM IP 128 | shell: virsh -r net-dhcp-leases default | grep -i {{ he_vm_mac_addr }} | awk '{ print $5 }' | cut -f1 -d'/' 129 | environment: "{{ he_cmd_lang }}" 130 | register: local_vm_ip 131 | until: local_vm_ip.stdout_lines|length >= 1 132 | retries: 90 133 | delay: 10 134 | changed_when: true 135 | - debug: var=local_vm_ip 136 | - name: Remove leftover entries in /etc/hosts for the local VM 137 | lineinfile: 138 | dest: /etc/hosts 139 | regexp: "# temporary entry added by hosted-engine-setup for the bootstrap VM$" 140 | state: absent 141 | - name: Create an entry in /etc/hosts for the local VM 142 | lineinfile: 143 | dest: /etc/hosts 144 | line: 145 | "{{ local_vm_ip.stdout_lines[0] }} \ 146 | {{ he_fqdn }} # temporary entry added by hosted-engine-setup for the bootstrap VM" 147 | insertbefore: BOF 148 | backup: true 149 | - name: Wait for SSH to restart on the local VM 150 | wait_for: 151 | host='{{ he_fqdn }}' 152 | port=22 153 | delay=30 154 | timeout=300 155 | rescue: 156 | - include_tasks: clean_localvm_dir.yml 157 | - include_tasks: clean_local_storage_pools.yml 158 | - name: Notify the user about a failure 159 | fail: 160 | msg: > 161 | The system may not be provisioned according to the playbook 162 | results: please check the logs for the issue, 163 | fix accordingly or re-deploy from scratch. 
164 | -------------------------------------------------------------------------------- /tasks/bootstrap_local_vm/03_engine_initial_tasks.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Initial engine tasks 3 | block: 4 | - name: Wait for the local VM 5 | wait_for_connection: 6 | delay: 5 7 | timeout: 3600 8 | - name: Add an entry for this host on /etc/hosts on the local VM 9 | lineinfile: 10 | dest: /etc/hosts 11 | line: >- 12 | {{ hostvars[he_ansible_host_name]['he_host_ip'] }} {{ hostvars[he_ansible_host_name]['he_host_address'] }} 13 | - name: Set FQDN 14 | command: hostnamectl set-hostname {{ he_fqdn }} 15 | environment: "{{ he_cmd_lang }}" 16 | changed_when: true 17 | - name: Force the local VM FQDN to temporary resolve on the natted network address 18 | lineinfile: 19 | path: /etc/hosts 20 | line: 21 | "{{ hostvars[he_ansible_host_name]['local_vm_ip']['stdout_lines'][0] }} {{ he_fqdn }} # hosted-engine-setup-{{ \ 22 | hostvars[he_ansible_host_name]['he_local_vm_dir'] }}" 23 | - name: Reconfigure IPv6 default gateway 24 | command: ip -6 route add default via "{{ he_ipv6_subnet_prefix + '::1' }}" 25 | environment: "{{ he_cmd_lang }}" 26 | changed_when: true 27 | when: hostvars[he_ansible_host_name]['ipv6_deployment']|bool 28 | - name: Restore sshd reverse DNS lookups 29 | lineinfile: 30 | path: /etc/ssh/sshd_config 31 | regexp: '^UseDNS' 32 | line: "UseDNS yes" 33 | - name: Add lines to answerfile 34 | lineinfile: 35 | path: /root/ovirt-engine-answers 36 | line: "{{ item }}" 37 | no_log: true 38 | with_items: 39 | - "OVESETUP_CONFIG/adminPassword=str:{{ he_admin_password }}" 40 | - name: Add lines to answerfile 41 | lineinfile: 42 | path: /root/ovirt-engine-answers 43 | line: "{{ item }}" 44 | no_log: true 45 | with_items: 46 | - "OVESETUP_DB/password=str:{{ he_db_password }}" 47 | when: he_db_password is defined 48 | - name: Add lines to answerfile 49 | lineinfile: 50 | path: /root/ovirt-engine-answers 
51 | line: "{{ item }}" 52 | no_log: true 53 | with_items: 54 | - "OVESETUP_DWH_DB/password=str:{{ he_dwh_db_password }}" 55 | when: he_dwh_db_password is defined 56 | - name: Import OpenSCAP task 57 | import_tasks: apply_openscap_profile.yml 58 | when: he_apply_openscap_profile|bool 59 | - name: Include before engine-setup custom tasks files for the engine VM 60 | include_tasks: "{{ item }}" 61 | with_fileglob: "hooks/enginevm_before_engine_setup/*.yml" 62 | register: include_before_engine_setup_results 63 | - debug: var=include_before_engine_setup_results 64 | - name: Restore a backup 65 | block: 66 | - include_tasks: restore_backup.yml 67 | when: he_restore_from_file is defined and he_restore_from_file 68 | rescue: 69 | - name: Sync on engine machine 70 | command: sync 71 | changed_when: true 72 | - name: Fetch logs from the engine VM 73 | import_tasks: fetch_engine_logs.yml 74 | ignore_errors: true 75 | delegate_to: "{{ he_ansible_host_name }}" 76 | - name: Get local VM dir path 77 | set_fact: 78 | he_local_vm_dir={{ hostvars[he_ansible_host_name]['he_local_vm_dir'] }} 79 | - name: Clean bootstrap VM 80 | import_tasks: clean_localvm_dir.yml 81 | delegate_to: "{{ he_ansible_host_name }}" 82 | - name: Clean local storage pools 83 | import_tasks: clean_local_storage_pools.yml 84 | delegate_to: "{{ he_ansible_host_name }}" 85 | - name: Notify the user about a failure 86 | fail: 87 | msg: > 88 | There was a failure deploying the engine on the local engine VM. 89 | The system may not be provisioned according to the playbook 90 | results: please check the logs for the issue, 91 | fix accordingly or re-deploy from scratch. 
92 | -------------------------------------------------------------------------------- /tasks/bootstrap_local_vm/04_engine_final_tasks.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Final engine tasks 3 | block: 4 | - name: Include after engine-setup custom tasks files for the engine VM 5 | include_tasks: "{{ item }}" 6 | with_fileglob: "hooks/enginevm_after_engine_setup/*.yml" 7 | register: include_after_engine_setup_results 8 | - debug: var=include_after_engine_setup_results 9 | # After a restart the engine has a 5 minute grace time, 10 | # other actions like electing a new SPM host or reconstructing 11 | # the master storage domain could require more time 12 | - name: Wait for the engine to reach a stable condition 13 | wait_for: timeout=600 14 | when: he_restore_from_file is defined and he_restore_from_file 15 | - name: Configure LibgfApi support 16 | command: engine-config -s LibgfApiSupported=true --cver=4.2 17 | environment: "{{ he_cmd_lang }}" 18 | register: libgfapi_support_out 19 | changed_when: true 20 | when: he_enable_libgfapi|bool 21 | - debug: var=libgfapi_support_out 22 | - name: Save original OvfUpdateIntervalInMinutes 23 | shell: "engine-config -g OvfUpdateIntervalInMinutes | cut -d' ' -f2 > /root/OvfUpdateIntervalInMinutes.txt" 24 | environment: "{{ he_cmd_lang }}" 25 | changed_when: true 26 | - name: Set OVF update interval to 1 minute 27 | command: engine-config -s OvfUpdateIntervalInMinutes=1 28 | environment: "{{ he_cmd_lang }}" 29 | changed_when: true 30 | - name: Allow the webadmin UI to be accessed over the first host 31 | block: 32 | - name: Saving original value 33 | replace: 34 | path: /etc/ovirt-engine/engine.conf.d/11-setup-sso.conf 35 | regexp: '^(SSO_ALTERNATE_ENGINE_FQDNS=.*)' 36 | replace: '#\1 # pre hosted-engine-setup' 37 | - name: Adding new SSO_ALTERNATE_ENGINE_FQDNS line 38 | lineinfile: 39 | path: /etc/ovirt-engine/engine.conf.d/11-setup-sso.conf 40 | line: 
'SSO_ALTERNATE_ENGINE_FQDNS="{{ he_host_address }}" # hosted-engine-setup' 41 | - name: Restart ovirt-engine service for changed OVF Update configuration and LibgfApi support 42 | systemd: 43 | state: restarted 44 | name: ovirt-engine 45 | register: restart_out 46 | - debug: var=restart_out 47 | - name: Mask cloud-init services to speed up future boot 48 | systemd: 49 | masked: true 50 | name: "{{ item }}" 51 | with_items: 52 | - cloud-init-local 53 | - cloud-init 54 | rescue: 55 | - name: Sync on engine machine 56 | command: sync 57 | changed_when: true 58 | - name: Fetch logs from the engine VM 59 | import_tasks: fetch_engine_logs.yml 60 | ignore_errors: true 61 | delegate_to: "{{ he_ansible_host_name }}" 62 | - name: Get local VM dir path 63 | set_fact: 64 | he_local_vm_dir={{ hostvars[he_ansible_host_name]['he_local_vm_dir'] }} 65 | - name: Clean bootstrap VM 66 | import_tasks: clean_localvm_dir.yml 67 | delegate_to: "{{ he_ansible_host_name }}" 68 | - name: Clean local storage pools 69 | import_tasks: clean_local_storage_pools.yml 70 | delegate_to: "{{ he_ansible_host_name }}" 71 | - name: Notify the user about a failure 72 | fail: 73 | msg: 74 | There was a failure deploying the engine on the local engine VM. 75 | The system may not be provisioned according to the playbook results, 76 | please check the logs for the issue, 77 | fix accordingly or re-deploy from scratch. 78 | ... 79 | -------------------------------------------------------------------------------- /tasks/bootstrap_local_vm/05_add_host.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Add host 3 | block: 4 | - name: Wait for ovirt-engine service to start 5 | uri: 6 | url: http://{{ he_fqdn }}/ovirt-engine/services/health 7 | return_content: true 8 | register: engine_status 9 | until: "'DB Up!Welcome to Health Status!' 
in engine_status.content" 10 | retries: 30 11 | delay: 20 12 | - debug: var=engine_status 13 | - name: Open a port on firewalld 14 | firewalld: 15 | port: "{{ he_webui_forward_port }}/tcp" 16 | permanent: false 17 | immediate: true 18 | state: enabled 19 | - name: Expose engine VM webui over a local port via ssh port forwarding 20 | command: >- 21 | sshpass -e ssh -tt -o ServerAliveInterval=5 -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -g -L 22 | {{ he_webui_forward_port }}:{{ he_fqdn }}:443 {{ he_fqdn }} 23 | environment: 24 | - "{{ he_cmd_lang }}" 25 | - SSHPASS: "{{ he_appliance_password }}" 26 | changed_when: true 27 | async: 86400 28 | poll: 0 29 | register: sshpf 30 | - debug: var=sshpf 31 | - name: Evaluate temporary bootstrap engine URL 32 | set_fact: bootstrap_engine_url="https://{{ he_host_address }}:{{ he_webui_forward_port }}/ovirt-engine/" 33 | - debug: 34 | msg: >- 35 | The bootstrap engine is temporary accessible over {{ bootstrap_engine_url }} 36 | - name: Detect VLAN ID 37 | shell: ip -d link show {{ he_bridge_if }} | grep 'vlan ' | grep -Po 'id \K[\d]+' | cat 38 | environment: "{{ he_cmd_lang }}" 39 | register: vlan_id_out 40 | changed_when: true 41 | - debug: var=vlan_id_out 42 | - name: Set Engine public key as authorized key without validating the TLS/SSL certificates 43 | authorized_key: 44 | user: root 45 | state: present 46 | key: https://{{ he_fqdn }}/ovirt-engine/services/pki-resource?resource=engine-certificate&format=OPENSSH-PUBKEY 47 | validate_certs: false 48 | - include_tasks: auth_sso.yml 49 | - name: Ensure that the target datacenter is present 50 | ovirt_datacenter: 51 | state: present 52 | name: "{{ he_data_center }}" 53 | compatibility_version: "{{ he_data_center_comp_version | default(omit) }}" 54 | wait: true 55 | local: false 56 | auth: "{{ ovirt_auth }}" 57 | register: dc_result_presence 58 | - name: Ensure that the target cluster is present in the target datacenter 59 | ovirt_cluster: 60 | state: present 61 | 
name: "{{ he_cluster }}" 62 | compatibility_version: "{{ he_cluster_comp_version | default(omit) }}" 63 | data_center: "{{ he_data_center }}" 64 | cpu_type: "{{ he_cluster_cpu_type | default(omit) }}" 65 | wait: true 66 | auth: "{{ ovirt_auth }}" 67 | register: cluster_result_presence 68 | - name: Check actual cluster location 69 | fail: 70 | msg: >- 71 | A cluster named '{{ he_cluster }}' has been created earlier in a different 72 | datacenter and cluster moving is still not supported. 73 | You can avoid this specifying a different cluster name; 74 | please fix accordingly and try again. 75 | when: cluster_result_presence.cluster.data_center.id != dc_result_presence.datacenter.id 76 | - name: Enable GlusterFS at cluster level 77 | ovirt_cluster: 78 | data_center: "{{ he_data_center }}" 79 | name: "{{ he_cluster }}" 80 | compatibility_version: "{{ he_cluster_comp_version | default(omit) }}" 81 | auth: "{{ ovirt_auth }}" 82 | virt: true 83 | gluster: true 84 | fence_skip_if_gluster_bricks_up: true 85 | fence_skip_if_gluster_quorum_not_met: true 86 | when: he_enable_hc_gluster_service is defined and he_enable_hc_gluster_service 87 | - name: Set VLAN ID at datacenter level 88 | ovirt_network: 89 | data_center: "{{ he_data_center }}" 90 | name: "{{ he_mgmt_network }}" 91 | vlan_tag: "{{ vlan_id_out.stdout }}" 92 | auth: "{{ ovirt_auth }}" 93 | when: vlan_id_out.stdout|length > 0 94 | - name: Get active list of active firewalld zones 95 | shell: set -euo pipefail && firewall-cmd --get-active-zones | grep -v "^\s*interfaces" 96 | environment: "{{ he_cmd_lang }}" 97 | register: active_f_zone 98 | changed_when: true 99 | - name: Configure libvirt firewalld zone 100 | firewalld: 101 | zone: libvirt 102 | service: "{{ service_item }}" 103 | permanent: true 104 | immediate: true 105 | state: enabled 106 | with_items: 107 | - vdsm 108 | - libvirt-tls 109 | - ovirt-imageio 110 | - ovirt-vmconsole 111 | - ssh 112 | - vdsm 113 | loop_control: 114 | loop_var: service_item 115 | 
when: "'libvirt' in active_f_zone.stdout_lines" 116 | - name: Add host 117 | ovirt_host: 118 | cluster: "{{ he_cluster }}" 119 | name: "{{ he_host_name }}" 120 | state: present 121 | public_key: true 122 | address: "{{ he_host_address }}" 123 | auth: "{{ ovirt_auth }}" 124 | async: 1 125 | poll: 0 126 | - name: Pause the execution to let the user interactively reconfigure the host 127 | block: 128 | - name: Let the user connect to the bootstrap engine to manually fix host configuration 129 | debug: 130 | msg: >- 131 | You can now connect to {{ bootstrap_engine_url }} and check the status of this host and 132 | eventually remediate it, please continue only when the host is listed as 'up' 133 | - include_tasks: pause_execution.yml 134 | when: he_pause_host|bool 135 | # refresh the auth token after a long operation to avoid having it expired 136 | - include_tasks: auth_revoke.yml 137 | - include_tasks: auth_sso.yml 138 | - name: Wait for the host to be up 139 | ovirt_host_info: 140 | pattern: name={{ he_host_name }} 141 | auth: "{{ ovirt_auth }}" 142 | register: host_result_up_check 143 | until: >- 144 | host_result_up_check is succeeded and 145 | host_result_up_check.ovirt_hosts|length >= 1 and 146 | ( 147 | host_result_up_check.ovirt_hosts[0].status == 'up' or 148 | host_result_up_check.ovirt_hosts[0].status == 'non_operational' 149 | ) 150 | retries: 120 151 | delay: 10 152 | ignore_errors: true 153 | - debug: var=host_result_up_check 154 | - name: Notify the user about a failure 155 | fail: 156 | msg: >- 157 | Host is not up, please check logs, perhaps also on the engine machine 158 | when: host_result_up_check is failed 159 | - name: Handle deployment failure 160 | block: 161 | - set_fact: host_id={{ host_result_up_check.ovirt_hosts[0].id }} 162 | - name: Collect error events from the Engine 163 | ovirt_event_info: 164 | auth: "{{ ovirt_auth }}" 165 | search: "severity>=error" 166 | register: error_events 167 | 168 | - name: Generate the error message from the 
engine events 169 | set_fact: 170 | error_description: >- 171 | {% for event in error_events.ovirt_events | groupby('code') %} 172 | {% if event[1][0].host.id == host_id %} 173 | code {{ event[0] }}: {{ event[1][0].description }}, 174 | {% endif %} 175 | {% endfor %} 176 | ignore_errors: true 177 | 178 | - name: Fail with error description 179 | fail: 180 | msg: >- 181 | The host has been set in non_operational status, 182 | deployment errors: {{ error_description }} 183 | fix accordingly and re-deploy. 184 | when: error_description is defined 185 | 186 | - name: Fail with generic error 187 | fail: 188 | msg: >- 189 | The host has been set in non_operational status, 190 | please check engine logs, 191 | more info can be found in the engine logs, 192 | fix accordingly and re-deploy. 193 | when: error_description is not defined 194 | 195 | when: >- 196 | host_result_up_check is succeeded and 197 | host_result_up_check.ovirt_hosts|length >= 1 and 198 | host_result_up_check.ovirt_hosts[0].status == 'non_operational' 199 | rescue: 200 | - name: Sync on engine machine 201 | command: sync 202 | changed_when: true 203 | - name: Fetch logs from the engine VM 204 | include_tasks: fetch_engine_logs.yml 205 | ignore_errors: true 206 | - include_tasks: clean_localvm_dir.yml 207 | - include_tasks: clean_local_storage_pools.yml 208 | - name: Notify the user about a failure 209 | fail: 210 | msg: > 211 | The system may not be provisioned according to the playbook 212 | results: please check the logs for the issue, 213 | fix accordingly or re-deploy from scratch. 
214 | -------------------------------------------------------------------------------- /tasks/clean_local_storage_pools.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Clean storage-pool 3 | block: 4 | - name: Destroy local storage-pool {{ he_local_vm_dir | basename }} 5 | command: >- 6 | virsh -c qemu:///system?authfile={{ he_libvirt_authfile }} 7 | pool-destroy {{ he_local_vm_dir | basename }} 8 | environment: "{{ he_cmd_lang }}" 9 | changed_when: true 10 | - name: Undefine local storage-pool {{ he_local_vm_dir | basename }} 11 | command: >- 12 | virsh -c qemu:///system?authfile={{ he_libvirt_authfile }} 13 | pool-undefine {{ he_local_vm_dir | basename }} 14 | environment: "{{ he_cmd_lang }}" 15 | changed_when: true 16 | - name: Destroy local storage-pool {{ local_vm_disk_path.split('/')[5] }} 17 | command: >- 18 | virsh -c qemu:///system?authfile={{ he_libvirt_authfile }} 19 | pool-destroy {{ local_vm_disk_path.split('/')[5] }} 20 | environment: "{{ he_cmd_lang }}" 21 | changed_when: true 22 | - name: Undefine local storage-pool {{ local_vm_disk_path.split('/')[5] }} 23 | command: >- 24 | virsh -c qemu:///system?authfile={{ he_libvirt_authfile }} 25 | pool-undefine {{ local_vm_disk_path.split('/')[5] }} 26 | environment: "{{ he_cmd_lang }}" 27 | changed_when: true 28 | ignore_errors: true 29 | -------------------------------------------------------------------------------- /tasks/clean_localvm_dir.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Remove local vm dir 3 | file: 4 | path: "{{ he_local_vm_dir }}" 5 | state: absent 6 | register: rm_localvm_dir 7 | - debug: var=rm_localvm_dir 8 | - name: Remove temporary entry in /etc/hosts for the local VM 9 | lineinfile: 10 | dest: /etc/hosts 11 | regexp: "# temporary entry added by hosted-engine-setup for the bootstrap VM$" 12 | state: absent 13 | 
-------------------------------------------------------------------------------- /tasks/create_storage_domain.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Create hosted engine local vm 3 | block: 4 | - name: Wait for the storage interface to be up 5 | command: ip -j link show '{{ he_storage_if }}' 6 | register: storage_if_result_up_check 7 | until: >- 8 | storage_if_result_up_check.stdout|from_json|map(attribute='operstate')|join('') == 'UP' 9 | retries: 120 10 | delay: 5 11 | delegate_to: "{{ he_ansible_host_name }}" 12 | when: (he_domain_type == "glusterfs" or he_domain_type == "nfs") and he_storage_if is not none 13 | - name: Check local VM dir stat 14 | stat: 15 | path: "{{ he_local_vm_dir }}" 16 | register: local_vm_dir_stat 17 | - name: Enforce local VM dir existence 18 | fail: 19 | msg: "Local VM dir '{{ he_local_vm_dir }}' doesn't exist" 20 | when: not local_vm_dir_stat.stat.exists 21 | - include_tasks: auth_sso.yml 22 | - name: Fetch host facts 23 | ovirt_host_info: 24 | pattern: name={{ he_host_name }} 25 | auth: "{{ ovirt_auth }}" 26 | register: host_result 27 | until: >- 28 | host_result and 'ovirt_hosts' in host_result 29 | and host_result.ovirt_hosts|length >= 1 and 30 | 'up' in host_result.ovirt_hosts[0].status 31 | retries: 50 32 | delay: 10 33 | - debug: var=host_result 34 | - name: Fetch cluster ID 35 | set_fact: cluster_id="{{ host_result.ovirt_hosts[0].cluster.id }}" 36 | - name: Fetch cluster facts 37 | ovirt_cluster_info: 38 | auth: "{{ ovirt_auth }}" 39 | register: cluster_facts 40 | - debug: var=cluster_facts 41 | - name: Fetch Datacenter facts 42 | ovirt_datacenter_info: 43 | auth: "{{ ovirt_auth }}" 44 | register: datacenter_facts 45 | - debug: var=datacenter_facts 46 | - name: Fetch Datacenter ID 47 | set_fact: >- 48 | datacenter_id={{ cluster_facts.ovirt_clusters|json_query("[?id=='" + cluster_id + "'].data_center.id")|first }} 49 | - name: Fetch Datacenter name 50 | set_fact: 
>- 51 | datacenter_name={{ datacenter_facts.ovirt_datacenters|json_query("[?id=='" + datacenter_id + "'].name")|first }} 52 | - name: Add NFS storage domain 53 | ovirt_storage_domain: 54 | state: unattached 55 | name: "{{ he_storage_domain_name }}" 56 | host: "{{ he_host_name }}" 57 | data_center: "{{ datacenter_name }}" 58 | wait: true 59 | nfs: 60 | address: "{{ he_storage_domain_addr }}" 61 | path: "{{ he_storage_domain_path }}" 62 | mount_options: "{{ he_mount_options }}" 63 | version: "{{ he_nfs_version }}" 64 | auth: "{{ ovirt_auth }}" 65 | when: he_domain_type == "nfs" 66 | register: otopi_storage_domain_details_nfs 67 | - name: Add glusterfs storage domain 68 | ovirt_storage_domain: 69 | state: unattached 70 | name: "{{ he_storage_domain_name }}" 71 | host: "{{ he_host_name }}" 72 | data_center: "{{ datacenter_name }}" 73 | wait: true 74 | glusterfs: 75 | address: "{{ he_storage_domain_addr }}" 76 | path: "{{ he_storage_domain_path }}" 77 | mount_options: "{{ he_mount_options }}" 78 | auth: "{{ ovirt_auth }}" 79 | when: he_domain_type == "glusterfs" 80 | register: otopi_storage_domain_details_gluster 81 | - name: Add iSCSI storage domain 82 | ovirt_storage_domain: 83 | state: unattached 84 | name: "{{ he_storage_domain_name }}" 85 | host: "{{ he_host_name }}" 86 | data_center: "{{ datacenter_name }}" 87 | wait: true 88 | discard_after_delete: "{{ he_discard }}" 89 | # we are sending a single iSCSI path but, not sure if intended or if 90 | # it's bug, the engine is implicitly creating the storage domain 91 | # consuming all the path that are already connected on the host (we 92 | # cannot logout since there is not logout command in the rest API, see 93 | # https://bugzilla.redhat.com/show_bug.cgi?id=1535951 ). 
94 | iscsi: 95 | address: "{{ he_storage_domain_addr.split(',')|first }}" 96 | port: "{{ he_iscsi_portal_port.split(',')|first if he_iscsi_portal_port is string else he_iscsi_portal_port }}" 97 | target: "{{ he_iscsi_target }}" 98 | lun_id: "{{ he_lun_id }}" 99 | username: "{{ he_iscsi_username }}" 100 | password: "{{ he_iscsi_password }}" 101 | auth: "{{ ovirt_auth }}" 102 | when: he_domain_type == "iscsi" 103 | register: otopi_storage_domain_details_iscsi 104 | - name: Add Fibre Channel storage domain 105 | ovirt_storage_domain: 106 | state: unattached 107 | name: "{{ he_storage_domain_name }}" 108 | host: "{{ he_host_name }}" 109 | data_center: "{{ datacenter_name }}" 110 | wait: true 111 | discard_after_delete: "{{ he_discard }}" 112 | fcp: 113 | lun_id: "{{ he_lun_id }}" 114 | auth: "{{ ovirt_auth }}" 115 | register: otopi_storage_domain_details_fc 116 | when: he_domain_type == "fc" 117 | - name: Get storage domain details 118 | ovirt_storage_domain_info: 119 | pattern: name={{ he_storage_domain_name }} 120 | auth: "{{ ovirt_auth }}" 121 | register: storage_domain_details 122 | - debug: var=storage_domain_details 123 | - name: Find the appliance OVF 124 | find: 125 | paths: "{{ he_local_vm_dir }}/master" 126 | recurse: true 127 | patterns: ^.*.(?- 145 | {{ disk_size_xml.matches[0].Disk['{http://schemas.dmtf.org/ovf/envelope/1/}size']|int * 1024 * 1024 * 1024 + 146 | storage_domain_details.ovirt_storage_domains[0].critical_space_action_blocker|int * 147 | 1024 * 1024 * 1024 + 5 * 1024 * 1024 * 1024 }} 148 | # +5G: 2xOVF_STORE, lockspace, metadata, configuration 149 | - debug: var=required_size 150 | - name: Remove unsuitable storage domain 151 | ovirt_storage_domain: 152 | host: "{{ he_host_name }}" 153 | data_center: "{{ datacenter_name }}" 154 | name: "{{ he_storage_domain_name }}" 155 | wait: true 156 | state: absent 157 | destroy: true 158 | auth: "{{ ovirt_auth }}" 159 | when: storage_domain_details.ovirt_storage_domains[0].available|int < 
required_size|int 160 | register: remove_storage_domain_details 161 | - debug: var=remove_storage_domain_details 162 | - name: Check storage domain free space 163 | fail: 164 | msg: >- 165 | Error: the target storage domain contains only 166 | {{ storage_domain_details.ovirt_storage_domains[0].available|int / 1024 / 1024 / 1024 }}GiB of 167 | available space while a minimum of {{ required_size|int / 1024 / 1024 / 1024 }}GiB is required 168 | If you wish to use the current target storage domain by extending it, make sure it contains nothing 169 | before adding it. 170 | when: storage_domain_details.ovirt_storage_domains[0].available|int < required_size|int 171 | - name: Activate storage domain 172 | ovirt_storage_domain: 173 | host: "{{ he_host_name }}" 174 | data_center: "{{ datacenter_name }}" 175 | name: "{{ he_storage_domain_name }}" 176 | wait: true 177 | state: present 178 | auth: "{{ ovirt_auth }}" 179 | when: storage_domain_details.ovirt_storage_domains[0].available|int >= required_size|int 180 | register: otopi_storage_domain_details 181 | - debug: var=otopi_storage_domain_details 182 | ... 
183 | -------------------------------------------------------------------------------- /tasks/create_target_vm/01_create_target_hosted_engine_vm.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Create target Hosted Engine VM 3 | block: 4 | - name: Fetch the value of HOST_KEY_CHECKING 5 | set_fact: host_key_checking="{{ lookup('config', 'HOST_KEY_CHECKING') }}" 6 | - debug: var=host_key_checking 7 | - name: Get the username running the deploy 8 | become: false 9 | command: whoami 10 | register: username_on_host 11 | - name: Register the engine FQDN as a host 12 | add_host: 13 | name: "{{ he_fqdn }}" 14 | groups: engine 15 | ansible_connection: smart 16 | ansible_ssh_extra_args: >- 17 | -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null {% if he_ansible_host_name != "localhost" %} 18 | -o ProxyCommand="ssh -W %h:%p -q 19 | {% if not host_key_checking %} -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null {% endif %} 20 | {{ username_on_host.stdout }}@{{ he_ansible_host_name }}" {% endif %} 21 | ansible_ssh_pass: "{{ he_appliance_password }}" 22 | ansible_user: root 23 | no_log: true 24 | - include_tasks: auth_sso.yml 25 | - name: Get local VM IP 26 | shell: virsh -r net-dhcp-leases default | grep -i {{ he_vm_mac_addr }} | awk '{ print $5 }' | cut -f1 -d'/' 27 | environment: "{{ he_cmd_lang }}" 28 | register: local_vm_ip 29 | changed_when: true 30 | - debug: var=local_vm_ip 31 | - name: Fetch host facts 32 | ovirt_host_info: 33 | pattern: name={{ he_host_name }} status=up 34 | auth: "{{ ovirt_auth }}" 35 | register: host_result 36 | until: host_result is succeeded and host_result.ovirt_hosts|length >= 1 37 | retries: 50 38 | delay: 10 39 | - debug: var=host_result 40 | - name: Fetch Cluster ID 41 | set_fact: cluster_id="{{ host_result.ovirt_hosts[0].cluster.id }}" 42 | - name: Fetch Cluster facts 43 | ovirt_cluster_info: 44 | auth: "{{ ovirt_auth }}" 45 | register: cluster_facts 46 | - debug: 
var=cluster_facts 47 | - name: Fetch Datacenter facts 48 | ovirt_datacenter_info: 49 | auth: "{{ ovirt_auth }}" 50 | register: datacenter_facts 51 | - debug: var=datacenter_facts 52 | - name: Fetch Cluster name 53 | set_fact: cluster_name={{ cluster_facts.ovirt_clusters|json_query("[?id=='" + cluster_id + "'].name")|first }} 54 | - name: Fetch Datacenter ID 55 | set_fact: >- 56 | datacenter_id={{ cluster_facts.ovirt_clusters|json_query("[?id=='" + cluster_id + "'].data_center.id")|first }} 57 | - name: Fetch Datacenter name 58 | set_fact: >- 59 | datacenter_name={{ datacenter_facts.ovirt_datacenters|json_query("[?id=='" + datacenter_id + "'].name")|first }} 60 | - name: Parse Cluster details 61 | set_fact: 62 | cluster_cpu: >- 63 | {{ cluster_facts.ovirt_clusters|selectattr('id', 'match', '^'+cluster_id+'$')|map(attribute='cpu')|list|first }} 64 | cluster_version: >- 65 | {{ cluster_facts.ovirt_clusters|selectattr('id', 'match', '^'+cluster_id+'$')| 66 | map(attribute='version')|list|first }} 67 | # TODO: use a proper ansible module for this once available 68 | - name: Get server CPU list via REST API 69 | uri: 70 | url: 71 | "https://{{ he_fqdn }}/ovirt-engine/api/options/ServerCPUList?version=\ 72 | {{ cluster_version.major }}.{{ cluster_version.minor }}" 73 | validate_certs: false 74 | user: admin@internal 75 | password: "{{ he_admin_password }}" 76 | method: GET 77 | return_content: true 78 | status_code: 200 79 | headers: 80 | Accept: application/json 81 | no_log: true 82 | register: server_cpu_list 83 | - debug: var=server_cpu_list 84 | # TODO: use a proper ansible module for this once available 85 | - name: Get cluster emulated machine list via REST API 86 | uri: 87 | url: 88 | "https://{{ he_fqdn }}/ovirt-engine/api/options/ClusterEmulatedMachines?version=\ 89 | {{ cluster_version.major }}.{{ cluster_version.minor }}" 90 | validate_certs: false 91 | user: admin@internal 92 | password: "{{ he_admin_password }}" 93 | method: GET 94 | return_content: true 95 | 
status_code: 200 96 | headers: 97 | Accept: application/json 98 | no_log: true 99 | register: emulated_machine_list 100 | - name: Prepare for parsing server CPU list 101 | set_fact: 102 | server_cpu_dict: {} 103 | - name: Parse server CPU list 104 | set_fact: 105 | server_cpu_dict: "{{ server_cpu_dict | combine({item.split(':')[1]: item.split(':')[3]}) }}" 106 | with_items: >- 107 | {{ server_cpu_list.json['values']['system_option_value'][0]['value'].split('; ')|list|difference(['']) }} 108 | - debug: var=server_cpu_dict 109 | - name: Convert CPU model name 110 | set_fact: 111 | cluster_cpu_model: "{{ server_cpu_dict[cluster_cpu.type] }}" 112 | - debug: var=cluster_cpu_model 113 | - name: Parse emulated_machine 114 | set_fact: 115 | emulated_machine: 116 | emulated_machine_list.json['values']['system_option_value'][0]['value'].replace( 117 | '[','').replace(']','').split(', ')|first 118 | - name: Get storage domain details 119 | ovirt_storage_domain_info: 120 | pattern: name={{ he_storage_domain_name }} and datacenter={{ datacenter_name }} 121 | auth: "{{ ovirt_auth }}" 122 | register: storage_domain_details 123 | - debug: var=storage_domain_details 124 | - name: Add HE disks 125 | # rename it to ovirt_disk with ansible 2.8+ 126 | ovirt_disk_28: 127 | name: "{{ item.name }}" 128 | size: "{{ item.size }}" 129 | format: "{{ item.format }}" 130 | sparse: "{{ item.sparse }}" 131 | description: "{{ item.description }}" 132 | content_type: "{{ item.content }}" 133 | interface: virtio 134 | storage_domain: "{{ he_storage_domain_name }}" 135 | wait: true 136 | timeout: 600 137 | auth: "{{ ovirt_auth }}" 138 | with_items: 139 | - { 140 | name: 'he_virtio_disk', 141 | description: 'Hosted-Engine disk', 142 | size: "{{ he_disk_size_GB }}GiB", 143 | format: 'raw', 144 | sparse: "{{ false if he_domain_type == 'fc' or he_domain_type == 'iscsi' else true }}", 145 | content: 'hosted_engine' 146 | } 147 | - { 148 | name: 'he_sanlock', 149 | description: 'Hosted-Engine sanlock 
disk', 150 | size: '1GiB', 151 | format: 'raw', 152 | sparse: false, 153 | content: 'hosted_engine_sanlock' 154 | } 155 | - { 156 | name: 'HostedEngineConfigurationImage', 157 | description: 'Hosted-Engine configuration disk', 158 | size: '1GiB', 159 | format: 'raw', 160 | sparse: false, 161 | content: 'hosted_engine_configuration' 162 | } 163 | - { 164 | name: 'he_metadata', 165 | description: 'Hosted-Engine metadata disk', 166 | size: '1GiB', 167 | format: 'raw', 168 | sparse: false, 169 | content: 'hosted_engine_metadata' 170 | } 171 | register: add_disks 172 | - name: Register disk details 173 | set_fact: 174 | he_virtio_disk_details: "{{ add_disks.results[0] }}" 175 | he_sanlock_disk_details: "{{ add_disks.results[1] }}" 176 | he_conf_disk_details: "{{ add_disks.results[2] }}" 177 | he_metadata_disk_details: "{{ add_disks.results[3] }}" 178 | - debug: var=add_disks 179 | - name: Set default graphics protocols 180 | set_fact: 181 | he_graphic_protocols: [vnc, spice] 182 | - name: Check if FIPS is enabled 183 | command: sysctl -n crypto.fips_enabled 184 | register: he_fips_enabled 185 | - debug: var=he_fips_enabled 186 | - name: Select graphic protocols 187 | set_fact: 188 | he_graphic_protocols: [spice] 189 | when: he_fips_enabled.stdout == "1" 190 | - debug: var=he_graphic_protocols 191 | - name: Add VM 192 | ovirt_vm: 193 | state: stopped 194 | cluster: "{{ cluster_name }}" 195 | name: "{{ he_vm_name }}" 196 | description: 'Hosted Engine Virtual Machine' 197 | memory: "{{ he_mem_size_MB }}Mib" 198 | cpu_cores: "{{ he_vcpus }}" 199 | cpu_sockets: 1 200 | graphical_console: 201 | headless_mode: false 202 | protocol: "{{ he_graphic_protocols }}" 203 | serial_console: false 204 | operating_system: rhel_8x64 205 | type: server 206 | high_availability_priority: 1 207 | high_availability: false 208 | delete_protected: true 209 | # timezone: "{{ he_time_zone }}" # TODO: fix with the right parameter syntax 210 | disks: 211 | - id: "{{ he_virtio_disk_details.disk.id 
}}" 212 | nics: 213 | - name: vnet0 214 | profile_name: "{{ he_mgmt_network }}" 215 | interface: virtio 216 | mac_address: "{{ he_vm_mac_addr }}" 217 | auth: "{{ ovirt_auth }}" 218 | register: he_vm_details 219 | - debug: var=he_vm_details 220 | - name: Register external local VM uuid 221 | shell: virsh -r domuuid {{ he_vm_name }}Local | head -1 222 | environment: "{{ he_cmd_lang }}" 223 | register: external_local_vm_uuid 224 | changed_when: true 225 | - debug: var=external_local_vm_uuid 226 | -------------------------------------------------------------------------------- /tasks/create_target_vm/02_engine_vm_configuration.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Engine VM configuration tasks 3 | block: 4 | - name: Create a temporary directory for ansible as postgres user 5 | file: 6 | path: /var/lib/pgsql/.ansible/tmp 7 | state: directory 8 | owner: postgres 9 | group: postgres 10 | mode: 0700 11 | - name: Update target VM details at DB level 12 | command: >- 13 | psql -d engine -c 14 | "UPDATE vm_static SET {{ item.field }}={{ item.value }} WHERE 15 | vm_guid='{{ hostvars[he_ansible_host_name]['he_vm_details']['vm']['id'] }}'" 16 | environment: "{{ he_cmd_lang }}" 17 | become: true 18 | become_user: postgres 19 | become_method: sudo 20 | changed_when: true 21 | register: db_vm_update 22 | with_items: 23 | - {field: 'origin', value: 6} 24 | - debug: var=db_vm_update 25 | - name: Insert Hosted Engine configuration disk uuid into Engine database 26 | command: >- 27 | psql -d engine -c 28 | "UPDATE vdc_options SET option_value= 29 | '{{ hostvars[he_ansible_host_name]['he_conf_disk_details']['disk']['id'] }}' 30 | WHERE option_name='HostedEngineConfigurationImageGuid' AND version='general'" 31 | environment: "{{ he_cmd_lang }}" 32 | become: true 33 | become_user: postgres 34 | become_method: sudo 35 | changed_when: true 36 | register: db_conf_update 37 | - debug: var=db_conf_update 38 | - name: Fetch 
host SPM_ID 39 | command: >- 40 | psql -t -d engine -c 41 | "SELECT vds_spm_id FROM vds WHERE vds_name='{{ hostvars[he_ansible_host_name]['he_host_name'] }}'" 42 | environment: "{{ he_cmd_lang }}" 43 | become: true 44 | become_user: postgres 45 | become_method: sudo 46 | changed_when: true 47 | register: host_spm_id_out 48 | - name: Parse host SPM_ID 49 | set_fact: host_spm_id="{{ host_spm_id_out.stdout_lines|first|trim }}" 50 | - debug: var=host_spm_id 51 | - name: Restore original DisableFenceAtStartupInSec 52 | shell: "engine-config -s DisableFenceAtStartupInSec=$(cat /root/DisableFenceAtStartupInSec.txt)" 53 | environment: "{{ he_cmd_lang }}" 54 | changed_when: true 55 | when: he_restore_from_file is defined and he_restore_from_file 56 | - name: Remove DisableFenceAtStartupInSec temporary file 57 | file: 58 | path: /root/DisableFenceAtStartupInSec.txt 59 | state: absent 60 | when: he_restore_from_file is defined and he_restore_from_file 61 | - name: Restore original OvfUpdateIntervalInMinutes 62 | shell: "engine-config -s OvfUpdateIntervalInMinutes=$(cat /root/OvfUpdateIntervalInMinutes.txt)" 63 | environment: "{{ he_cmd_lang }}" 64 | changed_when: true 65 | - name: Remove OvfUpdateIntervalInMinutes temporary file 66 | file: 67 | path: /root/OvfUpdateIntervalInMinutes.txt 68 | state: absent 69 | changed_when: true 70 | - name: Restore original SSO_ALTERNATE_ENGINE_FQDNS 71 | block: 72 | - name: Removing temporary value 73 | lineinfile: 74 | path: /etc/ovirt-engine/engine.conf.d/11-setup-sso.conf 75 | regexp: '^SSO_ALTERNATE_ENGINE_FQDNS=.* # hosted-engine-setup' 76 | state: absent 77 | - name: Restoring original value 78 | replace: 79 | path: /etc/ovirt-engine/engine.conf.d/11-setup-sso.conf 80 | regexp: '^#(SSO_ALTERNATE_ENGINE_FQDNS=.*) # pre hosted-engine-setup' 81 | replace: '\1' 82 | - name: Remove temporary directory for ansible as postgres user 83 | file: 84 | path: /var/lib/pgsql/.ansible 85 | state: absent 86 | - name: Configure PermitRootLogin for 
sshd to its final value 87 | lineinfile: 88 | dest: /etc/ssh/sshd_config 89 | regexp: "^\\s*PermitRootLogin" 90 | line: "PermitRootLogin {{ he_root_ssh_access }}" 91 | state: present 92 | -------------------------------------------------------------------------------- /tasks/create_target_vm/03_hosted_engine_final_tasks.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Hosted-Engine final tasks 3 | block: 4 | - name: Choose IPv4, IPv6 or auto 5 | import_tasks: ipv_switch.yml 6 | - name: Trigger hosted engine OVF update and enable the serial console 7 | ovirt_vm: 8 | id: "{{ he_vm_details.vm.id }}" 9 | description: "Hosted engine VM" 10 | serial_console: true 11 | auth: "{{ ovirt_auth }}" 12 | - name: Wait until OVF update finishes 13 | ovirt_storage_domain_info: 14 | auth: "{{ ovirt_auth }}" 15 | fetch_nested: true 16 | nested_attributes: 17 | - name 18 | - image_id 19 | - id 20 | pattern: "name={{ he_storage_domain_name }}" 21 | retries: 12 22 | delay: 10 23 | register: storage_domain_details 24 | until: "storage_domain_details.ovirt_storage_domains[0].disks | selectattr('name', 'match', '^OVF_STORE$') | list" 25 | - debug: var=storage_domain_details 26 | - name: Parse OVF_STORE disk list 27 | set_fact: 28 | ovf_store_disks: >- 29 | {{ storage_domain_details.ovirt_storage_domains[0].disks | 30 | selectattr('name', 'match', '^OVF_STORE$') | list }} 31 | - debug: var=ovf_store_disks 32 | - name: Check OVF_STORE volume status 33 | command: >- 34 | vdsm-client Volume getInfo storagepoolID={{ datacenter_id }} 35 | storagedomainID={{ storage_domain_details.ovirt_storage_domains[0].id }} 36 | imageID={{ item.id }} volumeID={{ item.image_id }} 37 | environment: "{{ he_cmd_lang }}" 38 | changed_when: true 39 | register: ovf_store_status 40 | retries: 12 41 | delay: 10 42 | until: >- 43 | ovf_store_status.rc == 0 and ovf_store_status.stdout|from_json|json_query('status') == 'OK' and 44 | 
ovf_store_status.stdout|from_json|json_query('description')|from_json|json_query('Updated') 45 | with_items: "{{ ovf_store_disks }}" 46 | - debug: var=ovf_store_status 47 | - name: Wait for OVF_STORE disk content 48 | shell: >- 49 | vdsm-client Image prepare storagepoolID={{ datacenter_id }} 50 | storagedomainID={{ storage_domain_details.ovirt_storage_domains[0].id }} imageID={{ item.id }} 51 | volumeID={{ item.image_id }} | grep path | awk '{ print $2 }' | 52 | xargs -I{} sudo -u vdsm dd if={} | tar -tvf - {{ he_vm_details.vm.id }}.ovf 53 | environment: "{{ he_cmd_lang }}" 54 | changed_when: true 55 | register: ovf_store_content 56 | retries: 12 57 | delay: 10 58 | until: ovf_store_content.rc == 0 59 | with_items: "{{ ovf_store_disks }}" 60 | args: 61 | warn: false 62 | - name: Prepare images 63 | command: >- 64 | vdsm-client Image prepare storagepoolID={{ datacenter_id }} 65 | storagedomainID={{ storage_domain_details.ovirt_storage_domains[0].id }} 66 | imageID={{ item.disk.id }} volumeID={{ item.disk.image_id }} 67 | environment: "{{ he_cmd_lang }}" 68 | with_items: 69 | - "{{ he_virtio_disk_details }}" 70 | - "{{ he_conf_disk_details }}" 71 | - "{{ he_metadata_disk_details }}" 72 | - "{{ he_sanlock_disk_details }}" 73 | register: prepareimage_results 74 | changed_when: true 75 | - debug: var=prepareimage_results 76 | - name: Fetch Hosted Engine configuration disk path 77 | set_fact: 78 | he_conf_disk_path: >- 79 | {{ (prepareimage_results.results|json_query("[?item.id=='" + 80 | he_conf_disk_details.id + "'].stdout")|first|from_json).path }} 81 | - name: Fetch Hosted Engine virtio disk path 82 | set_fact: 83 | he_virtio_disk_path: >- 84 | {{ (prepareimage_results.results|json_query("[?item.id=='" + 85 | he_virtio_disk_details.id + "'].stdout")|first|from_json).path }} 86 | - name: Fetch Hosted Engine virtio metadata path 87 | set_fact: 88 | he_metadata_disk_path: >- 89 | {{ (prepareimage_results.results|json_query("[?item.id=='" + 90 | 
he_metadata_disk_details.id + "'].stdout")|first|from_json).path }} 91 | - debug: var=he_conf_disk_path 92 | - debug: var=he_virtio_disk_path 93 | - debug: var=he_metadata_disk_path 94 | - name: Shutdown local VM 95 | command: "virsh -c qemu:///system?authfile={{ he_libvirt_authfile }} shutdown {{ he_vm_name }}Local" 96 | environment: "{{ he_cmd_lang }}" 97 | - name: Wait for local VM shutdown 98 | command: virsh -r domstate "{{ he_vm_name }}Local" 99 | environment: "{{ he_cmd_lang }}" 100 | changed_when: true 101 | register: dominfo_out 102 | until: dominfo_out.rc == 0 and 'shut off' in dominfo_out.stdout 103 | retries: 120 104 | delay: 5 105 | - debug: var=dominfo_out 106 | - name: Undefine local VM 107 | command: "virsh -c qemu:///system?authfile={{ he_libvirt_authfile }} undefine {{ he_vm_name }}Local" 108 | environment: "{{ he_cmd_lang }}" 109 | - name: Update libvirt default network configuration, destroy 110 | command: "virsh -c qemu:///system?authfile={{ he_libvirt_authfile }} net-destroy default" 111 | environment: "{{ he_cmd_lang }}" 112 | - name: Update libvirt default network configuration, undefine 113 | command: "virsh -c qemu:///system?authfile={{ he_libvirt_authfile }} net-undefine default" 114 | environment: "{{ he_cmd_lang }}" 115 | ignore_errors: true 116 | - name: Detect ovirt-hosted-engine-ha version 117 | command: >- 118 | {{ ansible_python.executable }} -c 119 | 'from ovirt_hosted_engine_ha.agent import constants as agentconst; print(agentconst.PACKAGE_VERSION)' 120 | environment: "{{ he_cmd_lang }}" 121 | register: ha_version_out 122 | changed_when: true 123 | - name: Set ha_version 124 | set_fact: ha_version="{{ ha_version_out.stdout_lines|first }}" 125 | - debug: var=ha_version 126 | - name: Create configuration templates 127 | template: 128 | src: "{{ item.src }}" 129 | dest: "{{ item.dest }}" 130 | mode: 0644 131 | with_items: 132 | - {src: templates/vm.conf.j2, dest: "{{ he_local_vm_dir }}/vm.conf"} 133 | - {src: 
templates/broker.conf.j2, dest: "{{ he_local_vm_dir }}/broker.conf"} 134 | - {src: templates/version.j2, dest: "{{ he_local_vm_dir }}/version"} 135 | - {src: templates/fhanswers.conf.j2, dest: "{{ he_local_vm_dir }}/fhanswers.conf"} 136 | - {src: templates/hosted-engine.conf.j2, dest: "{{ he_local_vm_dir }}/hosted-engine.conf"} 137 | - name: Create configuration archive 138 | command: >- 139 | tar --record-size=20480 -cvf {{ he_conf_disk_details.disk.image_id }} 140 | vm.conf broker.conf version fhanswers.conf hosted-engine.conf 141 | environment: "{{ he_cmd_lang }}" 142 | args: 143 | chdir: "{{ he_local_vm_dir }}" 144 | warn: false 145 | changed_when: true 146 | tags: ['skip_ansible_lint'] 147 | - name: Create ovirt-hosted-engine-ha run directory 148 | file: 149 | path: /var/run/ovirt-hosted-engine-ha 150 | state: directory 151 | - name: Copy configuration files to the right location on host 152 | copy: 153 | remote_src: true 154 | src: "{{ item.src }}" 155 | dest: "{{ item.dest }}" 156 | mode: 0644 157 | with_items: 158 | - {src: "{{ he_local_vm_dir }}/vm.conf", dest: /var/run/ovirt-hosted-engine-ha} 159 | - {src: "{{ he_local_vm_dir }}/hosted-engine.conf", dest: /etc/ovirt-hosted-engine/} 160 | - name: Copy configuration archive to storage 161 | command: >- 162 | dd bs=20480 count=1 oflag=direct if="{{ he_local_vm_dir }}/{{ he_conf_disk_details.disk.image_id }}" 163 | of="{{ he_conf_disk_path }}" 164 | environment: "{{ he_cmd_lang }}" 165 | become: true 166 | become_user: vdsm 167 | become_method: sudo 168 | changed_when: true 169 | args: 170 | warn: false 171 | - name: Initialize metadata volume 172 | command: dd bs=1M count=1024 oflag=direct if=/dev/zero of="{{ he_metadata_disk_path }}" 173 | environment: "{{ he_cmd_lang }}" 174 | become: true 175 | become_user: vdsm 176 | become_method: sudo 177 | changed_when: true 178 | - include_tasks: get_local_vm_disk_path.yml 179 | - name: Generate DHCP network configuration for the engine VM 180 | template: 181 | src: 
templates/ifcfg-eth0-dhcp.j2 182 | dest: "{{ he_local_vm_dir }}/ifcfg-eth0" 183 | owner: root 184 | group: root 185 | mode: 0644 186 | when: he_vm_ip_addr is none 187 | - name: Generate static network configuration for the engine VM, IPv4 188 | template: 189 | src: templates/ifcfg-eth0-static.j2 190 | dest: "{{ he_local_vm_dir }}/ifcfg-eth0" 191 | owner: root 192 | group: root 193 | mode: 0644 194 | when: he_vm_ip_addr is not none and he_vm_ip_addr | ipv4 195 | - name: Generate static network configuration for the engine VM, IPv6 196 | template: 197 | src: templates/ifcfg-eth0-static-ipv6.j2 198 | dest: "{{ he_local_vm_dir }}/ifcfg-eth0" 199 | owner: root 200 | group: root 201 | mode: 0644 202 | when: he_vm_ip_addr is not none and he_vm_ip_addr | ipv6 203 | - name: Inject network configuration with guestfish 204 | command: >- 205 | guestfish -a {{ local_vm_disk_path }} --rw -i copy-in "{{ he_local_vm_dir }}/ifcfg-eth0" 206 | /etc/sysconfig/network-scripts {{ ":" }} selinux-relabel /etc/selinux/targeted/contexts/files/file_contexts 207 | /etc/sysconfig/network-scripts/ifcfg-eth0 force{{ ":" }}true 208 | environment: 209 | LIBGUESTFS_BACKEND: direct 210 | LANG: en_US.UTF-8 211 | LC_MESSAGES: en_US.UTF-8 212 | LC_ALL: en_US.UTF-8 213 | changed_when: true 214 | - name: Extract /etc/hosts from the Hosted Engine VM 215 | command: virt-copy-out -a {{ local_vm_disk_path }} /etc/hosts "{{ he_local_vm_dir }}" 216 | environment: 217 | LIBGUESTFS_BACKEND: direct 218 | LANG: en_US.UTF-8 219 | LC_MESSAGES: en_US.UTF-8 220 | LC_ALL: en_US.UTF-8 221 | changed_when: true 222 | - name: Clean /etc/hosts for the Hosted Engine VM for Engine VM FQDN 223 | lineinfile: 224 | dest: "{{ he_local_vm_dir }}/hosts" 225 | regexp: "# hosted-engine-setup-{{ hostvars[he_ansible_host_name]['he_local_vm_dir'] }}$" 226 | state: absent 227 | - name: Add an entry on /etc/hosts for the Hosted Engine VM for the VM itself 228 | lineinfile: 229 | dest: "{{ he_local_vm_dir }}/hosts" 230 | line: "{{ 
he_vm_ip_addr }} {{ he_fqdn }}" 231 | state: present 232 | when: he_vm_etc_hosts and he_vm_ip_addr is not none 233 | - name: Clean /etc/hosts for the Hosted Engine VM for host address 234 | lineinfile: 235 | dest: "{{ he_local_vm_dir }}/hosts" 236 | line: "{{ he_host_ip }} {{ he_host_address }}" 237 | state: absent 238 | when: not he_vm_etc_hosts 239 | - name: Inject /etc/hosts with guestfish 240 | command: >- 241 | guestfish -a {{ local_vm_disk_path }} --rw -i copy-in "{{ he_local_vm_dir }}/hosts" 242 | /etc {{ ":" }} selinux-relabel /etc/selinux/targeted/contexts/files/file_contexts 243 | /etc/hosts force{{ ":" }}true 244 | environment: 245 | LIBGUESTFS_BACKEND: direct 246 | LANG: en_US.UTF-8 247 | LC_MESSAGES: en_US.UTF-8 248 | LC_ALL: en_US.UTF-8 249 | changed_when: true 250 | - name: Copy local VM disk to shared storage 251 | command: >- 252 | qemu-img convert -f qcow2 -O raw -t none -T none {{ local_vm_disk_path }} {{ he_virtio_disk_path }} 253 | environment: "{{ he_cmd_lang }}" 254 | become: true 255 | become_user: vdsm 256 | become_method: sudo 257 | changed_when: true 258 | - name: Verify copy of VM disk 259 | command: qemu-img compare {{ local_vm_disk_path }} {{ he_virtio_disk_path }} 260 | environment: "{{ he_cmd_lang }}" 261 | become: true 262 | become_user: vdsm 263 | become_method: sudo 264 | changed_when: true 265 | when: he_debug_mode|bool 266 | - name: Remove temporary entry in /etc/hosts for the local VM 267 | lineinfile: 268 | dest: /etc/hosts 269 | regexp: "# temporary entry added by hosted-engine-setup for the bootstrap VM$" 270 | state: absent 271 | - name: Start ovirt-ha-broker service on the host 272 | service: 273 | name: ovirt-ha-broker 274 | state: started 275 | enabled: true 276 | - name: Initialize lockspace volume 277 | command: hosted-engine --reinitialize-lockspace --force 278 | environment: "{{ he_cmd_lang }}" 279 | register: result 280 | until: result.rc == 0 281 | ignore_errors: true 282 | retries: 5 283 | delay: 10 284 | 
changed_when: true 285 | - debug: var=result 286 | - block: 287 | - name: Workaround for ovirt-ha-broker start failures 288 | # Ugly workaround for https://bugzilla.redhat.com/1768511 289 | # fix it on ovirt-ha-broker side and remove ASAP 290 | systemd: 291 | state: restarted 292 | enabled: true 293 | name: ovirt-ha-broker 294 | - name: Initialize lockspace volume 295 | command: hosted-engine --reinitialize-lockspace --force 296 | environment: "{{ he_cmd_lang }}" 297 | register: result2 298 | until: result2.rc == 0 299 | retries: 5 300 | delay: 10 301 | changed_when: true 302 | - debug: var=result2 303 | when: result.rc != 0 304 | - name: Start ovirt-ha-agent service on the host 305 | service: 306 | name: ovirt-ha-agent 307 | state: started 308 | enabled: true 309 | - name: Exit HE maintenance mode 310 | command: hosted-engine --set-maintenance --mode=none 311 | environment: "{{ he_cmd_lang }}" 312 | register: mresult 313 | until: mresult.rc == 0 314 | retries: 3 315 | delay: 10 316 | changed_when: true 317 | - debug: var=mresult 318 | - name: Wait for the engine to come up on the target VM 319 | block: 320 | - name: Check engine VM health 321 | command: hosted-engine --vm-status --json 322 | environment: "{{ he_cmd_lang }}" 323 | register: health_result 324 | until: >- 325 | health_result.rc == 0 and 'health' in health_result.stdout and 326 | health_result.stdout|from_json|json_query('*."engine-status"."health"')|first=="good" 327 | retries: 180 328 | delay: 5 329 | changed_when: true 330 | - debug: var=health_result 331 | rescue: 332 | - name: Check VM status at virt level 333 | shell: virsh -r list | grep {{ he_vm_name }} | grep running 334 | environment: "{{ he_cmd_lang }}" 335 | ignore_errors: true 336 | changed_when: true 337 | register: vm_status_virsh 338 | - debug: var=vm_status_virsh 339 | - name: Fail if engine VM is not running 340 | fail: 341 | msg: "Engine VM is not running, please check vdsm logs" 342 | when: vm_status_virsh.rc != 0 343 | - name: Get 
target engine VM IP address 343 | shell: getent {{ ip_key }} {{ he_fqdn }} | cut -d' ' -f1 | uniq 344 | environment: "{{ he_cmd_lang }}" 345 | register: engine_vm_ip 346 | changed_when: true 347 | - name: Get VDSM's target engine VM stats 348 | command: vdsm-client VM getStats vmID={{ he_vm_details.vm.id }} 349 | environment: "{{ he_cmd_lang }}" 350 | register: engine_vdsm_stats 351 | changed_when: true 352 | - name: Convert stats to JSON format 353 | set_fact: json_stats={{ engine_vdsm_stats.stdout|from_json }} 354 | - name: Get target engine VM IP address from VDSM stats 355 | set_fact: engine_vm_ip_vdsm={{ json_stats[0].guestIPs }} 356 | - debug: var=engine_vm_ip_vdsm 357 | - name: Fail if Engine IP is different from engine's he_fqdn resolved IP 358 | fail: 359 | msg: >- 360 | Engine VM IP address is {{ engine_vm_ip_vdsm }} while the engine's he_fqdn {{ he_fqdn }} resolves to 361 | {{ engine_vm_ip.stdout_lines[0] }}. If you are using DHCP, check your DHCP reservation configuration 362 | when: engine_vm_ip_vdsm != engine_vm_ip.stdout_lines[0] 363 | - name: Fail if the engine did not start for any other reason 364 | fail: 365 | msg: The engine failed to start inside the engine VM; please check engine.log. 366 | - name: Get target engine VM address 367 | shell: getent {{ ip_key }} {{ he_fqdn }} | cut -d' ' -f1 | uniq 368 | environment: "{{ he_cmd_lang }}" 369 | register: engine_vm_ip 370 | when: engine_vm_ip is not defined 371 | changed_when: true 372 | # Workaround for ovn-central being configured with the address of the bootstrap VM.
374 | # Keep this aligned with: 375 | # https://github.com/oVirt/ovirt-engine/blob/master/packaging/playbooks/roles/ovirt-provider-ovn-driver/tasks/main.yml 376 | - name: Reconfigure OVN central address 377 | command: vdsm-tool ovn-config {{ engine_vm_ip.stdout_lines[0] }} {{ he_mgmt_network }} 378 | environment: "{{ he_cmd_lang }}" 379 | changed_when: true 380 | # Workaround for https://bugzilla.redhat.com/1540107 381 | # the engine fails deleting a VM if its status in the engine DB 382 | # is not up to date. 383 | - include_tasks: auth_sso.yml 384 | - name: Check for the local bootstrap VM 385 | ovirt_vm_info: 386 | pattern: id="{{ external_local_vm_uuid.stdout_lines|first }}" 387 | auth: "{{ ovirt_auth }}" 388 | register: local_vm_f 389 | - name: Remove the bootstrap local VM 390 | block: 391 | - name: Make the engine aware that the external VM is stopped 392 | ignore_errors: true 393 | ovirt_vm: 394 | state: stopped 395 | id: "{{ external_local_vm_uuid.stdout_lines|first }}" 396 | auth: "{{ ovirt_auth }}" 397 | register: vmstop_result 398 | - debug: var=vmstop_result 399 | - name: Wait for the local bootstrap VM to be down at engine eyes 400 | ovirt_vm_info: 401 | pattern: id="{{ external_local_vm_uuid.stdout_lines|first }}" 402 | auth: "{{ ovirt_auth }}" 403 | register: local_vm_status 404 | until: local_vm_status.ovirt_vms[0].status == "down" 405 | retries: 24 406 | delay: 5 407 | - debug: var=local_vm_status 408 | - name: Remove bootstrap external VM from the engine 409 | ovirt_vm: 410 | state: absent 411 | id: "{{ external_local_vm_uuid.stdout_lines|first }}" 412 | auth: "{{ ovirt_auth }}" 413 | register: vmremove_result 414 | - debug: var=vmremove_result 415 | when: local_vm_f.ovirt_vms|length > 0 416 | - name: Remove ovirt-engine-appliance rpm 417 | yum: 418 | name: ovirt-engine-appliance 419 | state: absent 420 | register: yum_result 421 | until: yum_result is success 422 | retries: 10 423 | delay: 5 424 | when: he_remove_appliance_rpm|bool 425 | 426 | - 
name: Include custom tasks for after setup customization 427 | include_tasks: "{{ item }}" 428 | with_fileglob: "hooks/after_setup/*.yml" 429 | register: after_setup_results 430 | - debug: var=after_setup_results 431 | -------------------------------------------------------------------------------- /tasks/fc_getdevices.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - include_tasks: auth_sso.yml 3 | - name: Get Fibre Channel LUNs 4 | ovirt_host_storage_info: 5 | host: "{{ he_host_name }}" 6 | fcp: 7 | lun_id: -1 # currently it is unused and I use it to turn on FC filtering 8 | auth: "{{ ovirt_auth }}" 9 | register: otopi_fc_devices 10 | - debug: var=otopi_fc_devices 11 | ... 12 | -------------------------------------------------------------------------------- /tasks/fetch_engine_logs.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Set destination directory path 3 | set_fact: 4 | destdir=/var/log/ovirt-hosted-engine-setup/engine-logs-{{ ansible_date_time.iso8601 }}/ 5 | - name: Create destination directory 6 | file: 7 | state: directory 8 | path: "{{ destdir }}" 9 | owner: root 10 | group: root 11 | mode: 0700 12 | - include_tasks: get_local_vm_disk_path.yml 13 | - name: Give the vm time to flush dirty buffers 14 | wait_for: 15 | timeout: 10 16 | delegate_to: localhost 17 | become: false 18 | - name: Copy engine logs 19 | command: virt-copy-out -a {{ local_vm_disk_path }} {{ item }} {{ destdir }} 20 | environment: 21 | LIBGUESTFS_BACKEND: direct 22 | LANG: en_US.UTF-8 23 | LC_MESSAGES: en_US.UTF-8 24 | LC_ALL: en_US.UTF-8 25 | ignore_errors: true 26 | changed_when: true 27 | with_items: 28 | - /var/log/ovirt-engine 29 | - /var/log/messages 30 | when: local_vm_disk_path is defined 31 | -------------------------------------------------------------------------------- /tasks/fetch_host_ip.yml: 
-------------------------------------------------------------------------------- 1 | --- 2 | - name: Get full hostname 3 | command: hostname -f 4 | changed_when: true 5 | register: host_full_name 6 | - name: Set hostname variable if not defined 7 | set_fact: 8 | he_host_name: "{{ host_full_name.stdout_lines[0] }}" 9 | when: he_host_name is none 10 | - debug: var=he_host_name 11 | - name: Define host address variable if not defined 12 | set_fact: 13 | he_host_address: "{{ host_full_name.stdout_lines[0] }}" 14 | when: he_host_address is none 15 | - debug: var=he_host_address 16 | 17 | - name: Get host IP address 18 | block: 19 | - name: Choose IPv4, IPv6 or auto 20 | import_tasks: ipv_switch.yml 21 | - name: Get host address resolution 22 | shell: getent {{ ip_key }} {{ he_host_address }} | grep STREAM 23 | register: hostname_resolution_output 24 | changed_when: true 25 | ignore_errors: true 26 | - debug: var=hostname_resolution_output 27 | - name: Check address resolution 28 | fail: 29 | msg: > 30 | Unable to resolve address 31 | when: hostname_resolution_output.rc != 0 32 | - name: Parse host address resolution 33 | set_fact: 34 | he_host_ip: "{{ 35 | ( 36 | hostname_resolution_output.stdout.split() | ipaddr | 37 | difference(hostname_resolution_output.stdout.split() | 38 | ipaddr('link-local') 39 | ) 40 | )[0] 41 | }}" 42 | - debug: var=he_host_ip 43 | 44 | - name: Fail if host's ip is empty 45 | fail: 46 | msg: Host has no IP address 47 | when: he_host_ip is none 48 | -------------------------------------------------------------------------------- /tasks/filter_team_devices.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Collect interface types 3 | shell: set -euo pipefail && nmcli -g GENERAL.TYPE device show {{ nic }} 4 | with_items: 5 | - "{{ host_net }}" 6 | loop_control: 7 | loop_var: nic 8 | changed_when: true 9 | register: interface_types 10 | - debug: var=interface_types 11 | - name: Check for Team 
devices 12 | set_fact: 13 | is_team: "{{ nic_if.stdout.find('team') != -1 }}" 14 | when: nic_if.stdout.find('team') != -1 15 | with_items: 16 | - "{{ interface_types.results }}" 17 | loop_control: 18 | loop_var: nic_if 19 | register: team_list 20 | - debug: var=team_list 21 | - name: Get list of Team devices 22 | set_fact: 23 | team_if: "{{ team_list.results | reject('skipped') | map(attribute='nic_if.nic') | list }}" 24 | - debug: var=team_if 25 | - name: Filter unsupported interface types 26 | set_fact: 27 | otopi_host_net: "{{ host_net | difference(team_if) }}" 28 | register: otopi_host_net 29 | - debug: var=otopi_host_net 30 | - name: Fail if only teaming devices are available 31 | fail: 32 | msg: >- 33 | Only Team devices {{ team_if | join(', ') }} are present. 34 | Teaming is not supported. 35 | when: (otopi_host_net.ansible_facts.otopi_host_net | length == 0) 36 | -------------------------------------------------------------------------------- /tasks/final_clean.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Clean temporary resources 3 | block: 4 | - name: Fetch logs from the engine VM 5 | include_tasks: fetch_engine_logs.yml 6 | ignore_errors: true 7 | - include_tasks: clean_localvm_dir.yml 8 | - name: Clean local storage pools 9 | include_tasks: clean_local_storage_pools.yml 10 | ignore_errors: true 11 | ...
12 | -------------------------------------------------------------------------------- /tasks/full_execution.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install packages and bootstrap local engine VM 3 | block: 4 | - name: Install required packages for oVirt Hosted Engine deployment 5 | import_tasks: install_packages.yml 6 | 7 | - name: System configuration validations 8 | include_tasks: "{{ item }}" 9 | with_fileglob: "pre_checks/*.yml" 10 | 11 | - name: Clean environment before deployment 12 | import_tasks: initial_clean.yml 13 | 14 | - name: 01_02 bootstrap local vm tasks 15 | block: 16 | - name: 01 Bootstrap local VM 17 | import_tasks: bootstrap_local_vm/01_prepare_routing_rules.yml 18 | 19 | - name: 02 Bootstrap local VM 20 | import_tasks: bootstrap_local_vm/02_create_local_vm.yml 21 | 22 | - name: Local engine VM installation - Pre tasks 23 | block: 24 | - name: 03 Bootstrap local VM 25 | import_tasks: bootstrap_local_vm/03_engine_initial_tasks.yml 26 | delegate_to: "{{ groups.engine[0] }}" 27 | 28 | - name: Engine Setup on local VM 29 | block: 30 | - name: Engine Setup on local VM 31 | vars: 32 | ovirt_engine_setup_hostname: "{{ he_fqdn.split('.')[0] }}" 33 | ovirt_engine_setup_organization: "{{ he_cloud_init_domain_name }}" 34 | ovirt_engine_setup_dwh_db_host: "{{ he_fqdn.split('.')[0] }}" 35 | ovirt_engine_setup_firewall_manager: null 36 | ovirt_engine_setup_answer_file_path: /root/ovirt-engine-answers 37 | ovirt_engine_setup_use_remote_answer_file: true 38 | ovirt_engine_setup_offline: "{{ he_offline_deployment }}" 39 | ovirt_engine_setup_package_list: "{{ he_additional_package_list }}" 40 | ovirt_engine_setup_admin_password: "{{ he_admin_password }}" 41 | import_role: 42 | name: ovirt.engine-setup 43 | delegate_to: "{{ groups.engine[0] }}" 44 | 45 | - name: Local engine VM installation - Post tasks 46 | block: 47 | - name: 04 Bootstrap local VM 48 | import_tasks: 
bootstrap_local_vm/04_engine_final_tasks.yml 49 | delegate_to: "{{ groups.engine[0] }}" 50 | 51 | - name: Configure engine VM on a storage domain 52 | block: 53 | - name: 05 Bootstrap local VM 54 | import_tasks: bootstrap_local_vm/05_add_host.yml 55 | - name: Create Storage Domain 56 | import_tasks: create_storage_domain.yml 57 | - name: Create target hosted engine vm 58 | import_tasks: create_target_vm/01_create_target_hosted_engine_vm.yml 59 | 60 | - name: Configure database settings 61 | import_tasks: create_target_vm/02_engine_vm_configuration.yml 62 | delegate_to: "{{ groups.engine[0] }}" 63 | 64 | - name: Closeup 65 | block: 66 | - name: Hosted engine final tasks 67 | import_tasks: create_target_vm/03_hosted_engine_final_tasks.yml 68 | - name: Sync on engine machine 69 | command: sync 70 | changed_when: true 71 | delegate_to: "{{ groups.engine[0] }}" 72 | - name: Final clean 73 | import_tasks: final_clean.yml 74 | -------------------------------------------------------------------------------- /tasks/get_local_vm_disk_path.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Find the local appliance image 3 | find: 4 | paths: "{{ he_local_vm_dir }}/images" 5 | recurse: true 6 | patterns: ^.*.(? 
0 14 | -------------------------------------------------------------------------------- /tasks/initial_clean.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: initial clean 3 | tags: he_initial_clean 4 | block: 5 | - name: Stop libvirt service 6 | service: 7 | name: libvirtd 8 | state: stopped 9 | enabled: true 10 | - name: Drop vdsm config statements 11 | command: >- 12 | sed -i 13 | '/## beginning of configuration section by 14 | vdsm-4.[0-9]\+.[0-9]\+/,/## end of configuration section by vdsm-4.[0-9]\+.[0-9]\+/d' {{ item }} 15 | environment: "{{ he_cmd_lang }}" 16 | args: 17 | warn: false 18 | with_items: 19 | - /etc/libvirt/libvirtd.conf 20 | - /etc/libvirt/qemu.conf 21 | - /etc/libvirt/qemu-sanlock.conf 22 | - /etc/sysconfig/libvirtd 23 | tags: ['skip_ansible_lint'] 24 | - name: Restore initial abrt config files 25 | copy: 26 | remote_src: true 27 | src: "{{ item.src }}" 28 | dest: "{{ item.dest }}" 29 | with_items: 30 | - { 31 | src: /usr/share/abrt/conf.d/abrt-action-save-package-data.conf, 32 | dest: /etc/abrt/abrt-action-save-package-data.conf 33 | } 34 | - { 35 | src: /usr/share/abrt/conf.d/abrt.conf, 36 | dest: /etc/abrt/abrt.conf 37 | } 38 | - { 39 | src: /usr/share/abrt/conf.d/plugins/CCpp.conf, 40 | dest: /etc/abrt/plugins/CCpp.conf 41 | } 42 | - { 43 | src: /usr/share/abrt/conf.d/plugins/vmcore.conf, 44 | dest: /etc/abrt/plugins/vmcore.conf 45 | } 46 | - name: Restart abrtd service 47 | service: 48 | name: abrtd 49 | state: restarted 50 | - name: Drop libvirt sasl2 configuration by vdsm 51 | command: >- 52 | sed -i '/## start vdsm-4.[0-9]\+.[0-9]\+ configuration/,/## end vdsm configuration/d' /etc/sasl2/libvirt.conf 53 | environment: "{{ he_cmd_lang }}" 54 | args: 55 | warn: false 56 | tags: ['skip_ansible_lint'] 57 | - name: Stop and disable services 58 | service: 59 | name: "{{ item }}" 60 | state: stopped 61 | enabled: false 62 | with_items: 63 | - ovirt-ha-agent 64 | - ovirt-ha-broker 65 | - 
vdsmd 66 | - libvirtd-tls.socket 67 | - name: Restore initial libvirt default network configuration 68 | copy: 69 | remote_src: true 70 | src: /usr/share/libvirt/networks/default.xml 71 | dest: /etc/libvirt/qemu/networks/default.xml 72 | - name: Start libvirt 73 | service: 74 | name: libvirtd 75 | state: started 76 | enabled: true 77 | - name: Check for leftover local Hosted Engine VM 78 | shell: virsh list | grep {{ he_vm_name }}Local | cat 79 | environment: "{{ he_cmd_lang }}" 80 | changed_when: true 81 | register: local_vm_list 82 | - name: Destroy leftover local Hosted Engine VM 83 | command: virsh destroy {{ he_vm_name }}Local 84 | environment: "{{ he_cmd_lang }}" 85 | ignore_errors: true 86 | when: local_vm_list.stdout_lines|length >= 1 87 | - name: Check for leftover defined local Hosted Engine VM 88 | shell: virsh list --all | grep {{ he_vm_name }}Local | cat 89 | environment: "{{ he_cmd_lang }}" 90 | changed_when: true 91 | register: local_vm_list_all 92 | - name: Undefine leftover local engine VM 93 | command: virsh undefine --managed-save {{ he_vm_name }}Local 94 | environment: "{{ he_cmd_lang }}" 95 | when: local_vm_list_all.stdout_lines|length >= 1 96 | changed_when: true 97 | - name: Check for leftover defined Hosted Engine VM 98 | shell: virsh list --all | grep {{ he_vm_name }} | cat 99 | environment: "{{ he_cmd_lang }}" 100 | changed_when: true 101 | register: target_vm_list_all 102 | - name: Undefine leftover engine VM 103 | command: virsh undefine --managed-save {{ he_vm_name }} 104 | environment: "{{ he_cmd_lang }}" 105 | when: target_vm_list_all.stdout_lines|length >= 1 106 | changed_when: true 107 | - name: Remove eventually entries for the local VM from known_hosts file 108 | known_hosts: 109 | name: "{{ he_fqdn }}" 110 | state: absent 111 | delegate_to: localhost 112 | become: false 113 | ... 
114 | -------------------------------------------------------------------------------- /tasks/install_appliance.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install ovirt-engine-appliance rpm 3 | yum: 4 | name: ovirt-engine-appliance 5 | state: present 6 | register: task_result 7 | until: task_result is success 8 | retries: 10 9 | delay: 2 10 | - name: Parse appliance configuration for path 11 | shell: set -euo pipefail && grep path /etc/ovirt-hosted-engine/10-appliance.conf | cut -f2 -d'=' 12 | environment: "{{ he_cmd_lang }}" 13 | register: he_appliance_ova_out 14 | changed_when: true 15 | - debug: var=he_appliance_ova_out 16 | - name: Parse appliance configuration for sha1sum 17 | shell: set -euo pipefail && grep sha1sum /etc/ovirt-hosted-engine/10-appliance.conf | cut -f2 -d'=' 18 | environment: "{{ he_cmd_lang }}" 19 | register: he_appliance_ova_sha1 20 | changed_when: true 21 | - debug: var=he_appliance_ova_sha1 22 | - name: Get OVA path 23 | set_fact: 24 | he_appliance_ova_path: "{{ he_appliance_ova_out.stdout_lines|first }}" 25 | cacheable: true 26 | - debug: var=he_appliance_ova_path 27 | - name: Compute sha1sum 28 | stat: 29 | path: "{{ he_appliance_ova_path }}" 30 | checksum_algorithm: sha1 31 | register: ova_stats 32 | - debug: var=ova_stats 33 | - name: Compare sha1sum 34 | fail: 35 | msg: "{{ he_appliance_ova_path }} is corrupted (sha1sum)" 36 | when: he_appliance_ova_sha1.stdout_lines|first != ova_stats.stat.checksum 37 | -------------------------------------------------------------------------------- /tasks/install_packages.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install oVirt Hosted Engine packages 3 | package: 4 | name: "ovirt-hosted-engine-setup" 5 | state: present 6 | register: task_result 7 | until: task_result is success 8 | retries: 10 9 | delay: 2 10 | 
-------------------------------------------------------------------------------- /tasks/ipv_switch.yml: --------------------------------------------------------------------------------
---
# Decide which address family getent should use when resolving hostnames:
# forced IPv4, forced IPv6, or auto-detection ('ahosts').
- name: Choose IPv4, IPv6 or auto
  block:
    - name: Fail if he_force_ip4 and he_force_ip6 are set at the same time
      fail:
        msg: he_force_ip4 and he_force_ip6 cannot be used at the same time
      when: he_force_ip4 and he_force_ip6
    - name: Prepare getent key
      set_fact:
        ip_key: "{{ 'ahostsv4' if he_force_ip4 else 'ahostsv6' if he_force_ip6 else 'ahosts' }}"
  when: ip_key is not defined
-------------------------------------------------------------------------------- /tasks/iscsi_discover.yml: --------------------------------------------------------------------------------
---
- include_tasks: auth_sso.yml
# Build the request payload for the engine's iscsidiscover REST endpoint.
- name: Prepare iSCSI parameters
  set_fact:
    iscsid:
      iscsi:
        address: "{{ he_iscsi_portal_addr }}"
        port: "{{ he_iscsi_portal_port }}"
        username: "{{ he_iscsi_discover_username }}"
        password: "{{ he_iscsi_discover_password }}"
  no_log: true
- name: Fetch host facts
  ovirt_host_info:
    pattern: name={{ he_host_name }}
    auth: "{{ ovirt_auth }}"
  register: host_result
  until: host_result is succeeded and host_result.ovirt_hosts|length >= 1
  retries: 50
  delay: 10
- debug: var=host_result
- name: iSCSI discover with REST API
  uri:
    url: https://{{ he_fqdn }}/ovirt-engine/api/hosts/{{ host_result.ovirt_hosts[0].id }}/iscsidiscover
    validate_certs: false
    method: POST
    body: "{{ iscsid | to_json }}"
    return_content: true
    body_format: json
    status_code: 200
    headers:
      Content-Type: application/json
      Accept: application/json
      Authorization: "Basic {{ ('admin@internal' + ':' + he_admin_password ) | b64encode }}"
  register: otopi_iscsi_targets
- debug: var=otopi_iscsi_targets
# TODO: perform an iSCSI logout when viable, see:
# https://bugzilla.redhat.com/show_bug.cgi?id=1535951
# https://github.com/ansible/ansible/issues/35039
...
-------------------------------------------------------------------------------- /tasks/iscsi_getdevices.yml: --------------------------------------------------------------------------------
---
- include_tasks: auth_sso.yml
- name: iSCSI login
  ovirt_host:
    name: "{{ he_host_name }}"
    state: iscsilogin
    timeout: 30
    iscsi:
      username: "{{ he_iscsi_username }}"
      password: "{{ he_iscsi_password }}"
      address: "{{ item.0 }}"
      port: "{{ item.1 }}"
      target: "{{ he_iscsi_target }}"
    auth: "{{ ovirt_auth }}"
  no_log: true
  ignore_errors: true
  # TODO: avoid the with_together loop once
  # https://github.com/ansible/ansible/issues/32640 got properly fixed
  with_together:
    - "{{ he_iscsi_portal_addr.split(',') }}"
    - "{{ he_iscsi_portal_port.split(',') if he_iscsi_portal_port is string else he_iscsi_portal_port }}"
- name: Get iSCSI LUNs
  ovirt_host_storage_info:
    host: "{{ he_host_name }}"
    iscsi:
      username: "{{ he_iscsi_username }}"
      password: "{{ he_iscsi_password }}"
      address: "{{ he_iscsi_portal_addr.split(',')|first }}"
      port: "{{ he_iscsi_portal_port.split(',')|first if he_iscsi_portal_port is string else he_iscsi_portal_port }}"
      target: "{{ he_iscsi_target }}"
    auth: "{{ ovirt_auth }}"
  register: otopi_iscsi_devices
- debug: var=otopi_iscsi_devices
...
-------------------------------------------------------------------------------- /tasks/main.yml: --------------------------------------------------------------------------------
---
- name: Deploy hosted-engine
  import_tasks: full_execution.yml
  tags: always

- name: Execute just a specific set of steps
  include_tasks: partial_execution.yml
  tags:
    - initial_clean
    - final_clean
    - bootstrap_local_vm
    - create_storage_domain
    - create_target_vm
    - iscsi_discover
    - iscsi_getdevices
    - fc_getdevices
    - get_network_interfaces
    - validate_hostnames
    - never
-------------------------------------------------------------------------------- /tasks/partial_execution.yml: --------------------------------------------------------------------------------
---
- name: Force facts gathering
  setup:
  tags:
    - initial_clean
    - final_clean
    - bootstrap_local_vm
    - create_storage_domain
    - create_target_vm
    - iscsi_discover
    - iscsi_getdevices
    - fc_getdevices
    - get_network_interfaces
    - validate_hostnames
    - never


- name: Initial validations and cleanups
  block:
    - name: Install required packages for oVirt Hosted Engine deployment
      import_tasks: install_packages.yml

    - name: System configuration validations
      include_tasks: "{{ item }}"
      with_fileglob: "pre_checks/*.yml"

    - name: Clean environment before deployment
      import_tasks: initial_clean.yml
  tags: [initial_clean, bootstrap_local_vm, never]


- name: Bootstrap local engine VM
  block:
    - name: Bootstrap local engine VM
      block:
        - name: 01 Bootstrap local VM
          import_tasks: bootstrap_local_vm/01_prepare_routing_rules.yml

        - name: 02 Bootstrap local VM
          import_tasks: bootstrap_local_vm/02_create_local_vm.yml

    - name: Local engine VM installation - Pre tasks
      block:
        - name: 03 Bootstrap local VM
          import_tasks: bootstrap_local_vm/03_engine_initial_tasks.yml
      delegate_to: "{{ groups.engine[0] }}"

    - name: Engine Setup on local VM
      vars:
        ovirt_engine_setup_hostname: "{{ he_fqdn.split('.')[0] }}"
        ovirt_engine_setup_organization: "{{ he_cloud_init_domain_name }}"
        ovirt_engine_setup_dwh_db_host: "{{ he_fqdn.split('.')[0] }}"
        ovirt_engine_setup_firewall_manager: null
        ovirt_engine_setup_answer_file_path: /root/ovirt-engine-answers
        ovirt_engine_setup_use_remote_answer_file: true
        ovirt_engine_setup_offline: "{{ he_offline_deployment }}"
        ovirt_engine_setup_package_list: "{{ he_additional_package_list }}"
        ovirt_engine_setup_admin_password: "{{ he_admin_password }}"
      import_role:
        name: ovirt.engine-setup
      delegate_to: "{{ groups.engine[0] }}"

    - name: Local engine VM installation - Post tasks
      block:
        - name: 04 Bootstrap local VM
          import_tasks: bootstrap_local_vm/04_engine_final_tasks.yml
      delegate_to: "{{ groups.engine[0] }}"

    - name: Add first HE host
      block:
        - name: 05 Bootstrap local VM
          import_tasks: bootstrap_local_vm/05_add_host.yml
  tags: [bootstrap_local_vm, never]


- name: Create hosted-engine storage domain
  block:
    - name: Create Storage Domain
      import_tasks: create_storage_domain.yml
  tags: [create_storage_domain, never]


- name: Create and configure target VM
  block:
    - name: Fetch host IP address
      import_tasks: fetch_host_ip.yml

    - name: Create target hosted engine vm
      import_tasks: create_target_vm/01_create_target_hosted_engine_vm.yml

    - name: Configure database settings
      import_tasks: create_target_vm/02_engine_vm_configuration.yml
      delegate_to: "{{ groups.engine[0] }}"
  tags: [create_target_vm, never]


- name: Hosted engine final tasks
  import_tasks: create_target_vm/03_hosted_engine_final_tasks.yml
  tags: [create_target_vm, never]


- name: Final clean
  import_tasks: final_clean.yml
  tags: [create_target_vm, final_clean, never]


- name: Validate network interface
  import_tasks: "pre_checks/001_validate_network_interfaces.yml"
  tags: [get_network_interfaces, never]


- name: Validate hostnames
  import_tasks: "pre_checks/002_validate_hostname_tasks.yml"
  tags: [validate_hostnames, never]


- name: Get FC devices
  import_tasks: "fc_getdevices.yml"
  tags: [fc_getdevices, never]


- name: iSCSI discover
  import_tasks: "iscsi_discover.yml"
  tags: [iscsi_discover, never]


- name: Get iSCSI devices
  import_tasks: "iscsi_getdevices.yml"
  tags: [iscsi_getdevices, never]
-------------------------------------------------------------------------------- /tasks/pause_execution.yml: --------------------------------------------------------------------------------
---
- name: Create temporary lock file
  tempfile:
    state: file
    suffix: _he_setup_lock
  delegate_to: localhost
  register: he_setup_lock_file
- name: Pause execution until {{ he_setup_lock_file.path }} is removed, delete it once ready to proceed
  wait_for:
    path: "{{ he_setup_lock_file.path }}"
    state: absent
    timeout: 86400  # 24 hours
  delegate_to: localhost
-------------------------------------------------------------------------------- /tasks/pre_checks/001_validate_network_interfaces.yml: --------------------------------------------------------------------------------
---
- name: Network interfaces
  block:
    - name: Detecting interface on existing management bridge
      set_fact:
        bridge_interface="{{ hostvars[inventory_hostname]['ansible_' + bridge_name ]['interfaces']|first }}"
      when: "'ansible_' + bridge_name in hostvars[inventory_hostname]"
      with_items:
        - 'ovirtmgmt'
        - 'rhevm'
      loop_control:
        loop_var: bridge_name
    - debug: var=bridge_interface
    - name: Get all active network interfaces
      vars:
        acceptable_bond_modes: ['active-backup', 'balance-xor', 'broadcast', '802.3ad']
      set_fact:
        otopi_net_host="{{ hostvars[inventory_hostname]['ansible_' + iface_item]['device'] }}"
        type="{{ hostvars[inventory_hostname]['ansible_' + iface_item]['type'] }}"
        bond_valid_name="{{ iface_item | regex_search('(^bond[0-9]+)') }}"
      when: (
          (
            iface_item != 'lo'
          ) and (
            bridge_interface is not defined
          ) and (
            'active' in hostvars[inventory_hostname]['ansible_' + iface_item] and
            hostvars[inventory_hostname]['ansible_' + iface_item]['active']
          ) and (
            hostvars[inventory_hostname]['ansible_' + iface_item]['type'] != 'bridge'
          ) and (
            hostvars[inventory_hostname]['ansible_' + iface_item]['ipv4'] is defined or
            hostvars[inventory_hostname]['ansible_' + iface_item]['ipv6'] is defined
          ) and (
            (
              hostvars[inventory_hostname]['ansible_' + iface_item]['type'] != 'bonding'
            ) or (
              (
                hostvars[inventory_hostname]['ansible_' + iface_item]['type'] == 'bonding'
              ) and (
                hostvars[inventory_hostname]['ansible_' + iface_item]['slaves'][0] is defined
              ) and (
                hostvars[inventory_hostname]['ansible_' + iface_item]['mode'] in acceptable_bond_modes
              )
            )
          )
        )
      with_items:
        - "{{ ansible_interfaces | map('replace', '-','_') | list }}"
      loop_control:
        loop_var: iface_item
      register: valid_network_interfaces
    - debug: var=valid_network_interfaces
    - name: Filter bonds with bad naming
      set_fact:
        net_iface="{{ bond_item }}"
      when: >-
        not 'skipped' in bond_item and ((bond_item['ansible_facts']['type'] == 'ether') or
        ( (bond_item['ansible_facts']['type'] == 'bonding') and bond_item['ansible_facts']['bond_valid_name'] ))
      with_items:
        - "{{ valid_network_interfaces['results'] }}"
      loop_control:
        loop_var: bond_item
      register: bb_filtered_list
    - debug: var=bb_filtered_list
    - name: Generate output list
      set_fact:
        host_net: >-
          {{ [bridge_interface] if bridge_interface is defined else bb_filtered_list.results |
          reject('skipped') | map(attribute='bond_item.ansible_facts.otopi_net_host') | list }}
    - debug: var=host_net
    - import_tasks: filter_team_devices.yml
    - name: Validate selected bridge interface if management bridge does not exist
      fail:
        msg: The selected network interface is not valid
      when:
        he_bridge_if not in otopi_host_net.ansible_facts.otopi_host_net and bridge_interface is not defined and
        not he_just_collect_network_interfaces
...
-------------------------------------------------------------------------------- /tasks/pre_checks/002_validate_hostname_tasks.yml: --------------------------------------------------------------------------------
---
- name: Choose IPv4, IPv6 or auto
  import_tasks: ipv_switch.yml
- name: Define he_host_address and he_host_ip
  import_tasks: fetch_host_ip.yml
  when: he_host_ip is none or he_host_address is none

- name: Validate host hostname
  block:
    - name: Avoid localhost
      fail:
        msg: >
          localhost is not a valid address
      when: he_host_address in ['localhost', 'localhost.localdomain']
    - name: Ensure host address resolves locally
      fail:
        msg: >
          The address proposed for this host does not resolve locally
      when: he_host_ip not in ansible_all_ipv4_addresses | union(ansible_all_ipv6_addresses)
    - name: Ensure the resolved address resolves on the selected interface
      block:
        - name: Get target address from selected interface (IPv4)
          shell: >-
            ip addr show
            {{ he_mgmt_network
            if 'ansible_' + he_mgmt_network.replace('-','_') in hostvars[inventory_hostname]
            else he_bridge_if }} |
            grep 'inet ' |
            cut -d' ' -f6 |
            cut -d'/' -f1
          register: target_address_v4
          changed_when: true
        - debug: var=target_address_v4
        - name: Get target address from selected interface (IPv6)
          shell: >-
            ip addr show
            {{ he_mgmt_network
            if 'ansible_' + he_mgmt_network.replace('-','_') in hostvars[inventory_hostname]
            else he_bridge_if }} |
            grep 'inet6 ' |
            cut -d' ' -f6 |
            cut -d'/' -f1
          register: target_address_v6
          changed_when: true
        - debug: var=target_address_v6
        - name: Check the resolved address resolves on the selected interface
          fail:
            msg: >
              The resolved address doesn't resolve
              on the selected interface
          when: >-
            he_host_ip not in target_address_v4.stdout_lines and
            he_host_ip not in target_address_v6.stdout_lines
        - name: Check for alias
          shell: getent {{ ip_key }} {{ he_host_address }} | cut -d' ' -f1 | uniq
          register: hostname_res_count_output
          changed_when: true
          ignore_errors: true
        - debug: var=hostname_res_count_output
        - name: Filter resolved address list
          set_fact:
            hostname_res_count_output_filtered: >-
              {{ hostname_res_count_output.stdout_lines |
              difference(target_address_v6.stdout_lines) |
              difference(target_address_v4.stdout_lines) }}
        - name: Ensure the resolved address resolves only on the selected interface
          fail:
            msg: >
              hostname '{{ he_host_address }}' doesn't uniquely match the interface
              '{{ he_bridge_if }}' selected for the management bridge;
              it matches also interface with IP {{ hostname_res_count_output.stdout_lines |
              difference([he_host_ip,]) }}.
              Please make sure that the hostname got from
              the interface for the management network resolves
              only there.
          when: hostname_res_count_output_filtered|length > 0
      when: he_bridge_if is defined and he_bridge_if is not none and he_mgmt_network is defined
  when: he_host_address is defined and he_host_address is not none
- name: Validate engine he_fqdn
  block:
    - name: Avoid localhost
      fail:
        msg: >
          localhost is not a valid he_fqdn for the engine VM
      when: he_fqdn in ['localhost', 'localhost.localdomain']
    - name: Get engine FQDN resolution
      shell: getent {{ ip_key }} {{ he_fqdn }} | grep STREAM
      environment: "{{ he_cmd_lang }}"
      register: fqdn_resolution_output
      changed_when: true
      ignore_errors: true
    - debug: var=fqdn_resolution_output
    - name: Check engine he_fqdn resolution
      fail:
        msg: >
          Unable to resolve address
      when: fqdn_resolution_output.rc != 0
    - name: Parse engine he_fqdn resolution
      set_fact:
        r_fqdn_address: "{{ fqdn_resolution_output.stdout.split()[0] }}"
    - debug: var=r_fqdn_address
    - name: Ensure engine he_fqdn doesn't resolve locally
      fail:
        msg: >
          The he_fqdn proposed for the engine VM resolves on this host
      when: r_fqdn_address in ansible_all_ipv4_addresses | union(ansible_all_ipv6_addresses)
    - name: Check http/https proxy
      fail:
        msg: >
          Your system is configured to use a proxy, please
          add an exception for {{ url }} with no_proxy directive.
      when: url is proxied
      loop_control:
        loop_var: url
      with_items:
        - "http://{{ he_fqdn }}/"
        - "https://{{ he_fqdn }}/"
  when: he_fqdn is defined and he_fqdn is not none
-------------------------------------------------------------------------------- /tasks/pre_checks/define_variables.yml: --------------------------------------------------------------------------------
---
- name: Define Variables
  block:
    - name: Define he_cloud_init_domain_name
      block:
        - name: Get domain name
          command: hostname -d
          changed_when: true
          register: host_domain_name
        - name: Set he_cloud_init_domain_name
          set_fact:
            he_cloud_init_domain_name: "{{ host_domain_name.stdout_lines[0] if host_domain_name.stdout_lines else '' }}"
      when: he_cloud_init_domain_name is not defined
    - debug: var=he_cloud_init_domain_name

    - name: Define he_cloud_init_host_name
      set_fact:
        he_cloud_init_host_name: "{{ he_fqdn }}"
    - debug: var=he_cloud_init_host_name

    - name: Define he_vm_uuid
      block:
        - name: Get uuid
          command: uuidgen
          changed_when: true
          register: uuid
        - name: Set he_vm_uuid
          set_fact:
            he_vm_uuid: "{{ uuid.stdout }}"
    - debug: var=he_vm_uuid

    - name: Define he_nic_uuid
      block:
        - name: Get uuid
          command: uuidgen
          changed_when: true
          register: uuid
        - name: Set he_nic_uuid
          set_fact:
            he_nic_uuid: "{{ uuid.stdout }}"
    - debug: var=he_nic_uuid

    - name: Define he_cdrom_uuid
      block:
        - name: Get uuid
          command: uuidgen
          changed_when: true
          register: uuid
        - name: Set he_cdrom_uuid
          set_fact:
            he_cdrom_uuid: "{{ uuid.stdout }}"
    - debug: var=he_cdrom_uuid

    - name: Define Timezone
      block:
        - name: get timezone
          shell: timedatectl | grep "Time zone" | awk '{print $3}'
          changed_when: true
          register: timezone
        - name: Set he_time_zone
          set_fact:
            he_time_zone: "{{ timezone.stdout }}"
    - debug: var=he_time_zone
-------------------------------------------------------------------------------- /tasks/pre_checks/validate_data_center_name.yml: --------------------------------------------------------------------------------
---
- name: Validate Data Center name format
  block:
    - name: Fail if Data Center name format is incorrect
      fail:
        msg: >-
          "Invalid Data Center name format. Data Center name may only contain letters, numbers, '-', or '_'."
          " Got {{ he_data_center }}"
      when: not he_data_center | regex_search( "^[a-zA-Z0-9_-]+$" )
    - name: Validate Cluster name
      fail:
        msg: >-
          "Cluster name cannot be 'Default'. This is a reserved name for the default DataCenter. Please choose"
          " another name for the cluster"
      when: he_data_center != "Default" and he_cluster == "Default"
-------------------------------------------------------------------------------- /tasks/pre_checks/validate_firewalld.yml: --------------------------------------------------------------------------------
---
- name: Check firewalld status
  block:
    - name: Check firewalld status
      systemd:
        name: firewalld
      register: firewalld_s
    - name: Enforce firewalld status
      fail:
        msg: >
          firewalld is required to be enabled and active in order
          to correctly deploy hosted-engine.
          Please check, fix accordingly and re-deploy.
      when: firewalld_s.status.SubState != 'running' or firewalld_s.status.LoadState == 'masked'
-------------------------------------------------------------------------------- /tasks/pre_checks/validate_gateway.yml: --------------------------------------------------------------------------------
---
- name: Define default gateway
  block:
    - name: Get default gateway IPv4
      shell: ip r | grep default | awk '{print $3}'
      changed_when: true
      register: get_gateway_4
      when: he_default_gateway_4 is not defined or he_default_gateway_4 is none or not he_default_gateway_4
    - debug: var=get_gateway_4
    - name: Get default gateway IPv6
      shell: ip -6 r | grep default | awk '{print $3}'
      changed_when: true
      register: get_gateway_6
      when: he_default_gateway_6 is not defined or he_default_gateway_6 is none or not he_default_gateway_6
    - debug: var=get_gateway_6
    - name: Set he_gateway
      set_fact:
        he_gateway: >-
          {{ get_gateway_4.stdout_lines[0] if get_gateway_4.stdout_lines else
          get_gateway_6.stdout_lines[0] if get_gateway_6.stdout_lines else
          ''
          }}
      when: he_gateway is not defined or he_gateway is none or not he_gateway|trim
    - debug: var=he_gateway

- name: Fail if there is no gateway
  fail:
    msg: "No default gateway is defined"
  when: he_gateway is none or not he_gateway|trim
-------------------------------------------------------------------------------- /tasks/pre_checks/validate_mac_address.yml: --------------------------------------------------------------------------------
---
- name: Define Engine VM MAC address
  block:
    # Generate a random MAC and force the second hex digit even (unicast).
    - name: Generate unicast MAC address
      shell: od -An -N6 -tx1 /dev/urandom | sed -e 's/^ *//' -e 's/ */:/g' -e 's/:$//' -e 's/^\(.\)[13579bdf]/\10/'
      changed_when: true
      register: mac_address
    - debug: var=mac_address
    - name: Set he_vm_mac_addr
      set_fact:
        he_vm_mac_addr: >-
          {{ mac_address.stdout if he_vm_mac_addr is not defined or he_vm_mac_addr is none else he_vm_mac_addr }}
    - name: Fail if MAC address structure is incorrect
      fail:
        msg: "Invalid unicast MAC address format. Got {{ he_vm_mac_addr }}"
      when: not he_vm_mac_addr | regex_search( "^[a-fA-F0-9][02468aAcCeE](:[a-fA-F0-9]{2}){5}$" )
    - debug: var=he_vm_mac_addr
-------------------------------------------------------------------------------- /tasks/pre_checks/validate_memory_size.yml: --------------------------------------------------------------------------------
---
- name: Get available memory amount
  block:
    - name: Get free memory
      shell: free -m | grep Mem | awk '{print $4}'
      changed_when: true
      register: free_mem
    - debug: var=free_mem
    - name: Get cached memory
      shell: free -m | grep Mem | awk '{print $6}'
      changed_when: true
      register: cached_mem
    - debug: var=cached_mem
    - name: Set Max memory
      set_fact:
        max_mem: "{{ free_mem.stdout|int + cached_mem.stdout|int - he_reserved_memory_MB + he_avail_memory_grace_MB }}"
    - debug: var=max_mem

- name: set he_mem_size_MB to max available if not defined
  set_fact:
    he_mem_size_MB: "{{ he_mem_size_MB if he_mem_size_MB != 'max' else max_mem }}"
- debug: var=he_mem_size_MB

- name: Fail if available memory is less than the minimal requirement
  fail:
    msg: >-
      Available memory ( {{ max_mem }}MB ) is less than the minimal requirement ({{ he_minimal_mem_size_MB }}MB).
      Be aware that {{ he_reserved_memory_MB }}MB is reserved for the host and cannot be allocated to the
      engine VM.
  when: >-
    he_requirements_check_enabled and he_memory_requirements_check_enabled and max_mem|int < he_minimal_mem_size_MB|int

- name: Fail if user chose less memory than the minimal requirement
  fail:
    msg: "Memory size must be at least {{ he_minimal_mem_size_MB }}MB, while you selected only {{ he_mem_size_MB }}MB"
  when: >-
    he_requirements_check_enabled and
    he_memory_requirements_check_enabled and he_minimal_mem_size_MB|int > he_mem_size_MB|int

- name: Fail if user chose more memory than the available memory
  fail:
    msg: >-
      Not enough memory! {{ he_mem_size_MB }}MB, while only {{ max_mem }}MB are available on the host.
      Be aware that {{ he_reserved_memory_MB }}MB is reserved for the host and cannot be allocated to the
      engine VM.
  when: >-
    he_requirements_check_enabled and
    he_memory_requirements_check_enabled and he_mem_size_MB|int > max_mem|int

- name: Fail if he_disk_size_GB is smaller than the minimal requirement
  fail:
    msg: "Disk size too small: ({{ he_disk_size_GB }}GB), disk size must be at least {{ he_minimal_disk_size_GB }}GB"
  when: he_requirements_check_enabled and he_disk_size_GB|int < he_minimal_disk_size_GB|int
-------------------------------------------------------------------------------- /tasks/pre_checks/validate_network_test.yml: --------------------------------------------------------------------------------
---
- name: Validate network connectivity check configuration
  block:
    - debug: var=he_network_test
    - name: Fail if he_network_test is not valid
      fail:
        msg: "Invalid he_network_test defined"
      changed_when: true
      when: he_network_test not in ['dns', 'ping', 'tcp', 'none']
    - name: Validate TCP network connectivity check parameters
      block:
        - debug: var=he_tcp_t_address
        - name: Fail if he_tcp_t_address is not defined
          fail:
            msg: "No he_tcp_t_address is defined"
          changed_when: true
          when:
            ( he_tcp_t_address is undefined ) or
            ( he_tcp_t_address is none ) or
            ( he_tcp_t_address|trim|length == 0 )
        - debug: var=he_tcp_t_port
        - name: Fail if he_tcp_t_port is not defined
          fail:
            msg: "No he_tcp_t_port is defined"
          changed_when: true
          when:
            ( he_tcp_t_port is undefined ) or
            ( he_tcp_t_port is none )
        - name: Fail if he_tcp_t_port is no integer
          fail:
            msg: "he_tcp_t_port has to be integer"
          changed_when: true
          when: not he_tcp_t_port|int
      when: he_network_test == 'tcp'
-------------------------------------------------------------------------------- /tasks/pre_checks/validate_services_status.yml: --------------------------------------------------------------------------------
---
- name: Populate service facts
  systemd:
    name: "{{ service_item }}"
  register: checked_services
  with_items:
    - firewalld
  loop_control:
    loop_var: service_item
- name: Fail if the service is masked or not running
  fail:
    msg: "{{ service.name }} is masked or not running"
  when: service.status.SubState != 'running' or service.status.LoadState == 'masked'
  with_items: "{{ checked_services.results }}"
  loop_control:
    label: "{{ service.name }}"
    loop_var: service
-------------------------------------------------------------------------------- /tasks/pre_checks/validate_vcpus_count.yml: --------------------------------------------------------------------------------
---
- name: Define he_maxvcpus
  block:
    - name: get max cpus
      command: grep -c ^processor /proc/cpuinfo
      changed_when: true
      register: max_cpus
    - name: Set he_maxvcpus
      set_fact:
        he_maxvcpus: "{{ max_cpus.stdout }}"
    - debug: var=he_maxvcpus

- name: Set he_vcpus to maximum amount if not defined
  set_fact:
    he_vcpus: "{{ he_vcpus if he_vcpus != 'max' else he_maxvcpus }}"
- debug: var=he_vcpus

- name: Check number of chosen CPUs
  fail:
    msg: "Invalid number of cpu specified: {{ he_vcpus }}, while only {{ he_maxvcpus }} are available on the host"
  when: he_maxvcpus|int < he_vcpus|int
-------------------------------------------------------------------------------- /tasks/restore_backup.yml: --------------------------------------------------------------------------------
---
- name: Copy the backup file to the engine VM for restore
  copy:
    src: "{{ he_restore_from_file }}"
    dest: /root/engine_backup
    owner: root
    group: root
    mode: 0600
- name: Run engine-backup
  shell: >-
    engine-backup --mode=restore
    --log=/var/log/ovirt-engine/setup/restore-backup-$(date -u +%Y%m%d%H%M%S).log
    --file=/root/engine_backup --provision-all-databases --restore-permissions
  environment: "{{ he_cmd_lang }}"
  register: engine_restore_out
  changed_when: true
- debug: var=engine_restore_out
- name: Remove backup file
  file:
    state: absent
    path: /root/engine_backup
# The restored DB still references the old hosted-engine VM (origin=6); drop it.
- name: Remove previous hosted-engine VM
  command: >-
    psql -d engine -c "SELECT deletevm(vm_guid) FROM (SELECT vm_guid FROM vms WHERE origin=6) t"
  environment: "{{ he_cmd_lang }}"
  become: true
  become_user: postgres
  become_method: sudo
  changed_when: true
  register: db_remove_old_enginevm
- name: Update dynamic data for VMs on the host used to redeploy
  command: >-
    psql -d engine -c
    "UPDATE vm_dynamic SET run_on_vds = NULL, status=0 /* Down */ WHERE run_on_vds IN
    (SELECT vds_id FROM vds
    WHERE upper(vds_unique_id)=upper('{{ hostvars[he_ansible_host_name]['unique_id_out']['stdout_lines']|first }}'))"
  environment: "{{ he_cmd_lang }}"
  become: true
  become_user: postgres
  become_method: sudo
  changed_when: true
  register: db_update_host_vms
- debug: var=db_update_host_vms
- name: Update dynamic data for VMs migrating to the host used to redeploy
  command: >-
    psql -d engine -c
    "UPDATE vm_dynamic SET migrating_to_vds = NULL, status=0 /* Down */ WHERE migrating_to_vds IN
    (SELECT vds_id FROM vds WHERE
    upper(vds_unique_id)=upper('{{ hostvars[he_ansible_host_name]['unique_id_out']['stdout_lines']|first }}'))"
  environment: "{{ he_cmd_lang }}"
  become: true
  become_user: postgres
  become_method: sudo
  changed_when: true
  register: db_update_host_migrating_vms
- debug: var=db_update_host_migrating_vms
- name: Remove host used to redeploy
  command: >-
    psql -d engine -c
    "SELECT deletevds(vds_id) FROM
    (SELECT vds_id FROM vds WHERE
    upper(vds_unique_id)=upper('{{ hostvars[he_ansible_host_name]['unique_id_out']['stdout_lines']|first }}')) t"
  environment: "{{ he_cmd_lang }}"
  become: true
  become_user: postgres
  become_method: sudo
  changed_when: true
  register: db_remove_he_host
- debug: var=db_remove_he_host
- name: Rename previous HE storage domain to avoid name conflicts
  command: >-
    psql -d engine -c
    "UPDATE storage_domain_static SET
    storage_name='{{ he_storage_domain_name }}_old_{{ ansible_date_time.iso8601_basic_short }}' WHERE
    storage_name='{{ he_storage_domain_name }}'"
  environment: "{{ he_cmd_lang }}"
  become: true
  become_user: postgres
  become_method: sudo
  changed_when: true
  register: db_rename_he_sd
- debug: var=db_rename_he_sd
- name: Save original DisableFenceAtStartupInSec
  shell: >-
    set -euo pipefail && engine-config -g DisableFenceAtStartupInSec |
    cut -d' ' -f2 > /root/DisableFenceAtStartupInSec.txt
  environment: "{{ he_cmd_lang }}"
  changed_when: true
- name: Update DisableFenceAtStartupInSec to prevent host fencing during the recovery
  command: "engine-config -s DisableFenceAtStartupInSec=86400"
  environment: "{{ he_cmd_lang }}"
  changed_when: true
- name: Add lines to engine-setup answerfile for PKI renewal
  lineinfile:
    path: /root/ovirt-engine-answers
    line: "{{ item }}"
  with_items:
    - "OVESETUP_PKI/renew=bool:{{ he_pki_renew_on_restore }}"
    - "QUESTION/1/OVESETUP_SKIP_RENEW_PKI_CONFIRM=str:yes"
- name: remove version lock from the engine
  file:
    state: absent
    path: /etc/yum/pluginconf.d/versionlock.list
- name: recreate versionlock empty file
  file:
    state: touch
    path: /etc/yum/pluginconf.d/versionlock.list
-------------------------------------------------------------------------------- /tasks/search_available_network_subnet.yaml: --------------------------------------------------------------------------------
---
# Recursively probe 192.168.<chunk>.0/24 (or fd00:1234:<chunk>:900::/64)
# subnets until one without an existing route is found.
- name: Search for an available IPv4 subnet
  block:
    - name: Define 3rd chunk
      set_fact:
        chunk: 0
      when: chunk is not defined
    - name: Set 3rd chunk
      set_fact:
        chunk: "{{ chunk|int + 1 }}"
    - debug: var=chunk
    - name: Get ip route
      shell: ip route get 192.168.{{ chunk }}.1 | grep "via" | cat
      register: result
    - debug: var=result
    - name: Fail if can't find an available subnet
      fail:
        msg: >-
          "Cannot find an available subnet for internal Libvirt network"
          "Please set it to an unused subnet by adding the variable 'he_ipv4_subnet_prefix'"
          "to the variable-file ( e.g. he_ipv4_subnet_prefix: '123.123.123' )."
      when: result.stdout.find("via") == -1 and chunk|int > 253
    - name: Set new IPv4 subnet prefix
      set_fact:
        he_ipv4_subnet_prefix: "192.168.{{ chunk }}"
      when: result.stdout.find("via") != -1
    - name: Search again with another prefix
      include_tasks: search_available_network_subnet.yaml
      when: result.stdout.find("via") == -1
  when: not ipv6_deployment|bool

- name: Search for an available IPv6 subnet
  block:
    - name: Define 3rd chunk
      set_fact:
        chunk: 1000
      when: chunk is not defined
    - name: Set 3rd chunk
      set_fact:
        chunk: "{{ chunk|int + 45 }}"  # 200 tries
    - debug: var=chunk
    - name: Get ip route
      shell: ip -6 route get fd00:1234:{{ chunk }}:900::1 | grep "via" | cat
      register: result
    - debug: var=result
    - name: Fail if can't find an available subnet
      fail:
        msg: >-
          "Cannot find an available subnet for internal Libvirt network"
          "Please set it to an unused subnet by adding the variable 'he_ipv6_subnet_prefix'"
          "to the variable-file ( e.g. he_ipv6_subnet_prefix: 'fd00:9876:5432:900' )."
      when: result.stdout.find("via") == -1 and chunk|int > 9900
    - name: Set new IPv6 subnet prefix
      set_fact:
        he_ipv6_subnet_prefix: "fd00:1234:{{ chunk }}:900"
      when: result.stdout.find("via") != -1
    - name: Search again with another prefix
      include_tasks: search_available_network_subnet.yaml
      when: result.stdout.find("via") == -1
  when: ipv6_deployment|bool
-------------------------------------------------------------------------------- /tasks/validate_ip_prefix.yml: --------------------------------------------------------------------------------
---
- name: Validate IP prefix
  block:
    - name: IPv4 Validation
      block:
        - name: Get IPv4 route
          command: ip route get {{ he_ipv4_subnet_prefix + ".1" }}
          register: ip_route_result
        - debug: var=ip_route_result
        - name: Check if route exists
          include_tasks: search_available_network_subnet.yaml
          when: ip_route_result.stdout.find("via") == -1
      when: not ipv6_deployment|bool
    - name: IPv6 Validation
      block:
        - name: Get IPv6 route
          command: ip route get {{ he_ipv6_subnet_prefix + "::1" }}
          register: ip_route_result
        - debug: var=ip_route_result
        - name: Check if route exists
          include_tasks: search_available_network_subnet.yaml
          when: ip_route_result.stdout.find("via") == -1
      when: ipv6_deployment|bool
-------------------------------------------------------------------------------- /templates/broker.conf.j2: --------------------------------------------------------------------------------
[email]
smtp-server = {{ he_smtp_server }}
smtp-port = {{ he_smtp_port }}
source-email = {{ he_source_email }}
destination-emails = {{ he_dest_email }}

[notify]
state_transition = maintenance|start|stop|migrate|up|down
-------------------------------------------------------------------------------- /templates/fhanswers.conf.j2:
-------------------------------------------------------------------------------- 1 | [environment:default] 2 | OVEHOSTED_CORE/screenProceed=bool:True 3 | OVEHOSTED_CORE/deployProceed=bool:True 4 | OVEHOSTED_CORE/confirmSettings=bool:True 5 | OVEHOSTED_NETWORK/fqdn=str:{{ he_fqdn }} 6 | OVEHOSTED_NETWORK/bridgeName=str:{{ he_mgmt_network }} 7 | OVEHOSTED_NETWORK/firewallManager=str:iptables 8 | OVEHOSTED_NETWORK/gateway=str:{{ he_gateway }} 9 | OVEHOSTED_ENGINE/clusterName=str:{{ he_cluster }} 10 | {# TODO: FIX #} 11 | OVEHOSTED_STORAGE/storageDatacenterName=str:hosted_datacenter 12 | OVEHOSTED_STORAGE/domainType=str:{{ he_domain_type }} 13 | {# TODO: FIX #} 14 | OVEHOSTED_STORAGE/connectionUUID=str:e29cf818-5ee5-46e1-85c1-8aeefa33e95d 15 | OVEHOSTED_STORAGE/LunID={{ 'str' if he_lun_id else 'none' }}:{{ he_lun_id if he_lun_id else 'None' }} 16 | OVEHOSTED_STORAGE/imgSizeGB=str:{{ he_disk_size_GB }} 17 | OVEHOSTED_STORAGE/mntOptions={{ 'str' if he_mount_options else 'none' }}:{{ he_mount_options if he_mount_options else 'None' }} 18 | OVEHOSTED_STORAGE/iSCSIPortalIPAddress={{ 'str' if he_iscsi_portal_addr else 'none' }}:{{ he_iscsi_portal_addr if he_iscsi_portal_addr else 'None' }} 19 | OVEHOSTED_STORAGE/metadataVolumeUUID=str:{{ he_metadata_disk_details.disk.image_id }} 20 | OVEHOSTED_STORAGE/sdUUID=str:{{ storage_domain_details.ovirt_storage_domains[0].id }} 21 | OVEHOSTED_STORAGE/iSCSITargetName={{ 'str' if he_iscsi_target else 'none' }}:{{ he_iscsi_target if he_iscsi_target else 'None' }} 22 | OVEHOSTED_STORAGE/metadataImageUUID=str:{{ he_metadata_disk_details.disk.id }} 23 | OVEHOSTED_STORAGE/lockspaceVolumeUUID=str:{{ he_sanlock_disk_details.disk.image_id }} 24 | OVEHOSTED_STORAGE/iSCSIPortalPort={{ 'str' if he_iscsi_portal_port else 'none' }}:{{ he_iscsi_portal_port if he_iscsi_portal_port else 'None' }} 25 | OVEHOSTED_STORAGE/imgUUID=str:{{ he_virtio_disk_details.disk.id }} 26 | OVEHOSTED_STORAGE/confImageUUID=str:{{ he_conf_disk_details.disk.id }} 27 | 
OVEHOSTED_STORAGE/spUUID=str:00000000-0000-0000-0000-000000000000 28 | OVEHOSTED_STORAGE/lockspaceImageUUID=str:{{ he_sanlock_disk_details.disk.id }} 29 | {# TODO: FIX #} 30 | OVEHOSTED_ENGINE/enableHcGlusterService=none:None 31 | OVEHOSTED_STORAGE/storageDomainName=str:{{ he_storage_domain_name }} 32 | OVEHOSTED_STORAGE/iSCSIPortal={{ 'str' if he_iscsi_tpgt else 'none' }}:{{ he_iscsi_tpgt if he_iscsi_tpgt else 'None' }} 33 | OVEHOSTED_STORAGE/volUUID=str:{{ he_virtio_disk_details.disk.image_id }} 34 | {# TODO: FIX #} 35 | OVEHOSTED_STORAGE/vgUUID=none:None 36 | OVEHOSTED_STORAGE/confVolUUID=str:{{ he_conf_disk_details.disk.image_id }} 37 | {% if he_domain_type=="nfs" or he_domain_type=="glusterfs" %} 38 | OVEHOSTED_STORAGE/storageDomainConnection=str:{{ he_storage_domain_addr }}:{{ he_storage_domain_path }} 39 | {% else %} 40 | OVEHOSTED_STORAGE/storageDomainConnection=str:{{ he_storage_domain_addr }} 41 | {% endif %} 42 | OVEHOSTED_STORAGE/iSCSIPortalUser={{ 'str' if he_iscsi_username else 'none' }}:{{ he_iscsi_username if he_iscsi_username else 'None' }} 43 | {# TODO: fix it #} 44 | OVEHOSTED_VDSM/consoleType=str:vnc 45 | OVEHOSTED_VM/vmMemSizeMB=int:{{ he_mem_size_MB }} 46 | OVEHOSTED_VM/vmUUID=str:{{ he_vm_details.vm.id }} 47 | OVEHOSTED_VM/vmMACAddr=str:{{ he_vm_mac_addr }} 48 | OVEHOSTED_VM/emulatedMachine=str:{{ he_emulated_machine }} 49 | OVEHOSTED_VM/vmVCpus=str:{{ he_vcpus }} 50 | OVEHOSTED_VM/ovfArchive=str:{{ he_appliance_ova }} 51 | OVEHOSTED_VM/vmCDRom=none:None 52 | OVEHOSTED_VM/automateVMShutdown=bool:True 53 | OVEHOSTED_VM/cloudInitISO=str:generate 54 | OVEHOSTED_VM/cloudinitInstanceDomainName={{ 'str' if he_cloud_init_domain_name else 'none' }}:{{ he_cloud_init_domain_name if he_cloud_init_domain_name else 'None' }} 55 | OVEHOSTED_VM/cloudinitInstanceHostName={{ 'str' if he_cloud_init_host_name else 'none' }}:{{ he_cloud_init_host_name if he_cloud_init_host_name else 'None' }} 56 | OVEHOSTED_VM/rootSshPubkey={{ 'str' if he_root_ssh_pubkey else 
'none' }}:{{ he_root_ssh_pubkey if he_root_ssh_pubkey else 'None' }} 57 | OVEHOSTED_VM/cloudinitExecuteEngineSetup=bool:True 58 | OVEHOSTED_VM/cloudinitVMStaticCIDR={{ 'str' if he_vm_ip_addr is not none else 'none' }}:{{ he_vm_ip_addr if he_vm_ip_addr is not none else 'None' }} 59 | OVEHOSTED_VM/cloudinitVMTZ={{ 'str' if he_time_zone else 'none' }}:{{ he_time_zone if he_time_zone else 'None' }} 60 | OVEHOSTED_VM/rootSshAccess=str:yes 61 | OVEHOSTED_VM/cloudinitVMETCHOSTS=bool:{{ he_vm_etc_hosts }} 62 | OVEHOSTED_VM/cloudinitVMDNS={{ 'str' if he_dns_addr else 'none' }}:{{ he_dns_addr if he_dns_addr else 'None' }} 63 | OVEHOSTED_NOTIF/smtpPort=str:{{ he_smtp_port }} 64 | OVEHOSTED_NOTIF/smtpServer=str:{{ he_smtp_server }} 65 | OVEHOSTED_NOTIF/sourceEmail=str:{{ he_source_email }} 66 | OVEHOSTED_NOTIF/destEmail=str:{{ he_dest_email }} 67 | -------------------------------------------------------------------------------- /templates/hosted-engine.conf.j2: -------------------------------------------------------------------------------- 1 | fqdn={{ he_fqdn }} 2 | vm_disk_id={{ he_virtio_disk_details.disk.id }} 3 | vm_disk_vol_id={{ he_virtio_disk_details.disk.image_id }} 4 | vmid={{ he_vm_details.vm.id }} 5 | {% if he_domain_type=="nfs" or he_domain_type=="glusterfs" %} 6 | storage={{ he_storage_domain_addr }}:{{ he_storage_domain_path }} 7 | {% else %} 8 | storage={{ he_storage_domain_addr }} 9 | {% endif %} 10 | nfs_version={{ he_nfs_version }} 11 | mnt_options={{ he_mount_options }} 12 | conf=/var/run/ovirt-hosted-engine-ha/vm.conf 13 | host_id={{ host_spm_id }} 14 | console=vnc 15 | domainType={{ he_domain_type }} 16 | {# spUUID={{ datacenter_id }} #} 17 | {# To avoid triggering #} 18 | {# 3.5 -> 3.6 upgrade code #} 19 | spUUID=00000000-0000-0000-0000-000000000000 20 | sdUUID={{ storage_domain_details.ovirt_storage_domains[0].id }} 21 | {# TODO: fix it #} 22 | connectionUUID=e29cf818-5ee5-46e1-85c1-8aeefa33e95d 23 | ca_cert=/etc/pki/vdsm/libvirt-spice/ca-cert.pem 24 | 
ca_subject="C=EN, L=Test, O=Test, CN=Test" 25 | vdsm_use_ssl=true 26 | gateway={{ he_gateway }} 27 | bridge={{ he_mgmt_network }} 28 | network_test={{ he_network_test }} 29 | tcp_t_address={{ he_tcp_t_address }} 30 | tcp_t_port={{ he_tcp_t_port }} 31 | metadata_volume_UUID={{ he_metadata_disk_details.disk.image_id }} 32 | metadata_image_UUID={{ he_metadata_disk_details.disk.id }} 33 | lockspace_volume_UUID={{ he_sanlock_disk_details.disk.image_id }} 34 | lockspace_image_UUID={{ he_sanlock_disk_details.disk.id }} 35 | conf_volume_UUID={{ he_conf_disk_details.disk.image_id }} 36 | conf_image_UUID={{ he_conf_disk_details.disk.id }} 37 | {# TODO: get OVF_STORE volume uid from the engine at deploy time #} 38 | 39 | # The following are used only for iSCSI storage 40 | iqn={{ he_iscsi_target }} 41 | portal={{ he_iscsi_tpgt }} 42 | user={{ he_iscsi_username }} 43 | password={{ he_iscsi_password }} 44 | port={{ he_iscsi_portal_port }} 45 | -------------------------------------------------------------------------------- /templates/ifcfg-eth0-dhcp.j2: -------------------------------------------------------------------------------- 1 | # generated by ovirt-hosted-engine-setup 2 | BOOTPROTO=dhcp 3 | DEVICE=eth0 4 | HWADDR="{{ he_vm_mac_addr }}" 5 | ONBOOT=yes 6 | TYPE=Ethernet 7 | USERCTL=no 8 | ZONE=public 9 | DEFROUTE=yes 10 | IPV4_FAILURE_FATAL=no 11 | IPV6INIT=no 12 | NM_CONTROLLED=yes 13 | -------------------------------------------------------------------------------- /templates/ifcfg-eth0-static-ipv6.j2: -------------------------------------------------------------------------------- 1 | # generated by ovirt-hosted-engine-setup 2 | BOOTPROTO=none 3 | DEVICE=eth0 4 | HWADDR="{{ he_vm_mac_addr }}" 5 | ONBOOT=yes 6 | TYPE=Ethernet 7 | USERCTL=no 8 | ZONE=public 9 | IPV6INIT=yes 10 | IPV6ADDR={{ he_vm_ip_addr }}/{{ he_vm_ip_prefix }} 11 | IPV6_DEFAULTGW={{ he_gateway }} 12 | {% if he_dns_addr is string %} 13 | {% set DNS_ADDR_LIST = he_dns_addr.split(',') %} 14 | {% elif 
he_dns_addr is iterable %} 15 | {% set DNS_ADDR_LIST = he_dns_addr %} 16 | {% else %} 17 | {% set DNS_ADDR_LIST = [] %} 18 | {% endif %} 19 | {% for d in DNS_ADDR_LIST %} 20 | DNS{{loop.index}}={{ d }} 21 | {% endfor %} 22 | DEFROUTE=yes 23 | IPV4_FAILURE_FATAL=no 24 | NM_CONTROLLED=yes 25 | -------------------------------------------------------------------------------- /templates/ifcfg-eth0-static.j2: -------------------------------------------------------------------------------- 1 | # generated by ovirt-hosted-engine-setup 2 | BOOTPROTO=none 3 | DEVICE=eth0 4 | HWADDR="{{ he_vm_mac_addr }}" 5 | ONBOOT=yes 6 | TYPE=Ethernet 7 | USERCTL=no 8 | ZONE=public 9 | IPADDR={{ he_vm_ip_addr }} 10 | PREFIX={{ he_vm_ip_prefix }} 11 | GATEWAY={{ he_gateway }} 12 | {% if he_dns_addr is string %} 13 | {% set DNS_ADDR_LIST = he_dns_addr.split(',') %} 14 | {% elif he_dns_addr is iterable %} 15 | {% set DNS_ADDR_LIST = he_dns_addr %} 16 | {% else %} 17 | {% set DNS_ADDR_LIST = [] %} 18 | {% endif %} 19 | {% for d in DNS_ADDR_LIST %} 20 | DNS{{loop.index}}={{ d }} 21 | {% endfor %} 22 | DEFROUTE=yes 23 | IPV4_FAILURE_FATAL=no 24 | IPV6INIT=no 25 | NM_CONTROLLED=yes 26 | -------------------------------------------------------------------------------- /templates/meta-data.j2: -------------------------------------------------------------------------------- 1 | instance-id: {{ he_vm_uuid }} 2 | local-hostname: {{ he_fqdn }} 3 | -------------------------------------------------------------------------------- /templates/network-config-dhcp.j2: -------------------------------------------------------------------------------- 1 | version: 1 2 | config: 3 | - type: physical 4 | name: eth0 5 | mac_address: "{{ he_vm_mac_addr }}" 6 | subnets: 7 | {% if ipv6_deployment %} 8 | - type: dhcp6 9 | {% else %} 10 | - type: dhcp 11 | {% endif %} 12 | -------------------------------------------------------------------------------- /templates/user-data.j2: 
-------------------------------------------------------------------------------- 1 | #cloud-config 2 | # vim: syntax=yaml 3 | disable_root: false 4 | {% if he_root_ssh_pubkey is not none and he_root_ssh_pubkey|length > 1 %} 5 | ssh_authorized_keys: 6 | - {{ he_root_ssh_pubkey }} 7 | {% endif %} 8 | ssh_pwauth: True 9 | chpasswd: 10 | list: | 11 | root:{{ he_hashed_appliance_password }} 12 | expire: False 13 | {% if he_time_zone is defined %} 14 | timezone: {{ he_time_zone }} 15 | {% endif %} 16 | bootcmd: 17 | - if grep -Gq "^\s*PermitRootLogin" /etc/ssh/sshd_config; then sed -re "s/^\s*(PermitRootLogin)\s+(yes|no|without-password)/\1 yes/" -i.$(date -u +%Y%m%d%H%M%S) /etc/ssh/sshd_config; else echo "PermitRootLogin yes" >> /etc/ssh/sshd_config; fi 18 | - if grep -Gq "^\s*UseDNS" /etc/ssh/sshd_config; then sed -re "s/^\s*(UseDNS)\s+(yes|no)/\1 no/" -i.$(date -u +%Y%m%d%H%M%S) /etc/ssh/sshd_config; else echo "UseDNS no" >> /etc/ssh/sshd_config; fi 19 | runcmd: 20 | - systemctl restart sshd & 21 | -------------------------------------------------------------------------------- /templates/version.j2: -------------------------------------------------------------------------------- 1 | {{ ha_version }} 2 | -------------------------------------------------------------------------------- /templates/vm.conf.j2: -------------------------------------------------------------------------------- 1 | vmId={{ he_vm_details.vm.id }} 2 | memSize={{ he_mem_size_MB }} 3 | display={{ he_console_type }} 4 | devices={index:2,iface:ide,address:{ controller:0, target:0,unit:0, bus:1, type:drive},specParams:{},readonly:true,deviceId:{{ he_cdrom_uuid }},path:{{ he_cdrom }},device:cdrom,shared:false,type:disk} 5 | devices={index:0,iface:virtio,format:raw,poolID:00000000-0000-0000-0000-000000000000,volumeID:{{ he_virtio_disk_details.disk.image_id }},imageID:{{ he_virtio_disk_details.disk.id }},specParams:{},readonly:false,domainID:{{ storage_domain_details.ovirt_storage_domains[0].id 
}},optional:false,deviceId:{{ he_virtio_disk_details.disk.image_id }},address:{bus:0x00, slot:0x06, domain:0x0000, type:pci, function:0x0},device:disk,shared:exclusive,propagateErrors:off,type:disk,bootOrder:1} 6 | devices={device:scsi,model:virtio-scsi,type:controller} 7 | devices={nicModel:pv,macAddr:{{ he_vm_mac_addr }},linkActive:true,network:{{ he_mgmt_network }},specParams:{},deviceId:{{ he_nic_uuid }},address:{bus:0x00, slot:0x03, domain:0x0000, type:pci, function:0x0},device:bridge,type:interface} 8 | devices={device:console,type:console} 9 | devices={device:{{ he_video_device }},alias:video0,type:video} 10 | devices={device:{{ he_graphic_device }},type:graphics} 11 | vmName={{ he_vm_name }} 12 | spiceSecureChannels=smain,sdisplay,sinputs,scursor,splayback,srecord,ssmartcard,susbredir 13 | smp={{ he_vcpus }} 14 | maxVCpus={{ he_maxvcpus }} 15 | cpuType={{ cluster_cpu_model }} 16 | emulatedMachine={{ he_emulated_machine }} 17 | devices={device:virtio,specParams:{source:urandom},model:virtio,type:rng} 18 | -------------------------------------------------------------------------------- /test_plugins/ovirt_proxied_check.py: -------------------------------------------------------------------------------- 1 | # ovirt-hosted-engine-setup -- ovirt hosted engine setup 2 | # Copyright (C) 2018 Red Hat, Inc. 3 | # 4 | # This library is free software; you can redistribute it and/or 5 | # modify it under the terms of the GNU Lesser General Public 6 | # License as published by the Free Software Foundation; either 7 | # version 2.1 of the License, or (at your option) any later version. 8 | # 9 | # This library is distributed in the hope that it will be useful, 10 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 11 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 12 | # Lesser General Public License for more details. 
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
#

"""Check if a URL will be accessed through a proxy."""

try:
    # Python 2 locations.
    from urllib import getproxies_environment
    from urllib import proxy_bypass
    from urlparse import urlparse
except ImportError:
    # Python 3 locations.
    from urllib.request import getproxies_environment
    from urllib.request import proxy_bypass
    from urllib.parse import urlparse


def proxied(value):
    """Return True when the given URL would be fetched through a proxy.

    A URL is considered proxied when at least one *_proxy environment
    variable is set and the URL's host is not excluded by the platform's
    proxy-bypass rules (e.g. no_proxy).

    :param value: URL string to check.
    :returns: bool -- True if a proxy would be used for this URL.
    """
    netloc = urlparse(value).netloc
    # bool({}) short-circuits the 'and', so proxy_bypass() is only
    # consulted when proxies are actually configured in the environment.
    return bool(getproxies_environment()) and not proxy_bypass(netloc)


class TestModule(object):
    """Expose the 'proxied' check as an Ansible jinja2 test plugin."""

    def tests(self):
        # Mapping of test name -> callable, as required by Ansible's
        # test-plugin API.
        return {
            'proxied': proxied,
        }