├── 01_deploy-openstack-infra.yml ├── 02_prepare-openshift.yml ├── LICENSE ├── README.md ├── ansible.cfg ├── bastion-offline-repo-for-disconnected-setup.yml ├── group_vars └── all ├── heat ├── bastion.yaml ├── hosts.yaml ├── hosts_octavia.yaml ├── infra.yaml ├── infra_octavia.yaml ├── infras.yaml ├── infras_octavia.yaml ├── lbaas_infras.yaml ├── lbaas_infras_octavia.yaml ├── lbaas_masters.yaml ├── lbaas_masters_octavia.yaml ├── lbaas_single.yaml ├── master.yaml ├── master_octavia.yaml ├── masters.yaml ├── masters_octavia.yaml ├── network.yaml ├── node.yaml ├── nodes.yaml ├── openshift.yaml ├── openshift_octavia.yaml └── openshift_single_lbaas.yaml ├── images ├── one.png ├── openshift_on_openstack_ha.PNG ├── openshift_on_openstack_non_ha.PNG ├── openshift_single_master.png ├── three.png └── two.png ├── roles ├── all-prep │ ├── tasks │ │ └── main.yml │ └── templates │ │ └── hosts.j2 ├── bastion-prep │ └── tasks │ │ └── main.yml ├── bastion-repo │ ├── defaults │ │ └── main.yml │ ├── files │ │ └── rclonefs │ ├── tasks │ │ └── main.yml │ └── templates │ │ ├── daemon.json.j2 │ │ ├── rclone-systemd.mount.j2 │ │ ├── rclone.conf.j2 │ │ └── registry-config.yml.j2 ├── docker-prep │ └── tasks │ │ └── main.yml ├── ocp-inventory │ ├── tasks │ │ └── main.yml │ ├── templates │ │ ├── openshift-inventory-enterprise.j2 │ │ └── openshift-inventory-origin.j2 │ └── vars │ │ └── main.yml ├── ocp-setup-project │ ├── tasks │ │ └── main.yml │ └── templates │ │ ├── compute-resources.j2 │ │ └── limit-ranges.j2 ├── osp-inventory │ ├── library │ │ └── os_stack.py │ └── tasks │ │ └── main.yml ├── osp-setup-project │ └── tasks │ │ └── main.yml ├── osp-stack-create │ ├── library │ │ └── stack_create.py │ └── tasks │ │ ├── main.yml │ │ └── validate-parameters.yml ├── osp-stack-update │ ├── library │ │ └── stack_update.py │ └── tasks │ │ ├── main.yml │ │ └── validate-parameters.yml └── redhat_subscription │ └── tasks │ └── main.yml ├── sample_vars.yml └── update-openstack-infra.yml /01_deploy-openstack-infra.yml: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env ansible-playbook 2 | --- 3 | - name: Deploy the OpenShift Cluster Infrastructure 4 | hosts: localhost 5 | connection: local 6 | become: no 7 | gather_facts: no 8 | environment: 9 | OS_USERNAME: "{{ openstack_user }}" 10 | OS_PASSWORD: "{{ openstack_passwd }}" 11 | OS_AUTH_URL: "{{ openstack_auth_url }}" 12 | OS_PROJECT_NAME: "{{ openstack_project }}" 13 | OS_USER_DOMAIN_NAME: Default 14 | OS_PROJECT_DOMAIN_NAME: Default 15 | OS_IDENTITY_API_VERSION: 3 16 | OS_INTERFACE: public 17 | 18 | tasks: 19 | - name: Include vars.yml 20 | include_vars: 21 | file: vars.yml 22 | 23 | - name: Set ansible_ssh_private_key_file 24 | set_fact: 25 | ansible_ssh_private_key_file: "{{ ssh_key_path }}" 26 | 27 | - import_role: 28 | name: osp-stack-create 29 | - import_role: 30 | name: osp-inventory 31 | 32 | - name: Configure OpenStack Client on Bastion 33 | hosts: bastion 34 | become: true 35 | 36 | tasks: 37 | - name: Include vars.yml 38 | include_vars: 39 | file: vars.yml 40 | 41 | - name: Set ansible_ssh_private_key_file 42 | set_fact: 43 | ansible_ssh_private_key_file: "{{ ssh_key_path }}" 44 | 45 | - import_role: 46 | name: redhat_subscription 47 | when: openshift_deployment == "openshift-enterprise" and not rhn_local_repo 48 | 49 | - import_role: 50 | name: bastion-prep 51 | 52 | -------------------------------------------------------------------------------- /02_prepare-openshift.yml: 
-------------------------------------------------------------------------------- 1 | #!/usr/bin/env ansible-playbook 2 | --- 3 | - name: Inventory OpenShift Nodes 4 | hosts: localhost 5 | connection: local 6 | become: no 7 | gather_facts: no 8 | environment: 9 | OS_USERNAME: "{{ openstack_user }}" 10 | OS_PASSWORD: "{{ openstack_passwd }}" 11 | OS_AUTH_URL: "{{ openstack_auth_url }}" 12 | OS_PROJECT_NAME: "{{ openstack_project }}" 13 | OS_USER_DOMAIN_NAME: Default 14 | OS_PROJECT_DOMAIN_NAME: Default 15 | OS_IDENTITY_API_VERSION: 3 16 | 17 | tasks: 18 | - name: Include vars.yml 19 | include_vars: 20 | file: vars.yml 21 | - import_role: 22 | name: osp-inventory 23 | 24 | - name: Setup All OpenShift Nodes 25 | hosts: all 26 | gather_facts: yes 27 | become: yes 28 | 29 | tasks: 30 | - name: Include vars.yml 31 | include_vars: 32 | file: vars.yml 33 | - import_role: 34 | name: redhat_subscription 35 | when: openshift_deployment == "openshift-enterprise" and not rhn_local_repo 36 | 37 | - import_role: 38 | name: all-prep 39 | 40 | - name: Setup All OpenShift Nodes 41 | hosts: openshift 42 | gather_facts: true 43 | become: true 44 | 45 | tasks: 46 | - name: Include vars.yml 47 | include_vars: 48 | file: vars.yml 49 | - import_role: 50 | name: docker-prep 51 | 52 | - name: Configure OpenShift inventory file 53 | hosts: bastion 54 | gather_facts: true 55 | become: true 56 | 57 | tasks: 58 | - name: Include vars.yml 59 | include_vars: 60 | file: vars.yml 61 | - import_role: 62 | name: ocp-inventory 63 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 
39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # General 2 | The purpose of this project is to provide a simple, yet flexible deployment of OpenShift on OpenStack using a three step process. This guide assumes you are familiar with OpenStack. 3 | 4 | # Contribution 5 | If you want to provide additional features, please feel free to contribute via pull requests or any other means. 
6 | We are happy to track and discuss ideas, topics and requests via 'Issues'.
7 | 
8 | # Releases
9 | For each release of OpenShift a release branch will be created. Starting with OpenShift 3.9 we follow the OpenShift release version, so it is easy to tell which release branch goes with which OpenShift version. The installer supports OpenShift Enterprise and OKD starting with 3.11. Note: CentOS OKD rpms are released after the enterprise ones, so you may have to wait a bit for OKD.
10 | 
11 | * release-3.9 OpenShift 3.9
12 | * release-3.10 OpenShift 3.10
13 | * release-3.11 OpenShift 3.11
14 | 
15 | In addition I would like to mention that I borrowed a lot of ideas from two other projects:
16 | * [OpenShift setup for Hetzner from RH SSA team](https://github.com/RedHat-EMEA-SSA-Team/hetzner-ocp)
17 | * [OpenShift on OpenStack](https://github.com/redhat-openstack/openshift-on-openstack)
18 | 
19 | # Pre-requisites
20 | * Working OpenStack deployment. Tested with OpenStack 12 & 13 (Pike & Queens) using RDO.
21 | * RHEL 7 image. Tested with RHEL 7.4, 7.5 and 7.6.
22 | * An OpenStack SSH key for accessing instances.
23 | * A provider (public) network with at least two or three available floating IPs.
24 | * A service (internal) network.
25 | * A router configured with the public network as gateway and the internal network as interface.
26 | * Flavors configured for OpenShift (see the flavor-creation sketch after the tested deployments below). These are only recommendations.
27 | * ocp.master (2 vCPU, 4GB RAM, 30 GB Root Disk)
28 | * ocp.infra (4 vCPU, 16GB RAM, 30 GB Root Disk)
29 | * ocp.node (2 vCPU, 4GB RAM, 30 GB Root Disk)
30 | * ocp.bastion (1 vCPU, 4GB RAM, 30 GB Root Disk)
31 | * Properly configured cinder and nova storage.
32 | * Make sure you aren't using the default loopback device and have disabled disk zeroing in cinder/nova for LVM.
33 | 
34 | More information on setting up a proper OpenStack environment can be found [here](https://keithtenzer.com/2018/07/17/openstack-13-queens-lab-installation-and-configuration-guide-for-hetzner-root-servers/).
35 | 
36 | # Tested Deployments
37 | ```Single Master - Non HA```
38 | 
39 | A Single Master deployment is 1 master, 1 infra node and X app nodes. This configuration is a non-HA setup, ideal for test environments.
40 | ![](images/openshift_on_openstack_non_ha.PNG)
41 | 
42 | ```Multiple Master - HA```
43 | 
44 | A Multiple Master deployment is 3 masters, 2 infra nodes and X app nodes. This configuration is an HA setup. By default etcd and the registry do not use persistent storage; at this time, persistent storage for them must be configured manually post-install.
45 | ![](images/openshift_on_openstack_ha.PNG)
46 | 
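If the recommended flavors from the pre-requisites do not exist yet, they can be created up front. A minimal sketch, assuming admin credentials are loaded; the names and sizes are simply the recommendations from the pre-requisites list above:

```
# Create the recommended OpenShift flavors (adjust sizes to your environment)
openstack flavor create --vcpus 2 --ram 4096  --disk 30 ocp.master
openstack flavor create --vcpus 4 --ram 16384 --disk 30 ocp.infra
openstack flavor create --vcpus 2 --ram 4096  --disk 30 ocp.node
openstack flavor create --vcpus 1 --ram 4096  --disk 30 ocp.bastion
```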
47 | # OpenStack Pre-Configuration (if required)
48 | 
49 | Setup New OpenStack Project
50 | 
51 | Create Project
52 | 
53 | ```
54 | # openstack project create openshift
55 | ```
56 | 
57 | Add admin user as admin to project
58 | 
59 | ```
60 | # openstack role add --project openshift --user admin admin
61 | ```
62 | 
63 | Increase project quota for security groups
64 | 
65 | ```
66 | # openstack quota set --secgroups 100 openshift
67 | ```
68 | 
69 | Increase quota for volumes
70 | 
71 | ```
72 | # openstack quota set --volumes 100 openshift
73 | ```
74 | 
75 | ## Setup Internal Network
76 | 
77 | Create internal network
78 | 
79 | ```
80 | # openstack network create openshift --project openshift
81 | ```
82 | 
83 | Create internal subnet
84 | 
85 | ```
86 | # openstack subnet create --network openshift --allocation-pool \
87 | start=192.168.4.100,end=192.168.4.200 --dns-nameserver 213.133.98.98 \
88 | --subnet-range 192.168.4.0/24 openshift_subnet
89 | ```
90 | 
91 | Add internal network to router as interface
92 | 
93 | ```
94 | # openstack router add subnet router1 openshift_subnet
95 | ```
96 | 
97 | # OpenShift Authentication
98 | This deployment configures authentication through OpenStack Keystone. This means you need to create users in OpenStack so they are available to OpenShift. All users that successfully authenticate from OpenShift to Keystone will be allowed to log in. They will only have permission to create new projects, not to see anyone else's projects. If you would like something else, you can configure the inventory file manually before deploying the OpenShift cluster.
99 | 
100 | Create OpenStack User for OpenShift
101 | ```
102 | # openstack user create --project admin --password
103 | ```
104 | 
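For reference, the Keystone integration ends up as a master identity provider entry in the generated inventory. A sketch of what such an entry looks like in OpenShift 3.x inventory syntax (the exact line written by the ocp-inventory role may differ, and the URL shown is a placeholder for your openstack_auth_url):

```
# Keystone identity provider entry (OpenShift 3.x inventory syntax); values are illustrative
openshift_master_identity_providers=[{'name': 'keystone', 'login': 'true', 'challenge': 'true', 'kind': 'KeystonePasswordIdentityProvider', 'url': 'http://openstack.example.com:5000/v3', 'domainName': 'Default'}]
```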
105 | # Install
106 | ![](images/one.png)
107 | 
108 | ```[OpenStack Controller]```
109 | 
110 | Install Git & Ansible
111 | ```
112 | # yum install -y git ansible
113 | ```
114 | 
115 | Clone Git Repository
116 | ```
117 | # git clone https://github.com/ktenzer/openshift-on-openstack-123.git
118 | ```
119 | 
120 | Change dir to repository
121 | ```
122 | # cd openshift-on-openstack-123
123 | ```
124 | 
125 | Checkout release branch 3.11
126 | ```
127 | # git checkout release-3.11
128 | ```
129 | 
130 | Configure Parameters
131 | ```
132 | # cp sample_vars.yml vars.yml
133 | ```
134 | ```
135 | # vi vars.yml
136 | ---
137 | ### OpenStack Settings ###
138 | openstack_user: admin
139 | openstack_passwd: 
140 | openstack_auth_url: 
141 | openstack_project: openshift
142 | domain_name: ocp3.lab
143 | external_network: public
144 | service_network: openshift
145 | service_subnet_id: 
146 | image: rhel76
147 | ssh_user: cloud-user
148 | ssh_key_path: /root/admin.pem
149 | ssh_key_name: admin
150 | stack_name: openshift
151 | openstack_release: queens
152 | openstack_version: "13"
153 | contact: admin@ocp3.lab
154 | heat_template_path: /root/openshift-on-openstack-123/heat/openshift_single_lbaas.yaml
155 | 
156 | ### OpenShift Settings ###
157 | openshift_deployment: openshift-enterprise
158 | openshift_version: "3.11"
159 | docker_version: "1.13.1"
160 | openshift_ha: false
161 | registry_replicas: 1
162 | openshift_user: admin
163 | openshift_passwd: 
164 | 
165 | ### Red Hat Subscription ###
166 | subscription_use_username: True
167 | rhn_username_or_org_id: 
168 | rhn_password_or_activation_key: 
169 | rhn_pool: 
170 | 
171 | ### OpenStack Instance Count ###
172 | master_count: 1
173 | infra_count: 1
174 | node_count: 3
175 | 
176 | ### OpenStack Instance Group Policies ###
177 | ### Set to 'anti-affinity' if running on multiple compute nodes ###
178 | master_server_group_policies: "['soft-anti-affinity']"
179 | infra_server_group_policies: "['soft-anti-affinity']"
180 | node_server_group_policies: "['soft-anti-affinity']"
181 | 
182 | ### OpenStack Instance Flavors ###
183 | bastion_flavor: ocp.bastion
184 | master_flavor: ocp.master
185 | infra_flavor: ocp.infra
186 | node_flavor: ocp.node
187 | ```
188 | 
189 | Note: If you want to run a single load balancer for masters and infra (to save floating IPs) instead of the default two, use the following heat template: ```heat_template_path: /root/openshift-on-openstack-123/heat/openshift_single_lbaas.yaml```.
190 | 
191 | # Step 1: Deploy OpenStack Infrastructure for OpenShift
192 | ```
193 | # ./01_deploy-openstack-infra.yml --private-key=
194 | ```
195 | 
196 | ![](images/two.png)
197 | 
198 | Get the IP address of the bastion host.
199 | ```
200 | # openstack stack output show openshift --all | grep -A1 '"name": "bastion"'
201 | 
202 | |         "name": "bastion",
203 | |         "address": "1.2.3.4"
204 | 
205 | ```
206 | 
207 | SSH to the bastion host using cloud-user and the key.
208 | ```
209 | ssh -i /root/admin.pem cloud-user@1.2.3.4
210 | ```
211 | 
212 | ```[Bastion Host]```
213 | 
214 | Change dir to repository
215 | ```
216 | # cd openshift-on-openstack-123
217 | ```
218 | 
219 | # Step 2: Prepare the nodes for deployment of OpenShift.
220 | ```
221 | [cloud-user@bastion ~]$ ./02_prepare-openshift.yml
222 | 
223 | PLAY RECAP *****************************************************************************************
224 | bastion : ok=15 changed=7 unreachable=0 failed=0
225 | infra0 : ok=18 changed=13 unreachable=0 failed=0
226 | infra1 : ok=18 changed=13 unreachable=0 failed=0
227 | localhost : ok=7 changed=6 unreachable=0 failed=0
228 | master0 : ok=18 changed=13 unreachable=0 failed=0
229 | master1 : ok=18 changed=13 unreachable=0 failed=0
230 | master2 : ok=18 changed=13 unreachable=0 failed=0
231 | node0 : ok=18 changed=13 unreachable=0 failed=0
232 | node1 : ok=18 changed=13 unreachable=0 failed=0
233 | ```
234 | 
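Before moving on to the OpenShift installer it can be worth confirming that the inventory written in step 2 reaches every node. A minimal sketch, assuming the generated inventory uses the standard openshift-ansible OSEv3 top-level group:

```
# Ad-hoc ping of every host in the generated inventory (run on the bastion)
ansible -i /home/cloud-user/openshift-inventory OSEv3 -m ping
```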
235 | ![](images/three.png)
236 | 
237 | ```[Bastion Host]```
238 | # Step 3: Install and Configure OpenShift Cluster
239 | 
240 | ```
241 | [cloud-user@bastion ~]$ ansible-playbook -i /home/cloud-user/openshift-inventory -vv /usr/share/ansible/openshift-ansible/playbooks/prerequisites.yml
242 | PLAY RECAP *****************************************************************************************
243 | infra0.ocp3.lab : ok=61 changed=15 unreachable=0 failed=0
244 | localhost : ok=11 changed=0 unreachable=0 failed=0
245 | master0.ocp3.lab : ok=73 changed=15 unreachable=0 failed=0
246 | node0.ocp3.lab : ok=61 changed=15 unreachable=0 failed=0
247 | 
248 | 
249 | INSTALLER STATUS ***********************************************************************************
250 | Initialization : Complete (0:04:16)
251 | ```
252 | 
253 | ```
254 | [cloud-user@bastion ~]$ ansible-playbook -i /home/cloud-user/openshift-inventory -vv /usr/share/ansible/openshift-ansible/playbooks/deploy_cluster.yml
255 | 
256 | PLAY RECAP *****************************************************************************************
257 | infra0 : ok=118 changed=20 unreachable=0 failed=0
258 | localhost : ok=11 changed=0 unreachable=0 failed=0
259 | master0 : ok=715 changed=237 unreachable=0 failed=0
260 | node0 : ok=118 changed=21 unreachable=0 failed=0
261 | 
262 | 
263 | INSTALLER STATUS ***********************************************************************************
264 | Initialization : Complete (0:00:32)
265 | Health Check : Complete (0:00:01)
266 | Node Bootstrap Preparation : Complete (0:09:24)
267 | etcd Install : Complete (0:01:06)
268 | Master Install : Complete (0:06:04)
269 | Master Additional Install : Complete (0:01:51)
270 | Node Join : Complete (0:00:37)
271 | Hosted Install : Complete (0:00:49)
272 | Cluster Monitoring Operator : Complete (0:01:15)
273 | Web Console Install : Complete (0:00:31)
274 | Console Install : Complete (0:00:27)
275 | metrics-server Install : Complete (0:00:00)
276 | Service Catalog Install : Complete (0:01:57)
277 | ```
278 | 
279 | Log in to the UI.
280 | ```
281 | https://openshift.144.76.134.226.xip.io:8443
282 | ```
283 | 
284 | # OKD Installation (in case you aren't doing OpenShift Enterprise)
285 | OKD, formerly called OpenShift Origin (the community version), is also supported starting with the release-3.11 branch. To use OKD make sure you have a CentOS 7.5 image and set 'openshift_deployment: origin' in the vars file.
286 | 
287 | Once you have run the 01_deploy-openstack-infra.yml and 02_prepare-openshift.yml playbooks as documented above, run the following from the bastion to install OKD.
288 | 
289 | Prerequisites playbook
290 | ```
291 | [centos@bastion ~]$ ansible-playbook -i /home/centos/openshift-inventory openshift-ansible/playbooks/prerequisites.yml
292 | ```
293 | 
294 | Deploy cluster playbook
295 | ```
296 | [centos@bastion ~]$ ansible-playbook -i /home/centos/openshift-inventory openshift-ansible/playbooks/deploy_cluster.yml
297 | ```
298 | 
299 | # Optional Section
300 | # Configure Admin User
301 | Configure admin user
302 | ```
303 | [cloud-user@bastion ~]$ ssh master0
304 | ```
305 | 
306 | Authenticate as the system:admin user.
307 | ```
308 | [cloud-user@master0 ~]$ oc login -u system:admin -n default
309 | ```
310 | 
311 | Make the admin user an OpenShift cluster administrator
312 | ```
313 | [cloud-user@master0 ~]$ oc adm policy add-cluster-role-to-user cluster-admin admin
314 | ```
315 | 
316 | # Install Metrics
317 | 
318 | Note: Metrics is integrated with the OpenShift UI and will be deprecated in 4.0, but for 3.11 it is still needed if you want metrics in the UI.
319 | 
320 | Set metrics to true in the inventory
321 | ```
322 | [cloud-user@bastion ~]$ vi openshift-inventory
323 | ...
324 | openshift_hosted_metrics_deploy=true
325 | ...
326 | ```
327 | 
328 | Run playbook for metrics in OpenShift
329 | ```
330 | [cloud-user@bastion ~]$ ansible-playbook -i /home/cloud-user/openshift-inventory -vv /usr/share/ansible/openshift-ansible/playbooks/openshift-metrics/config.yml
331 | PLAY RECAP *****************************************************************************************
332 | infra0.ocp3.lab : ok=0 changed=0 unreachable=0 failed=0
333 | localhost : ok=11 changed=0 unreachable=0 failed=0
334 | master0.ocp3.lab : ok=217 changed=47 unreachable=0 failed=0
335 | node0.ocp3.lab : ok=0 changed=0 unreachable=0 failed=0
336 | 
337 | 
338 | INSTALLER STATUS ***********************************************************************************
339 | Initialization : Complete (0:01:34)
340 | Metrics Install : Complete (0:04:37)
341 | ```
342 | 
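Once the playbook completes, the metrics components can be verified from a master. In OpenShift 3.x the Hawkular, Cassandra and Heapster pods run in the openshift-infra namespace:

```
# Verify the metrics pods are running (requires cluster-admin)
[cloud-user@master0 ~]$ oc get pods -n openshift-infra
```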
343 | # Install Logging
344 | Set logging to true in the inventory
345 | ```
346 | [cloud-user@bastion ~]$ vi openshift-inventory
347 | ...
348 | openshift_hosted_logging_deploy=true
349 | ...
350 | ```
351 | 
352 | Run playbook for logging in OpenShift
353 | ```
354 | [cloud-user@bastion ~]$ ansible-playbook -i /home/cloud-user/openshift-inventory -vv /usr/share/ansible/openshift-ansible/playbooks/openshift-logging/config.yml
355 | ```
356 | 
357 | # Operator Framework (tech preview in 3.11)
358 | ```
359 | [cloud-user@bastion ~]$ vi openshift-inventory
360 | ...
361 | openshift_additional_registry_credentials=[{'host':'registry.connect.redhat.com','user':'','password':'','test_image':'mongodb/enterprise-operator:0.3.2'}]
362 | ...
363 | ```
364 | 
365 | Reconfigure registry auth
366 | ```
367 | [cloud-user@bastion ~]$ ansible-playbook -i /home/cloud-user/openshift-inventory -vv /usr/share/ansible/openshift-ansible/playbooks/updates/registry_auth.yml
368 | PLAY RECAP ********************************************************************************************************************************************************************************************************
369 | infra0 : ok=27 changed=1 unreachable=0 failed=0
370 | infra1 : ok=27 changed=1 unreachable=0 failed=0
371 | localhost : ok=13 changed=0 unreachable=0 failed=0
372 | master0 : ok=48 changed=2 unreachable=0 failed=0
373 | master1 : ok=48 changed=2 unreachable=0 failed=0
374 | master2 : ok=65 changed=2 unreachable=0 failed=0
375 | node0 : ok=27 changed=1 unreachable=0 failed=0
376 | node1 : ok=27 changed=1 unreachable=0 failed=0
377 | node2 : ok=27 changed=1 unreachable=0 failed=0
378 | 
379 | 
380 | INSTALLER STATUS **************************************************************************************************************************************************************************************************
381 | Initialization : Complete (0:03:08)
382 | ```
383 | 
384 | Deploy Operator Framework
385 | ```
386 | [cloud-user@bastion ~]$ ansible-playbook -i /home/cloud-user/openshift-inventory -vv /usr/share/ansible/openshift-ansible/playbooks/olm/config.yml
387 | PLAY RECAP ********************************************************************************************************************************************************************************************************
388 | infra0 : ok=0 changed=0 unreachable=0 failed=0
389 | infra1 : ok=0 changed=0 unreachable=0 failed=0
390 | localhost : ok=11 changed=0 unreachable=0 failed=0
391 | master0 : ok=27 changed=0 unreachable=0 failed=0
392 | master1 : ok=27 changed=0 unreachable=0 failed=0
393 | master2 : ok=68 changed=19 unreachable=0 failed=0
394 | node0 : ok=0 changed=0 unreachable=0 failed=0
395 | node1 : ok=0 changed=0 unreachable=0 failed=0
396 | node2 : ok=0 changed=0 unreachable=0 failed=0
397 | 
398 | 
399 | INSTALLER STATUS **************************************************************************************************************************************************************************************************
400 | Initialization : Complete (0:01:30)
401 | OLM Install : Complete (0:00:47)
402 | ```
403 | 
404 | # OpenShift disconnected install (optional)
405 | A disconnected installation requires two Swift containers, which are used to store Docker images and RHEL repositories.
406 | The ```bastion-offline-repo-for-disconnected-setup.yml``` playbook will do the following:
407 | * Install httpd on the Bastion server
408 | * Install and configure Rclone, and mount the ```openshift_rhn_repo``` container via a systemd mount script to ```/var/www/html/repo```
409 | * Generate a CA and an SSL certificate for the Docker private registry.
410 | * Setup Docker and configure a Docker private registry to use the Swift container ```openshift_rhn_registry``` as backend storage.
411 | * Replicate the latest RHEL packages from the required repositories (6GB) and sync them to the ```openshift_rhn_repo``` container.
412 | * Download all required OpenShift Docker images (6GB), re-tag them and push them to the private Docker registry running on the Bastion.
413 | 
414 | Note: replication of the RHEL repositories and OpenShift Docker images will only happen once; if data already exists in the Swift containers, these steps will be skipped.
415 | 
416 | Set variables in vars.yml
417 | ```
418 | bastion_repo: True
419 | ```
420 | Create the two required Swift containers and set their names
421 | ```
422 | swift_rhn_repo_container_name: openshift_rhn_repo
423 | swift_rhn_registry_container_name: openshift_rhn_registry
424 | ```
425 | 
426 | Run the playbook below after running ```01_deploy-openstack-infra.yml```
427 | ```
428 | # ./bastion-offline-repo-for-disconnected-setup.yml
429 | ```
430 | Continue with step 2.
431 | 
432 | 
433 | # Issues
434 | ## Issue 1: Dynamic storage provisioning using cinder not working
435 | Currently, using the OpenStack cloud provider requires the Cinder v2 API. Most current OpenStack deployments will default to v3.
436 | ```
437 | Error creating cinder volume: BS API version autodetection failed.
438 | ```
439 | If you provision an OpenShift volume and it stays pending, check /var/log/messages on the master. If you see this error, add the following to /etc/origin/cloudprovider/openstack.conf on the masters and all nodes, then restart the node service on the nodes and the controller service on the masters.
440 | ```
441 | ...
442 | [BlockStorage]
443 | bs-version=v2
444 | ...
445 | ```
446 | 
447 | The post-openshift.yml playbook takes care of setting v2 for cinder automatically.
448 | 
449 | ## Issue 2: Hosted Install Fails
450 | 
451 | The registry sometimes fails to complete its install due to host resolution of xip.io. Not sure if this is an issue in OpenShift 3.7 or the environment. Simply re-running the hosted playbook resolved the issue and resulted in a successful installation.
452 | 
453 | ## Issue 3: Ansible 2.7 causes control plane to not start
454 | 
455 | Don't use Ansible 2.7 with OpenShift or OKD 3.11: there is an issue where the etcd playbooks won't run, which leads to the control plane not starting. Use 2.6, which is tested and working. This deployment will force 2.6 so 2.7 doesn't end up on systems by accident.
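If you want to verify or pin the Ansible version on a host yourself, a minimal sketch, assuming the rhel-7-server-ansible-2.6-rpms repository from group_vars/all is enabled and the yum-plugin-versionlock package is available:

```
# Confirm which Ansible version is installed
ansible --version | head -1

# Install from the 2.6 stream and lock it so an update cannot pull in 2.7
yum install -y ansible-2.6\*
yum versionlock add ansible
```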
456 | -------------------------------------------------------------------------------- /ansible.cfg: -------------------------------------------------------------------------------- 1 | [defaults] 2 | # Additional default options for OpenShift Ansible 3 | forks = 20 4 | host_key_checking = False 5 | retry_files_enabled = False 6 | nocows = True 7 | 8 | -------------------------------------------------------------------------------- /bastion-offline-repo-for-disconnected-setup.yml: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env ansible-playbook 2 | --- 3 | - name: Deploy the OpenShift Cluster Infrastructure 4 | hosts: localhost 5 | connection: local 6 | become: no 7 | gather_facts: no 8 | 9 | tasks: 10 | - name: Include vars.yml 11 | include_vars: 12 | file: vars.yml 13 | 14 | - import_role: 15 | name: osp-inventory 16 | 17 | - name: Configure local RHN repos on Bastion 18 | hosts: bastion 19 | become: true 20 | 21 | tasks: 22 | - name: Include vars.yml 23 | include_vars: 24 | file: vars.yml 25 | 26 | - name: Set ansible_ssh_private_key_file 27 | set_fact: 28 | ansible_ssh_private_key_file: "{{ ssh_key_path }}" 29 | 30 | - import_role: 31 | name: bastion-repo 32 | when: openshift_deployment == "openshift-enterprise" 33 | -------------------------------------------------------------------------------- /group_vars/all: -------------------------------------------------------------------------------- 1 | packages: 2 | - wget 3 | - git 4 | - net-tools 5 | - bind-utils 6 | - iptables-services 7 | - bash-completion 8 | - nfs-utils 9 | - kexec-tools 10 | - sos 11 | - psacct 12 | - yum-utils 13 | - NetworkManager 14 | packages_bastion_openshift: 15 | - openshift-ansible 16 | packages_bastion_origin: 17 | - ansible 18 | - pyOpenSSL 19 | origin_repos_bastion: 20 | - centos-release-openshift-origin{{ openshift_repoversion }} 21 | - centos-release-openstack-{{ openstack_release }} 22 | - epel-release 23 | packages_bastion_openstack: 24 | - python-openstackclient 25 | - python-heatclient 26 | repos: 27 | - rhel-7-server-rpms 28 | - rhel-7-server-extras-rpms 29 | - rhel-7-server-ose-{{ openshift_version }}-rpms 30 | - rhel-7-fast-datapath-rpms 31 | - rhel-7-server-ansible-2.6-rpms 32 | repos_bastion: 33 | - rhel-7-server-openstack-{{ openstack_version }}-tools-rpms 34 | -------------------------------------------------------------------------------- /heat/bastion.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | heat_template_version: 2016-10-14 3 | 4 | description: 5 | An instance for a bastion server 6 | The instances are created with Heat and then configured with Ansible 7 | 8 | parameters: 9 | 10 | # Networks to connect to or create 11 | external_network: 12 | type: string 13 | description: > 14 | The external network that provides floating IP addresses for the nodes 15 | constraints: 16 | - custom_constraint: neutron.network 17 | 18 | service_network: 19 | description: > 20 | The name or ID of the internal network 21 | type: string 22 | constraints: 23 | - custom_constraint: neutron.network 24 | 25 | service_subnet: 26 | description: > 27 | The name or ID of the internal IPv4 space 28 | type: string 29 | constraints: 30 | - custom_constraint: neutron.subnet 31 | 32 | security_group: 33 | description: > 34 | Allow bastion server access to instances 35 | type: string 36 | constraints: 37 | - custom_constraint: neutron.security_group 38 | 39 | image: 40 | description: > 41 | The Glance image to use as a 
base for bastion server
42 |     type: string
43 |     constraints:
44 |     - custom_constraint: glance.image
45 | 
46 |   bastion_flavor:
47 |     description: >
48 |       The name of the OpenStack instance flavor to use for the bastion server
49 |     type: string
50 |     default: m1.small
51 |     constraints:
52 |     - custom_constraint: nova.flavor
53 | 
54 |   hostname:
55 |     description: >
56 |       The bastion hostname portion of the FQDN
57 |     type: string
58 |     default: "bastion"
59 |     constraints:
60 |     - allowed_pattern: '[a-z0-9\-]*'
61 |       description: Hostname must contain only characters [a-z0-9\-].
62 | 
63 |   domain_name:
64 |     description: >
65 |       All VMs will be placed in this domain
66 |     type: string
67 | 
68 |   # Access to the VMs
69 |   ssh_user:
70 |     type: string
71 |     description: >
72 |       The SSH user available on all nodes.
73 | 
74 |   ssh_key_name:
75 |     type: string
76 |     description: Name of the SSH keypair registered with Nova
77 |     constraints:
78 |     - custom_constraint: nova.keypair
79 | 
80 | 
81 | resources:
82 |   # A VM to provide host based orchestration and other sub-services
83 |   host:
84 |     type: OS::Nova::Server
85 |     properties:
86 |       name:
87 |         str_replace:
88 |           template: "HOSTNAME"
89 |           params:
90 |             HOSTNAME: {get_param: hostname}
91 |       admin_user: {get_param: ssh_user}
92 |       image: {get_param: image}
93 |       flavor: {get_param: bastion_flavor}
94 |       key_name: {get_param: ssh_key_name}
95 |       networks:
96 |       - port: {get_resource: port}
97 |       user_data_format: SOFTWARE_CONFIG
98 |       user_data: {get_resource: init}
99 |       block_device_mapping:
100 |       - device_name: vda
101 |         volume_id: { get_resource: root_volume }
102 |         delete_on_termination: true
103 | 
104 |   port:
105 |     type: OS::Neutron::Port
106 |     properties:
107 |       security_groups:
108 |       - {get_param: security_group}
109 |       network: {get_param: service_network}
110 |       fixed_ips:
111 |       - subnet: {get_param: service_subnet}
112 |       replacement_policy: AUTO
113 | 
114 |   floating_ip:
115 |     type: OS::Neutron::FloatingIP
116 |     properties:
117 |       floating_network: {get_param: external_network}
118 |       port_id: {get_resource: port}
119 | 
120 |   root_volume:
121 |     type: OS::Cinder::Volume
122 |     properties:
123 |       size: 30
124 |       image: {get_param: image}
125 |       availability_zone: nova
126 | 
127 |   init:
128 |     type: OS::Heat::MultipartMime
129 |     properties:
130 |       parts:
131 |       - config: {get_resource: set_hostname}
132 | 
133 |   set_hostname:
134 |     type: OS::Heat::CloudConfig
135 |     properties:
136 |       cloud_config:
137 |         hostname: {get_param: hostname}
138 |         fqdn:
139 |           str_replace:
140 |             template: "HOSTNAME.DOMAIN"
141 |             params:
142 |               HOSTNAME: {get_param: hostname}
143 |               DOMAIN: {get_param: domain_name}
144 | 
145 | outputs:
146 |   ip_address:
147 |     value: {get_attr: [floating_ip, floating_ip_address]}
148 | 
--------------------------------------------------------------------------------
/heat/hosts.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | heat_template_version: 2016-10-14
3 | 
4 | description:
5 |   A stack of OpenShift hosts made up of a bastion, a set of masters and
6 |   sets of infra and app nodes.
7 |   The instances are created with Heat and then configured with Ansible
8 | 
9 | parameters:
10 |   masters_pool:
11 |     type: string
12 |     description: The masters lbaas pool
13 | 
14 |   infras_pool_http:
15 |     type: string
16 |     description: The infras lbaas pool (HTTP)
17 | 
18 |   infras_pool_https:
19 |     type: string
20 |     description: The infras lbaas pool (HTTPS)
21 | 
22 |   node_count:
23 |     type: number
24 |     description: >
25 |       Number of app nodes to create.
26 |     default: 2
27 | 
28 |   infra_count:
29 |     type: number
30 |     description: >
31 |       Number of infra nodes to create.
32 |     default: 1
33 | 
34 |   master_count:
35 |     type: number
36 |     description: >
37 |       Number of master nodes to create.
38 |     default: 1
39 | 
40 |   bastion_hostname:
41 |     description: >
42 |       The default prefix for the bastion hostname
43 |     type: string
44 |     default: "bastion"
45 | 
46 |   master_hostname_prefix:
47 |     description: >
48 |       The default prefix for master hostnames
49 |     type: string
50 |     default: "master"
51 | 
52 |   infra_hostname_prefix:
53 |     description: >
54 |       The default prefix for infra node hostnames
55 |     type: string
56 |     default: "infra"
57 | 
58 |   node_hostname_prefix:
59 |     description: >
60 |       The default prefix for app node hostnames
61 |     type: string
62 |     default: "node"
63 | 
64 |   domain_name:
65 |     description: >
66 |       All VMs will be placed in this domain
67 |     type: string
68 | 
69 |   # Networks to connect to or create
70 |   external_network:
71 |     type: string
72 |     description: >
73 |       The external network that provides floating IP addresses for the nodes
74 |     constraints:
75 |     - custom_constraint: neutron.network
76 | 
77 |   service_network:
78 |     description: >
79 |       The name or ID of the internal network
80 |     type: string
81 |     constraints:
82 |     - custom_constraint: neutron.network
83 | 
84 |   service_subnet:
85 |     description: >
86 |       The name or ID of the internal IPv4 space
87 |     type: string
88 |     constraints:
89 |     - custom_constraint: neutron.subnet
90 | 
91 |   master_server_group_policies:
92 |     type: comma_delimited_list
93 |     description: >
94 |       List of policies applied on master nodes ServerGroup. By default
95 |       'anti-affinity' policy is used to make sure that each master node
96 |       is deployed on a different host. If you use a small/all-in-one openstack
97 |       environment, you may need to disable this e.g. by passing
98 |       '-P master_server_group_policies=affinity'.
99 |     default: ['anti-affinity']
100 | 
101 |   infra_server_group_policies:
102 |     type: comma_delimited_list
103 |     description: >
104 |       List of policies applied on infra nodes ServerGroup. By default
105 |       'anti-affinity' policy is used to make sure that each infra node
106 |       is deployed on a different host. If you use a small/all-in-one openstack
107 |       environment, you may need to disable this e.g. by passing
108 |       '-P infra_server_group_policies=affinity'.
109 |     default: ['anti-affinity']
110 | 
111 |   node_server_group_policies:
112 |     type: comma_delimited_list
113 |     description: >
114 |       List of policies applied on app nodes ServerGroup. By default
115 |       'anti-affinity' policy is used to make sure that each app node
116 |       is deployed on a different host. If you use a small/all-in-one openstack
117 |       environment, you may need to disable this e.g. by passing
118 |       '-P node_server_group_policies=affinity'.
119 | default: ['anti-affinity'] 120 | 121 | image: 122 | description: > 123 | The Glance image to use as a base for OpenShift nodes 124 | type: string 125 | constraints: 126 | - custom_constraint: glance.image 127 | 128 | bastion_flavor: 129 | description: > 130 | The name of the OpenStack instance flavor to use for OpenShift nodes 131 | type: string 132 | default: m1.small 133 | constraints: 134 | - custom_constraint: nova.flavor 135 | 136 | master_flavor: 137 | description: > 138 | The name of the OpenStack instance flavor to use for OpenShift nodes 139 | type: string 140 | default: m1.medium 141 | constraints: 142 | 143 | infra_flavor: 144 | description: > 145 | The name of the OpenStack instance flavor to use for OpenShift nodes 146 | type: string 147 | default: m1.large 148 | constraints: 149 | 150 | node_flavor: 151 | description: > 152 | The name of the OpenStack instance flavor to use for OpenShift nodes 153 | type: string 154 | default: m1.medium 155 | constraints: 156 | 157 | # Access to the VMs 158 | ssh_user: 159 | type: string 160 | description: > 161 | The SSH user available on all nodes. 162 | default: cloud-user 163 | 164 | ssh_key_name: 165 | type: string 166 | description: Name of the SSH keypair registered with Nova 167 | constraints: 168 | - custom_constraint: nova.keypair 169 | 170 | resources: 171 | 172 | bastion: 173 | type: bastion.yaml 174 | properties: 175 | image: {get_param: image} 176 | bastion_flavor: {get_param: bastion_flavor} 177 | external_network: {get_param: external_network} 178 | service_network: {get_param: service_network} 179 | service_subnet: {get_param: service_subnet} 180 | security_group: {get_resource: bastion_security_group} 181 | hostname: {get_param: bastion_hostname} 182 | domain_name: {get_param: domain_name} 183 | ssh_user: {get_param: ssh_user} 184 | ssh_key_name: {get_param: ssh_key_name} 185 | 186 | masters: 187 | type: masters.yaml 188 | properties: 189 | masters_pool: {get_param: masters_pool} 190 | master_count: {get_param: master_count} 191 | image: {get_param: image} 192 | master_flavor: {get_param: master_flavor} 193 | master_server_group_policies: {get_param: master_server_group_policies} 194 | external_network: {get_param: external_network} 195 | service_network: {get_param: service_network} 196 | service_subnet: {get_param: service_subnet} 197 | security_group: {get_resource: master_security_group} 198 | hostname_prefix: {get_param: master_hostname_prefix} 199 | domain_name: {get_param: domain_name} 200 | ssh_user: {get_param: ssh_user} 201 | ssh_key_name: {get_param: ssh_key_name} 202 | 203 | infras: 204 | type: infras.yaml 205 | properties: 206 | infras_pool_http: {get_param: infras_pool_http} 207 | infras_pool_https: {get_param: infras_pool_https} 208 | infra_count: {get_param: infra_count} 209 | image: {get_param: image} 210 | infra_flavor: {get_param: infra_flavor} 211 | infra_server_group_policies: {get_param: infra_server_group_policies} 212 | external_network: {get_param: external_network} 213 | service_network: {get_param: service_network} 214 | service_subnet: {get_param: service_subnet} 215 | security_group: {get_resource: infra_security_group} 216 | hostname_prefix: {get_param: infra_hostname_prefix} 217 | domain_name: {get_param: domain_name} 218 | ssh_user: {get_param: ssh_user} 219 | ssh_key_name: {get_param: ssh_key_name} 220 | 221 | nodes: 222 | type: nodes.yaml 223 | properties: 224 | node_count: {get_param: node_count} 225 | image: {get_param: image} 226 | node_flavor: {get_param: node_flavor} 227 | 
node_server_group_policies: {get_param: node_server_group_policies} 228 | external_network: {get_param: external_network} 229 | service_network: {get_param: service_network} 230 | service_subnet: {get_param: service_subnet} 231 | security_group: {get_resource: node_security_group} 232 | hostname_prefix: {get_param: node_hostname_prefix} 233 | domain_name: {get_param: domain_name} 234 | ssh_user: {get_param: ssh_user} 235 | ssh_key_name: {get_param: ssh_key_name} 236 | 237 | bastion_security_group: 238 | type: OS::Neutron::SecurityGroup 239 | properties: 240 | rules: 241 | - protocol: icmp 242 | - protocol: tcp 243 | port_range_min: 22 244 | port_range_max: 22 245 | - protocol: tcp 246 | port_range_min: 53 247 | port_range_max: 53 248 | - protocol: udp 249 | port_range_min: 53 250 | port_range_max: 53 251 | - protocol: tcp 252 | port_range_min: 80 253 | port_range_max: 80 254 | - protocol: tcp 255 | port_range_min: 443 256 | port_range_max: 443 257 | 258 | node_security_group: 259 | type: OS::Neutron::SecurityGroup 260 | properties: 261 | rules: 262 | - protocol: icmp 263 | - direction: ingress 264 | protocol: tcp 265 | port_range_min: 22 266 | port_range_max: 22 267 | - direction: ingress 268 | protocol: tcp 269 | port_range_min: 10250 270 | port_range_max: 10250 271 | - direction: ingress 272 | protocol: udp 273 | port_range_min: 4789 274 | port_range_max: 4789 275 | # Node exporter 276 | - direction: ingress 277 | protocol: tcp 278 | port_range_min: 9100 279 | port_range_max: 9100 280 | - direction: ingress 281 | protocol: tcp 282 | port_range_min: 53 283 | port_range_max: 53 284 | - direction: ingress 285 | protocol: udp 286 | port_range_min: 53 287 | port_range_max: 53 288 | - direction: ingress 289 | protocol: tcp 290 | port_range_min: 111 291 | port_range_max: 111 292 | - direction: ingress 293 | protocol: udp 294 | port_range_min: 111 295 | port_range_max: 111 296 | - direction: ingress 297 | protocol: tcp 298 | port_range_min: 892 299 | port_range_max: 892 300 | - direction: ingress 301 | protocol: udp 302 | port_range_min: 892 303 | port_range_max: 892 304 | - direction: ingress 305 | protocol: tcp 306 | port_range_min: 875 307 | port_range_max: 875 308 | - direction: ingress 309 | protocol: udp 310 | port_range_min: 875 311 | port_range_max: 875 312 | - direction: ingress 313 | protocol: tcp 314 | port_range_min: 662 315 | port_range_max: 662 316 | - direction: ingress 317 | protocol: udp 318 | port_range_min: 662 319 | port_range_max: 662 320 | - direction: ingress 321 | protocol: tcp 322 | port_range_min: 2049 323 | port_range_max: 2049 324 | - direction: ingress 325 | protocol: tcp 326 | port_range_min: 32803 327 | port_range_max: 32803 328 | - direction: ingress 329 | protocol: udp 330 | port_range_min: 32769 331 | port_range_max: 32769 332 | - direction: ingress 333 | protocol: tcp 334 | port_range_min: 24007 335 | port_range_max: 24007 336 | - direction: ingress 337 | protocol: tcp 338 | port_range_min: 24008 339 | port_range_max: 24008 340 | - direction: ingress 341 | protocol: tcp 342 | port_range_min: 2222 343 | port_range_max: 2222 344 | - direction: ingress 345 | protocol: tcp 346 | port_range_min: 49152 347 | port_range_max: 49664 348 | 349 | master_security_group: 350 | type: OS::Neutron::SecurityGroup 351 | properties: 352 | rules: 353 | - protocol: icmp 354 | - direction: ingress 355 | protocol: tcp 356 | port_range_min: 22 357 | port_range_max: 22 358 | - direction: ingress 359 | protocol: tcp 360 | port_range_min: 4001 361 | port_range_max: 4001 362 | - 
direction: ingress 363 | protocol: tcp 364 | port_range_min: 8443 365 | port_range_max: 8443 366 | - direction: ingress 367 | protocol: tcp 368 | port_range_min: 8444 369 | port_range_max: 8444 370 | - direction: ingress 371 | protocol: tcp 372 | port_range_min: 443 373 | port_range_max: 443 374 | - direction: ingress 375 | protocol: tcp 376 | port_range_min: 53 377 | port_range_max: 53 378 | - direction: ingress 379 | protocol: udp 380 | port_range_min: 53 381 | port_range_max: 53 382 | - direction: ingress 383 | protocol: udp 384 | port_range_min: 4789 385 | port_range_max: 4789 386 | - direction: ingress 387 | protocol: tcp 388 | port_range_min: 8053 389 | port_range_max: 8053 390 | - direction: ingress 391 | protocol: udp 392 | port_range_min: 8053 393 | port_range_max: 8053 394 | - direction: ingress 395 | protocol: tcp 396 | port_range_min: 9100 397 | port_range_max: 9100 398 | - direction: ingress 399 | protocol: tcp 400 | port_range_min: 10250 401 | port_range_max: 10250 402 | - direction: ingress 403 | protocol: tcp 404 | port_range_min: 24224 405 | port_range_max: 24224 406 | - direction: ingress 407 | protocol: udp 408 | port_range_min: 24224 409 | port_range_max: 24224 410 | - direction: ingress 411 | protocol: tcp 412 | port_range_min: 2379 413 | port_range_max: 2379 414 | - direction: ingress 415 | protocol: tcp 416 | port_range_min: 2380 417 | port_range_max: 2380 418 | remote_mode: remote_group_id 419 | - direction: ingress 420 | protocol: tcp 421 | port_range_min: 2049 422 | port_range_max: 2049 423 | 424 | infra_security_group: 425 | type: OS::Neutron::SecurityGroup 426 | properties: 427 | rules: 428 | - protocol: icmp 429 | - direction: ingress 430 | protocol: tcp 431 | port_range_min: 22 432 | port_range_max: 22 433 | - direction: ingress 434 | protocol: tcp 435 | port_range_min: 80 436 | port_range_max: 80 437 | - direction: ingress 438 | protocol: tcp 439 | port_range_min: 443 440 | port_range_max: 443 441 | - direction: ingress 442 | protocol: tcp 443 | port_range_min: 10250 444 | port_range_max: 10250 445 | - direction: ingress 446 | protocol: udp 447 | port_range_min: 4789 448 | port_range_max: 4789 449 | - direction: ingress 450 | protocol: tcp 451 | port_range_min: 1936 452 | port_range_max: 1936 453 | - direction: ingress 454 | protocol: tcp 455 | port_range_min: 9100 456 | port_range_max: 9100 457 | - direction: ingress 458 | protocol: tcp 459 | port_range_min: 5000 460 | port_range_max: 5000 461 | 462 | 463 | outputs: 464 | bastion_ip_address: 465 | value: {get_attr: [bastion, ip_address]} 466 | master_entries: 467 | value: {get_attr: [masters, entries]} 468 | infra_entries: 469 | value: {get_attr: [infras, entries]} 470 | node_entries: 471 | value: {get_attr: [nodes, entries]} 472 | 473 | -------------------------------------------------------------------------------- /heat/hosts_octavia.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | heat_template_version: 2016-10-14 3 | 4 | description: 5 | An instance for a DNS service made up of a single master and a set of 6 | slave services. 
7 |   The instances are created with Heat and then configured with Ansible
8 | 
9 | parameters:
10 |   masters_pool:
11 |     type: string
12 |     description: The masters lbaas pool
13 | 
14 |   infras_pool_http:
15 |     type: string
16 |     description: The infras lbaas pool (HTTP)
17 | 
18 |   infras_pool_https:
19 |     type: string
20 |     description: The infras lbaas pool (HTTPS)
21 | 
22 |   node_count:
23 |     type: number
24 |     description: >
25 |       Number of app nodes to create.
26 |     default: 2
27 | 
28 |   infra_count:
29 |     type: number
30 |     description: >
31 |       Number of infra nodes to create.
32 |     default: 1
33 | 
34 |   master_count:
35 |     type: number
36 |     description: >
37 |       Number of master nodes to create.
38 |     default: 1
39 | 
40 |   bastion_hostname:
41 |     description: >
42 |       The default prefix for the bastion hostname
43 |     type: string
44 |     default: "bastion"
45 | 
46 |   master_hostname_prefix:
47 |     description: >
48 |       The default prefix for master hostnames
49 |     type: string
50 |     default: "master"
51 | 
52 |   infra_hostname_prefix:
53 |     description: >
54 |       The default prefix for infra node hostnames
55 |     type: string
56 |     default: "infra"
57 | 
58 |   node_hostname_prefix:
59 |     description: >
60 |       The default prefix for app node hostnames
61 |     type: string
62 |     default: "node"
63 | 
64 |   domain_name:
65 |     description: >
66 |       All VMs will be placed in this domain
67 |     type: string
68 | 
69 |   # Networks to connect to or create
70 |   external_network:
71 |     type: string
72 |     description: >
73 |       The external network that provides floating IP addresses for the nodes
74 |     constraints:
75 |     - custom_constraint: neutron.network
76 | 
77 |   service_network:
78 |     description: >
79 |       The name or ID of the internal network
80 |     type: string
81 |     constraints:
82 |     - custom_constraint: neutron.network
83 | 
84 |   service_subnet:
85 |     description: >
86 |       The name or ID of the internal IPv4 space
87 |     type: string
88 |     constraints:
89 |     - custom_constraint: neutron.subnet
90 | 
91 |   master_server_group_policies:
92 |     type: comma_delimited_list
93 |     description: >
94 |       List of policies applied on master nodes ServerGroup. By default
95 |       'anti-affinity' policy is used to make sure that each master node
96 |       is deployed on a different host. If you use a small/all-in-one openstack
97 |       environment, you may need to disable this e.g. by passing
98 |       '-P master_server_group_policies=affinity'.
99 |     default: ['anti-affinity']
100 | 
101 |   infra_server_group_policies:
102 |     type: comma_delimited_list
103 |     description: >
104 |       List of policies applied on infra nodes ServerGroup. By default
105 |       'anti-affinity' policy is used to make sure that each infra node
106 |       is deployed on a different host. If you use a small/all-in-one openstack
107 |       environment, you may need to disable this e.g. by passing
108 |       '-P infra_server_group_policies=affinity'.
109 |     default: ['anti-affinity']
110 | 
111 |   node_server_group_policies:
112 |     type: comma_delimited_list
113 |     description: >
114 |       List of policies applied on app nodes ServerGroup. By default
115 |       'anti-affinity' policy is used to make sure that each app node
116 |       is deployed on a different host. If you use a small/all-in-one openstack
117 |       environment, you may need to disable this e.g. by passing
118 |       '-P node_server_group_policies=affinity'.
119 | default: ['anti-affinity'] 120 | 121 | image: 122 | description: > 123 | The Glance image to use as a base for OpenShift nodes 124 | type: string 125 | constraints: 126 | - custom_constraint: glance.image 127 | 128 | bastion_flavor: 129 | description: > 130 | The name of the OpenStack instance flavor to use for OpenShift nodes 131 | type: string 132 | default: m1.small 133 | constraints: 134 | - custom_constraint: nova.flavor 135 | 136 | master_flavor: 137 | description: > 138 | The name of the OpenStack instance flavor to use for OpenShift nodes 139 | type: string 140 | default: m1.medium 141 | constraints: 142 | 143 | infra_flavor: 144 | description: > 145 | The name of the OpenStack instance flavor to use for OpenShift nodes 146 | type: string 147 | default: m1.large 148 | constraints: 149 | 150 | node_flavor: 151 | description: > 152 | The name of the OpenStack instance flavor to use for OpenShift nodes 153 | type: string 154 | default: m1.medium 155 | constraints: 156 | 157 | # Access to the VMs 158 | ssh_user: 159 | type: string 160 | description: > 161 | The SSH user available on all nodes. 162 | default: cloud-user 163 | 164 | ssh_key_name: 165 | type: string 166 | description: Name of the SSH keypair registered with Nova 167 | constraints: 168 | - custom_constraint: nova.keypair 169 | 170 | resources: 171 | 172 | bastion: 173 | type: bastion.yaml 174 | properties: 175 | image: {get_param: image} 176 | bastion_flavor: {get_param: bastion_flavor} 177 | external_network: {get_param: external_network} 178 | service_network: {get_param: service_network} 179 | service_subnet: {get_param: service_subnet} 180 | security_group: {get_resource: bastion_security_group} 181 | hostname: {get_param: bastion_hostname} 182 | domain_name: {get_param: domain_name} 183 | ssh_user: {get_param: ssh_user} 184 | ssh_key_name: {get_param: ssh_key_name} 185 | 186 | masters: 187 | type: masters_octavia.yaml 188 | properties: 189 | masters_pool: {get_param: masters_pool} 190 | master_count: {get_param: master_count} 191 | image: {get_param: image} 192 | master_flavor: {get_param: master_flavor} 193 | master_server_group_policies: {get_param: master_server_group_policies} 194 | external_network: {get_param: external_network} 195 | service_network: {get_param: service_network} 196 | service_subnet: {get_param: service_subnet} 197 | security_group: {get_resource: master_security_group} 198 | hostname_prefix: {get_param: master_hostname_prefix} 199 | domain_name: {get_param: domain_name} 200 | ssh_user: {get_param: ssh_user} 201 | ssh_key_name: {get_param: ssh_key_name} 202 | 203 | infras: 204 | type: infras_octavia.yaml 205 | properties: 206 | infras_pool_http: {get_param: infras_pool_http} 207 | infras_pool_https: {get_param: infras_pool_https} 208 | infra_count: {get_param: infra_count} 209 | image: {get_param: image} 210 | infra_flavor: {get_param: infra_flavor} 211 | infra_server_group_policies: {get_param: infra_server_group_policies} 212 | external_network: {get_param: external_network} 213 | service_network: {get_param: service_network} 214 | service_subnet: {get_param: service_subnet} 215 | security_group: {get_resource: infra_security_group} 216 | hostname_prefix: {get_param: infra_hostname_prefix} 217 | domain_name: {get_param: domain_name} 218 | ssh_user: {get_param: ssh_user} 219 | ssh_key_name: {get_param: ssh_key_name} 220 | 221 | nodes: 222 | type: nodes.yaml 223 | properties: 224 | node_count: {get_param: node_count} 225 | image: {get_param: image} 226 | node_flavor: {get_param: 
node_flavor} 227 | node_server_group_policies: {get_param: node_server_group_policies} 228 | external_network: {get_param: external_network} 229 | service_network: {get_param: service_network} 230 | service_subnet: {get_param: service_subnet} 231 | security_group: {get_resource: node_security_group} 232 | hostname_prefix: {get_param: node_hostname_prefix} 233 | domain_name: {get_param: domain_name} 234 | ssh_user: {get_param: ssh_user} 235 | ssh_key_name: {get_param: ssh_key_name} 236 | 237 | bastion_security_group: 238 | type: OS::Neutron::SecurityGroup 239 | properties: 240 | rules: 241 | - protocol: icmp 242 | - protocol: tcp 243 | port_range_min: 22 244 | port_range_max: 22 245 | - protocol: tcp 246 | port_range_min: 53 247 | port_range_max: 53 248 | - protocol: udp 249 | port_range_min: 53 250 | port_range_max: 53 251 | 252 | node_security_group: 253 | type: OS::Neutron::SecurityGroup 254 | properties: 255 | rules: 256 | - protocol: icmp 257 | - direction: ingress 258 | protocol: tcp 259 | port_range_min: 22 260 | port_range_max: 22 261 | - direction: ingress 262 | protocol: tcp 263 | port_range_min: 10250 264 | port_range_max: 10250 265 | - direction: ingress 266 | protocol: udp 267 | port_range_min: 4789 268 | port_range_max: 4789 269 | # Node exporter 270 | - direction: ingress 271 | protocol: tcp 272 | port_range_min: 9100 273 | port_range_max: 9100 274 | - direction: ingress 275 | protocol: tcp 276 | port_range_min: 53 277 | port_range_max: 53 278 | - direction: ingress 279 | protocol: udp 280 | port_range_min: 53 281 | port_range_max: 53 282 | - direction: ingress 283 | protocol: tcp 284 | port_range_min: 111 285 | port_range_max: 111 286 | - direction: ingress 287 | protocol: udp 288 | port_range_min: 111 289 | port_range_max: 111 290 | - direction: ingress 291 | protocol: tcp 292 | port_range_min: 892 293 | port_range_max: 892 294 | - direction: ingress 295 | protocol: udp 296 | port_range_min: 892 297 | port_range_max: 892 298 | - direction: ingress 299 | protocol: tcp 300 | port_range_min: 875 301 | port_range_max: 875 302 | - direction: ingress 303 | protocol: udp 304 | port_range_min: 875 305 | port_range_max: 875 306 | - direction: ingress 307 | protocol: tcp 308 | port_range_min: 662 309 | port_range_max: 662 310 | - direction: ingress 311 | protocol: udp 312 | port_range_min: 662 313 | port_range_max: 662 314 | - direction: ingress 315 | protocol: tcp 316 | port_range_min: 2049 317 | port_range_max: 2049 318 | - direction: ingress 319 | protocol: tcp 320 | port_range_min: 32803 321 | port_range_max: 32803 322 | - direction: ingress 323 | protocol: udp 324 | port_range_min: 32769 325 | port_range_max: 32769 326 | 327 | master_security_group: 328 | type: OS::Neutron::SecurityGroup 329 | properties: 330 | rules: 331 | - protocol: icmp 332 | - direction: ingress 333 | protocol: tcp 334 | port_range_min: 22 335 | port_range_max: 22 336 | - direction: ingress 337 | protocol: tcp 338 | port_range_min: 4001 339 | port_range_max: 4001 340 | - direction: ingress 341 | protocol: tcp 342 | port_range_min: 8443 343 | port_range_max: 8443 344 | - direction: ingress 345 | protocol: tcp 346 | port_range_min: 8444 347 | port_range_max: 8444 348 | - direction: ingress 349 | protocol: tcp 350 | port_range_min: 443 351 | port_range_max: 443 352 | - direction: ingress 353 | protocol: tcp 354 | port_range_min: 53 355 | port_range_max: 53 356 | - direction: ingress 357 | protocol: udp 358 | port_range_min: 53 359 | port_range_max: 53 360 | - direction: ingress 361 | protocol: udp 362 | 
port_range_min: 4789 363 | port_range_max: 4789 364 | - direction: ingress 365 | protocol: tcp 366 | port_range_min: 8053 367 | port_range_max: 8053 368 | - direction: ingress 369 | protocol: udp 370 | port_range_min: 8053 371 | port_range_max: 8053 372 | - direction: ingress 373 | protocol: tcp 374 | port_range_min: 9100 375 | port_range_max: 9100 376 | - direction: ingress 377 | protocol: tcp 378 | port_range_min: 10250 379 | port_range_max: 10250 380 | - direction: ingress 381 | protocol: tcp 382 | port_range_min: 24224 383 | port_range_max: 24224 384 | - direction: ingress 385 | protocol: udp 386 | port_range_min: 24224 387 | port_range_max: 24224 388 | - direction: ingress 389 | protocol: tcp 390 | port_range_min: 2379 391 | port_range_max: 2379 392 | - direction: ingress 393 | protocol: tcp 394 | port_range_min: 2380 395 | port_range_max: 2380 396 | remote_mode: remote_group_id 397 | - direction: ingress 398 | protocol: tcp 399 | port_range_min: 2049 400 | port_range_max: 2049 401 | 402 | infra_security_group: 403 | type: OS::Neutron::SecurityGroup 404 | properties: 405 | rules: 406 | - protocol: icmp 407 | - direction: ingress 408 | protocol: tcp 409 | port_range_min: 22 410 | port_range_max: 22 411 | - direction: ingress 412 | protocol: tcp 413 | port_range_min: 80 414 | port_range_max: 80 415 | - direction: ingress 416 | protocol: tcp 417 | port_range_min: 443 418 | port_range_max: 443 419 | - direction: ingress 420 | protocol: tcp 421 | port_range_min: 10250 422 | port_range_max: 10250 423 | - direction: ingress 424 | protocol: udp 425 | port_range_min: 4789 426 | port_range_max: 4789 427 | - direction: ingress 428 | protocol: tcp 429 | port_range_min: 1936 430 | port_range_max: 1936 431 | - direction: ingress 432 | protocol: tcp 433 | port_range_min: 9100 434 | port_range_max: 9100 435 | - direction: ingress 436 | protocol: tcp 437 | port_range_min: 5000 438 | port_range_max: 5000 439 | 440 | 441 | outputs: 442 | bastion_ip_address: 443 | value: {get_attr: [bastion, ip_address]} 444 | master_entries: 445 | value: {get_attr: [masters, entries]} 446 | infra_entries: 447 | value: {get_attr: [infras, entries]} 448 | node_entries: 449 | value: {get_attr: [nodes, entries]} 450 | 451 | -------------------------------------------------------------------------------- /heat/infra.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | heat_template_version: 2016-10-14 3 | 4 | description: 5 | An instance for an OpenShift infra node. 6 | The instances are created with Heat and then configured with Ansible. 7 | 8 | parameters: 9 | infras_pool_http: 10 | type: string 11 | description: The infra nodes HTTP lbaas pool 12 | 13 | infras_pool_https: 14 | type: string 15 | description: The infra nodes HTTPS lbaas pool 16 | 17 | infras_app_port_http: 18 | type: number 19 | default: 80 20 | description: HTTP port served by the infra nodes 21 | 22 | infras_app_port_https: 23 | type: number 24 | default: 443 25 | description: HTTPS port served by the infra nodes 26 | 27 | external_network: 28 | type: string 29 | description: > 30 | The external network that provides floating IP addresses for the nodes 31 | constraints: 32 | - custom_constraint: neutron.network 33 | 34 | service_network: 35 | description: > 36 | The name or ID of the internal network 37 | type: string 38 | constraints: 39 | - custom_constraint: neutron.network 40 | 41 | service_subnet: 42 | description: > 43 | The name or ID of the internal IPv4 space 44 | type: string 45 | constraints: 46 | - custom_constraint: neutron.subnet 47 | 48 |
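# Note: the two pool parameters above are load-balancer pool IDs produced as
# outputs of lbaas_infras.yaml and passed down by the parent stack. A minimal
# sketch of that wiring (resource names here are illustrative, not copied
# verbatim from openshift.yaml):
#
#   lbaas_infras:
#     type: lbaas_infras.yaml
#     properties:
#       external_network: {get_param: external_network}
#       service_subnet: {get_param: service_subnet}
#
#   infras:
#     type: infras.yaml
#     properties:
#       infras_pool_http: {get_attr: [lbaas_infras, infras_pool_http]}
#       infras_pool_https: {get_attr: [lbaas_infras, infras_pool_https]}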
security_group: 49 | description: > 50 | Allow OpenShift access to instances 51 | type: string 52 | constraints: 53 | - custom_constraint: neutron.security_group 54 | 55 | # Host Characteristics 56 | hostname: 57 | description: The hostname of this infra node 58 | type: string 59 | 60 | domain_name: 61 | description: The domain in which the infra node is placed 62 | type: string 63 | 64 | server_group: 65 | description: > 66 | ID of a server group containing all of the infra hosts 67 | type: string 68 | 69 | image: 70 | description: > 71 | The Glance image to use as a base for infra nodes 72 | type: string 73 | constraints: 74 | - custom_constraint: glance.image 75 | 76 | flavor: 77 | description: > 78 | The name of the OpenStack instance flavor to use for infra nodes 79 | type: string 80 | constraints: 81 | - custom_constraint: nova.flavor 82 | 83 | # Access to the VMs 84 | ssh_user: 85 | type: string 86 | description: > 87 | The SSH user available on all nodes. 88 | 89 | ssh_key_name: 90 | type: string 91 | description: Name of the SSH keypair registered with Nova 92 | constraints: 93 | - custom_constraint: nova.keypair 94 | 95 | resources: 96 | host: 97 | type: OS::Nova::Server 98 | properties: 99 | name: 100 | str_replace: 101 | template: "HOSTNAME" 102 | params: 103 | HOSTNAME: {get_param: hostname} 104 | user_data_format: SOFTWARE_CONFIG 105 | user_data: {get_resource: init} 106 | image: {get_param: image} 107 | flavor: {get_param: flavor} 108 | admin_user: {get_param: ssh_user} 109 | key_name: {get_param: ssh_key_name} 110 | networks: 111 | - port: {get_resource: port} 112 | scheduler_hints: 113 | group: {get_param: server_group} 114 | block_device_mapping: 115 | - device_name: vda 116 | volume_id: { get_resource: root_volume } 117 | delete_on_termination: true 118 | 119 | port: 120 | type: OS::Neutron::Port 121 | properties: 122 | security_groups: 123 | - {get_param: security_group} 124 | network: {get_param: service_network} 125 | fixed_ips: 126 | - subnet: {get_param: service_subnet} 127 | replacement_policy: AUTO 128 | 129 | pool_member_http: 130 | type: OS::Neutron::LBaaS::PoolMember 131 | properties: 132 | pool: { get_param: infras_pool_http } 133 | address: { get_attr: [ host, first_address ]} 134 | protocol_port: { get_param: infras_app_port_http } 135 | subnet: { get_param: service_subnet } 136 | 137 | pool_member_https: 138 | type: OS::Neutron::LBaaS::PoolMember 139 | properties: 140 | pool: { get_param: infras_pool_https } 141 | address: { get_attr: [ host, first_address ]} 142 | protocol_port: { get_param: infras_app_port_https } 143 | subnet: { get_param: service_subnet } 144 | 145 | root_volume: 146 | type: OS::Cinder::Volume 147 | properties: 148 | size: 30 149 | image: {get_param: image} 150 | availability_zone: nova 151 | 152 | docker_volume: 153 | type: OS::Cinder::Volume 154 | properties: 155 | size: 25 156 | availability_zone: nova 157 | 158 | volume_attachment: 159 | type: OS::Cinder::VolumeAttachment 160 | properties: 161 | volume_id: { get_resource: docker_volume } 162 | instance_uuid: { get_resource: host } 163 | mountpoint: /dev/vdc 164 | 165 | init: 166 | type: OS::Heat::MultipartMime 167 | properties: 168 | parts: 169 | - config: {get_resource: set_hostname} 170 | 171 | set_hostname: 172 | type: OS::Heat::CloudConfig 173 | properties: 174 | cloud_config: 175 | hostname: {get_param: hostname} 176 | fqdn: 177 | str_replace: 178 | template: "HOSTNAME.DOMAIN" 179 | params: 180 | HOSTNAME: {get_param: hostname} 181 | DOMAIN: {get_param: domain_name} 182 | 183 | outputs: 184 |
entry: 185 | value: 186 | name: {get_param: hostname} 187 | address: {get_attr: [port, fixed_ips, 0, ip_address]} 188 | -------------------------------------------------------------------------------- /heat/infra_octavia.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | heat_template_version: 2016-10-14 3 | 4 | description: 5 | An instance for a OpenShift infra node 6 | The instances are created with Heat and then configured with Ansible 7 | 8 | parameters: 9 | infras_pool_http: 10 | type: string 11 | description: The infras lbaas pool 12 | 13 | infras_pool_https: 14 | type: string 15 | description: The infras lbaas pool 16 | 17 | infras_app_port_http: 18 | type: number 19 | default: 80 20 | description: Port used by the servers 21 | 22 | infras_app_port_https: 23 | type: number 24 | default: 443 25 | description: Port used by the servers 26 | 27 | external_network: 28 | type: string 29 | description: > 30 | The external network that provides floating IP addresses for the nodes 31 | constraints: 32 | - custom_constraint: neutron.network 33 | 34 | service_network: 35 | description: > 36 | The name or ID of the internal network 37 | type: string 38 | constraints: 39 | - custom_constraint: neutron.network 40 | 41 | service_subnet: 42 | description: > 43 | The name or ID of the internal IPv4 space 44 | type: string 45 | constraints: 46 | - custom_constraint: neutron.subnet 47 | 48 | security_group: 49 | description: > 50 | Allow OpenShift access to instances 51 | type: string 52 | constraints: 53 | - custom_constraint: neutron.security_group 54 | 55 | # Host Characteristics 56 | hostname: 57 | description: The prefix for infra hostnames 58 | type: string 59 | 60 | domain_name: 61 | description: The prefix for infra node domain 62 | type: string 63 | 64 | server_group: 65 | description: > 66 | ID of a server group containing all of the master hosts 67 | type: string 68 | 69 | image: 70 | description: > 71 | The Glance image to use as a base for infra nodes 72 | type: string 73 | constraints: 74 | - custom_constraint: glance.image 75 | 76 | flavor: 77 | description: > 78 | The name of the OpenStack instance flavor to use for infra nodes 79 | type: string 80 | constraints: 81 | - custom_constraint: nova.flavor 82 | 83 | # Access to the VMs 84 | ssh_user: 85 | type: string 86 | description: > 87 | The SSH user available on all nodes. 
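# This template mirrors infra.yaml, swapping the OS::Neutron::LBaaS::* pool
# members for OS::Octavia::* ones (the Octavia resource types ship with Heat
# from the Queens release onward, so older clouds need the Neutron LBaaS
# variant). Which family is used is decided by the top-level template passed
# at stack creation, e.g. (stack and environment file names illustrative):
#
#   $ openstack stack create -t heat/openshift_octavia.yaml \
#       -e env.yaml openshift-cluster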
88 | 89 | ssh_key_name: 90 | type: string 91 | description: Name of the SSH keypair registered with Nova 92 | constraints: 93 | - custom_constraint: nova.keypair 94 | 95 | resources: 96 | host: 97 | type: OS::Nova::Server 98 | properties: 99 | name: 100 | str_replace: 101 | template: "HOSTNAME" 102 | params: 103 | HOSTNAME: {get_param: hostname} 104 | user_data_format: SOFTWARE_CONFIG 105 | user_data: {get_resource: init} 106 | image: {get_param: image} 107 | flavor: {get_param: flavor} 108 | admin_user: {get_param: ssh_user} 109 | key_name: {get_param: ssh_key_name} 110 | networks: 111 | - port: {get_resource: port} 112 | scheduler_hints: 113 | group: {get_param: server_group} 114 | block_device_mapping: 115 | - device_name: vda 116 | volume_id: { get_resource: root_volume } 117 | delete_on_termination: true 118 | 119 | port: 120 | type: OS::Neutron::Port 121 | properties: 122 | security_groups: 123 | - {get_param: security_group} 124 | network: {get_param: service_network} 125 | fixed_ips: 126 | - subnet: {get_param: service_subnet} 127 | replacement_policy: AUTO 128 | 129 | pool_member_http: 130 | type: OS::Octavia::PoolMember 131 | properties: 132 | pool: { get_param: infras_pool_http } 133 | address: { get_attr: [ host, first_address ]} 134 | protocol_port: { get_param: infras_app_port_http } 135 | subnet: { get_param: service_subnet } 136 | 137 | pool_member_https: 138 | type: OS::Octavia::PoolMember 139 | properties: 140 | pool: { get_param: infras_pool_https } 141 | address: { get_attr: [ host, first_address ]} 142 | protocol_port: { get_param: infras_app_port_https } 143 | subnet: { get_param: service_subnet } 144 | 145 | root_volume: 146 | type: OS::Cinder::Volume 147 | properties: 148 | size: 30 149 | image: {get_param: image} 150 | availability_zone: nova 151 | 152 | docker_volume: 153 | type: OS::Cinder::Volume 154 | properties: 155 | size: 25 156 | availability_zone: nova 157 | 158 | volume_attachment: 159 | type: OS::Cinder::VolumeAttachment 160 | properties: 161 | volume_id: { get_resource: docker_volume } 162 | instance_uuid: { get_resource: host } 163 | mountpoint: /dev/vdc 164 | 165 | init: 166 | type: OS::Heat::MultipartMime 167 | properties: 168 | parts: 169 | - config: {get_resource: set_hostname} 170 | 171 | set_hostname: 172 | type: OS::Heat::CloudConfig 173 | properties: 174 | cloud_config: 175 | hostname: {get_param: hostname} 176 | fqdn: 177 | str_replace: 178 | template: "HOSTNAME.DOMAIN" 179 | params: 180 | HOSTNAME: {get_param: hostname} 181 | DOMAIN: {get_param: domain_name} 182 | 183 | outputs: 184 | entry: 185 | value: 186 | name: {get_param: hostname} 187 | address: {get_attr: [port, fixed_ips, 0, ip_address]} 188 | -------------------------------------------------------------------------------- /heat/infras.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | heat_template_version: 2016-10-14 3 | 4 | description: 5 | An instance for a OpenShift infra node 6 | The instances are created with Heat and then configured with Ansible 7 | 8 | parameters: 9 | infras_pool_http: 10 | type: string 11 | description: The infras lbaas pool 12 | 13 | infras_pool_https: 14 | type: string 15 | description: The infras lbaas pool 16 | 17 | infra_count: 18 | type: number 19 | description: > 20 | Number of slave servers to create. 
21 | default: 1 22 | 23 | hostname_prefix: 24 | description: > 25 | The default prefix for slave server hostnames 26 | type: string 27 | default: "infra" 28 | 29 | domain_name: 30 | description: > 31 | All VMs will be placed in this domain 32 | type: string 33 | 34 | # Connectivity 35 | external_network: 36 | type: string 37 | description: > 38 | The external network that provides floating IP addresses for the infra nodes 39 | constraints: 40 | - custom_constraint: neutron.network 41 | 42 | service_network: 43 | description: > 44 | The name or ID of the internal network 45 | type: string 46 | constraints: 47 | - custom_constraint: neutron.network 48 | 49 | service_subnet: 50 | description: > 51 | The name or ID of the internal IPv4 space 52 | type: string 53 | constraints: 54 | - custom_constraint: neutron.subnet 55 | 56 | security_group: 57 | description: > 58 | Allow OpenShift service access to instances 59 | type: string 60 | constraints: 61 | - custom_constraint: neutron.security_group 62 | 63 | infra_server_group_policies: 64 | type: comma_delimited_list 65 | description: > 66 | List of policies applied on infra nodes ServerGroup. By default 67 | 'anti-affinity' policy is used to make sure that each infra node 68 | is deployed on a different host. If you use a small/all-in-one openstack 69 | environment, you may need to disable this e.g. by passing 70 | '-P infra_server_group_policies=affinity'. 71 | default: ['anti-affinity'] 72 | 73 | image: 74 | description: > 75 | The Glance image to use as a base for OpenShift servers 76 | type: string 77 | constraints: 78 | - custom_constraint: glance.image 79 | 80 | infra_flavor: 81 | description: > 82 | The name of the OpenStack instance flavor to use for OpenShift servers 83 | type: string 84 | default: m1.medium 85 | constraints: 86 | - custom_constraint: nova.flavor 87 | 88 | # Access to the VMs 89 | ssh_user: 90 | type: string 91 | description: > 92 | The SSH user available on all nodes. 
93 | default: cloud-user 94 | 95 | ssh_key_name: 96 | type: string 97 | description: Name of the SSH keypair registered with Nova 98 | constraints: 99 | - custom_constraint: nova.keypair 100 | 101 | resources: 102 | 103 | infras: 104 | type: OS::Heat::ResourceGroup 105 | properties: 106 | count: {get_param: infra_count} 107 | resource_def: 108 | type: infra.yaml 109 | properties: 110 | infras_pool_http: {get_param: infras_pool_http} 111 | infras_pool_https: {get_param: infras_pool_https} 112 | image: {get_param: image} 113 | flavor: {get_param: infra_flavor} 114 | external_network: {get_param: external_network} 115 | service_network: {get_param: service_network} 116 | service_subnet: {get_param: service_subnet} 117 | security_group: {get_param: security_group} 118 | hostname: 119 | str_replace: 120 | template: "%prefix%%index%" 121 | params: 122 | '%prefix%': {get_param: hostname_prefix} 123 | domain_name: {get_param: domain_name} 124 | ssh_user: {get_param: ssh_user} 125 | ssh_key_name: {get_param: ssh_key_name} 126 | server_group: {get_resource: infra_server_group} 127 | 128 | infra_server_group: 129 | type: OS::Nova::ServerGroup 130 | properties: 131 | name: infra_server_group 132 | policies: {get_param: infra_server_group_policies} 133 | 134 | 135 | outputs: 136 | entries: 137 | value: {get_attr: [infras, entry]} 138 | -------------------------------------------------------------------------------- /heat/infras_octavia.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | heat_template_version: 2016-10-14 3 | 4 | description: 5 | An instance for a OpenShift infra node 6 | The instances are created with Heat and then configured with Ansible 7 | 8 | parameters: 9 | infras_pool_http: 10 | type: string 11 | description: The infras lbaas pool 12 | 13 | infras_pool_https: 14 | type: string 15 | description: The infras lbaas pool 16 | 17 | infra_count: 18 | type: number 19 | description: > 20 | Number of slave servers to create. 21 | default: 1 22 | 23 | hostname_prefix: 24 | description: > 25 | The default prefix for slave server hostnames 26 | type: string 27 | default: "infra" 28 | 29 | domain_name: 30 | description: > 31 | All VMs will be placed in this domain 32 | type: string 33 | 34 | # Connectivity 35 | external_network: 36 | type: string 37 | description: > 38 | The external network that provides floating IP addresses for the infra nodes 39 | constraints: 40 | - custom_constraint: neutron.network 41 | 42 | service_network: 43 | description: > 44 | The name or ID of the internal network 45 | type: string 46 | constraints: 47 | - custom_constraint: neutron.network 48 | 49 | service_subnet: 50 | description: > 51 | The name or ID of the internal IPv4 space 52 | type: string 53 | constraints: 54 | - custom_constraint: neutron.subnet 55 | 56 | security_group: 57 | description: > 58 | Allow OpenShift service access to instances 59 | type: string 60 | constraints: 61 | - custom_constraint: neutron.security_group 62 | 63 | infra_server_group_policies: 64 | type: comma_delimited_list 65 | description: > 66 | List of policies applied on infra nodes ServerGroup. By default 67 | 'anti-affinity' policy is used to make sure that each infra node 68 | is deployed on a different host. If you use a small/all-in-one openstack 69 | environment, you may need to disable this e.g. by passing 70 | '-P infra_server_group_policies=affinity'. 
71 | default: ['anti-affinity'] 72 | 73 | image: 74 | description: > 75 | The Glance image to use as a base for OpenShift servers 76 | type: string 77 | constraints: 78 | - custom_constraint: glance.image 79 | 80 | infra_flavor: 81 | description: > 82 | The name of the OpenStack instance flavor to use for OpenShift servers 83 | type: string 84 | default: m1.medium 85 | constraints: 86 | - custom_constraint: nova.flavor 87 | 88 | # Access to the VMs 89 | ssh_user: 90 | type: string 91 | description: > 92 | The SSH user available on all nodes. 93 | default: cloud-user 94 | 95 | ssh_key_name: 96 | type: string 97 | description: Name of the SSH keypair registered with Nova 98 | constraints: 99 | - custom_constraint: nova.keypair 100 | 101 | resources: 102 | 103 | infras: 104 | type: OS::Heat::ResourceGroup 105 | properties: 106 | count: {get_param: infra_count} 107 | resource_def: 108 | type: infra_octavia.yaml 109 | properties: 110 | infras_pool_http: {get_param: infras_pool_http} 111 | infras_pool_https: {get_param: infras_pool_https} 112 | image: {get_param: image} 113 | flavor: {get_param: infra_flavor} 114 | external_network: {get_param: external_network} 115 | service_network: {get_param: service_network} 116 | service_subnet: {get_param: service_subnet} 117 | security_group: {get_param: security_group} 118 | hostname: 119 | str_replace: 120 | template: "%prefix%%index%" 121 | params: 122 | '%prefix%': {get_param: hostname_prefix} 123 | domain_name: {get_param: domain_name} 124 | ssh_user: {get_param: ssh_user} 125 | ssh_key_name: {get_param: ssh_key_name} 126 | server_group: {get_resource: infra_server_group} 127 | 128 | infra_server_group: 129 | type: OS::Nova::ServerGroup 130 | properties: 131 | name: infra_server_group 132 | policies: {get_param: infra_server_group_policies} 133 | 134 | 135 | outputs: 136 | entries: 137 | value: {get_attr: [infras, entry]} 138 | -------------------------------------------------------------------------------- /heat/lbaas_infras.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | heat_template_version: 2016-10-14 3 | 4 | description: 5 | A Load Balancer for OpenShift Infrastructure Nodes 6 | 7 | parameters: 8 | lb_port_http: 9 | type: number 10 | default: 80 11 | description: Port used by the load balancer 12 | 13 | lb_port_https: 14 | type: number 15 | default: 443 16 | description: Port used by the load balancer 17 | 18 | external_network: 19 | type: string 20 | description: Network used by the load balancer 21 | constraints: 22 | - custom_constraint: neutron.network 23 | 24 | service_subnet: 25 | description: > 26 | The name or ID of the internal IPv4 space 27 | type: string 28 | constraints: 29 | - custom_constraint: neutron.subnet 30 | 31 | resources: 32 | # HTTP 33 | http_monitor: 34 | type: OS::Neutron::LBaaS::HealthMonitor 35 | properties: 36 | delay: 3 37 | type: PING 38 | timeout: 3 39 | max_retries: 3 40 | pool: { get_resource: http_pool } 41 | 42 | http_pool: 43 | type: OS::Neutron::LBaaS::Pool 44 | properties: 45 | lb_algorithm: LEAST_CONNECTIONS 46 | protocol: HTTP 47 | listener: { get_resource: http_listener } 48 | 49 | http_listener: 50 | type: OS::Neutron::LBaaS::Listener 51 | properties: 52 | loadbalancer: { get_resource: loadbalancer } 53 | protocol: HTTP 54 | protocol_port: { get_param: lb_port_http } 55 | 56 | # HTTPS 57 | https_monitor: 58 | type: OS::Neutron::LBaaS::HealthMonitor 59 | properties: 60 | delay: 3 61 | type: PING 62 | timeout: 3 63 | max_retries: 3 64 | pool: { 
get_resource: https_pool } 65 | 66 | https_pool: 67 | type: OS::Neutron::LBaaS::Pool 68 | properties: 69 | lb_algorithm: LEAST_CONNECTIONS 70 | protocol: HTTPS 71 | listener: { get_resource: https_listener } 72 | 73 | https_listener: 74 | type: OS::Neutron::LBaaS::Listener 75 | properties: 76 | loadbalancer: { get_resource: loadbalancer } 77 | protocol: HTTPS 78 | protocol_port: { get_param: lb_port_https } 79 | 80 | # Load Balancer 81 | loadbalancer: 82 | type: OS::Neutron::LBaaS::LoadBalancer 83 | properties: 84 | vip_subnet: { get_param: service_subnet } 85 | 86 | floating_ip: 87 | type: OS::Neutron::FloatingIP 88 | properties: 89 | floating_network: { get_param: external_network } 90 | port_id: { get_attr: [loadbalancer, vip_port_id ]} 91 | 92 | outputs: 93 | infras_pool_http: 94 | description: The load balancer pool 95 | value: {get_resource: http_pool} 96 | infras_pool_https: 97 | description: The load balancer pool 98 | value: {get_resource: https_pool} 99 | lb_infras_floatingip: 100 | value: {get_attr: [floating_ip, floating_ip_address]} 101 | -------------------------------------------------------------------------------- /heat/lbaas_infras_octavia.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | heat_template_version: 2016-10-14 3 | 4 | description: 5 | A Load Balancer for OpenShift Infrastructure Nodes 6 | 7 | parameters: 8 | lb_port_http: 9 | type: number 10 | default: 80 11 | description: Port used by the load balancer 12 | 13 | lb_port_https: 14 | type: number 15 | default: 443 16 | description: Port used by the load balancer 17 | 18 | external_network: 19 | type: string 20 | description: Network used by the load balancer 21 | constraints: 22 | - custom_constraint: neutron.network 23 | 24 | service_subnet: 25 | description: > 26 | The name or ID of the internal IPv4 space 27 | type: string 28 | constraints: 29 | - custom_constraint: neutron.subnet 30 | 31 | resources: 32 | # HTTP 33 | http_monitor: 34 | type: OS::Octavia::HealthMonitor 35 | properties: 36 | delay: 3 37 | type: PING 38 | timeout: 3 39 | max_retries: 3 40 | pool: { get_resource: http_pool } 41 | 42 | http_pool: 43 | type: OS::Octavia::Pool 44 | properties: 45 | lb_algorithm: LEAST_CONNECTIONS 46 | protocol: HTTP 47 | listener: { get_resource: http_listener } 48 | 49 | http_listener: 50 | type: OS::Octavia::Listener 51 | properties: 52 | loadbalancer: { get_resource: loadbalancer } 53 | protocol: HTTP 54 | protocol_port: { get_param: lb_port_http } 55 | 56 | # HTTPS 57 | https_monitor: 58 | type: OS::Octavia::HealthMonitor 59 | properties: 60 | delay: 3 61 | type: PING 62 | timeout: 3 63 | max_retries: 3 64 | pool: { get_resource: https_pool } 65 | 66 | https_pool: 67 | type: OS::Octavia::Pool 68 | properties: 69 | lb_algorithm: LEAST_CONNECTIONS 70 | protocol: HTTPS 71 | listener: { get_resource: https_listener } 72 | 73 | https_listener: 74 | type: OS::Octavia::Listener 75 | properties: 76 | loadbalancer: { get_resource: loadbalancer } 77 | protocol: HTTPS 78 | protocol_port: { get_param: lb_port_https } 79 | 80 | # Load Balancer 81 | loadbalancer: 82 | type: OS::Octavia::LoadBalancer 83 | properties: 84 | vip_subnet: { get_param: service_subnet } 85 | 86 | floating_ip: 87 | type: OS::Neutron::FloatingIP 88 | properties: 89 | floating_network: { get_param: external_network } 90 | port_id: { get_attr: [loadbalancer, vip_port_id ]} 91 | 92 | outputs: 93 | infras_pool_http: 94 | description: The load balancer pool 95 | value: {get_resource: http_pool} 96 | 
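# These outputs are read back by the calling stack to register pool members;
# they can also be inspected by hand once the stack is up, e.g. to find the
# floating IP that the apps wildcard DNS record should point at (stack name
# illustrative):
#
#   $ openstack stack output show openshift-cluster lb_infras_floatingip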
infras_pool_https: 97 | description: The load balancer pool 98 | value: {get_resource: https_pool} 99 | lb_infras_floatingip: 100 | value: {get_attr: [floating_ip, floating_ip_address]} 101 | -------------------------------------------------------------------------------- /heat/lbaas_masters.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | heat_template_version: 2016-10-14 3 | 4 | description: 5 | A Load Balancer for OpenShift Masters 6 | 7 | parameters: 8 | lb_port: 9 | type: number 10 | default: 8443 11 | description: Port used by the load balancer 12 | 13 | external_network: 14 | type: string 15 | description: Network used by the load balancer 16 | constraints: 17 | - custom_constraint: neutron.network 18 | 19 | service_subnet: 20 | description: > 21 | The name or ID of the internal IPv4 space 22 | type: string 23 | constraints: 24 | - custom_constraint: neutron.subnet 25 | 26 | resources: 27 | monitor: 28 | type: OS::Neutron::LBaaS::HealthMonitor 29 | properties: 30 | delay: 3 31 | type: HTTPS 32 | timeout: 3 33 | max_retries: 3 34 | pool: { get_resource: pool } 35 | 36 | pool: 37 | type: OS::Neutron::LBaaS::Pool 38 | properties: 39 | lb_algorithm: ROUND_ROBIN 40 | protocol: HTTPS 41 | listener: { get_resource: listener } 42 | 43 | listener: 44 | type: OS::Neutron::LBaaS::Listener 45 | properties: 46 | loadbalancer: { get_resource: loadbalancer } 47 | protocol: HTTPS 48 | protocol_port: { get_param: lb_port } 49 | 50 | loadbalancer: 51 | type: OS::Neutron::LBaaS::LoadBalancer 52 | properties: 53 | vip_subnet: { get_param: service_subnet } 54 | 55 | floating_ip: 56 | type: OS::Neutron::FloatingIP 57 | properties: 58 | floating_network: { get_param: external_network } 59 | port_id: { get_attr: [loadbalancer, vip_port_id ]} 60 | 61 | outputs: 62 | masters_pool: 63 | description: The load balancer pool 64 | value: {get_resource: pool} 65 | lb_masters_floatingip: 66 | value: {get_attr: [floating_ip, floating_ip_address]} 67 | lburl: 68 | value: 69 | str_replace: 70 | template: https://NAME.IP_ADDRESS.DOMAIN:PORT 71 | params: 72 | IP_ADDRESS: { get_attr: [ floating_ip, floating_ip_address ] } 73 | PORT: { get_param: lb_port } 74 | NAME: openshift 75 | DOMAIN: xip.io 76 | description: > 77 | This URL is the "external" URL to access openshift 78 | -------------------------------------------------------------------------------- /heat/lbaas_masters_octavia.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | heat_template_version: 2016-10-14 3 | 4 | description: 5 | A Load Balancer for OpenShift Masters 6 | 7 | parameters: 8 | lb_port: 9 | type: number 10 | default: 8443 11 | description: Port used by the load balancer 12 | 13 | external_network: 14 | type: string 15 | description: Network used by the load balancer 16 | constraints: 17 | - custom_constraint: neutron.network 18 | 19 | service_subnet: 20 | description: > 21 | The name or ID of the internal IPv4 space 22 | type: string 23 | constraints: 24 | - custom_constraint: neutron.subnet 25 | 26 | resources: 27 | monitor: 28 | type: OS::Octavia::HealthMonitor 29 | properties: 30 | delay: 3 31 | type: HTTPS 32 | timeout: 3 33 | max_retries: 3 34 | pool: { get_resource: pool } 35 | 36 | pool: 37 | type: OS::Octavia::Pool 38 | properties: 39 | lb_algorithm: ROUND_ROBIN 40 | protocol: HTTPS 41 | listener: { get_resource: listener } 42 | 43 | listener: 44 | type: OS::Octavia::Listener 45 | properties: 46 | loadbalancer: { get_resource: loadbalancer } 47 
| protocol: HTTPS 48 | protocol_port: { get_param: lb_port } 49 | 50 | loadbalancer: 51 | type: OS::Octavia::LoadBalancer 52 | properties: 53 | vip_subnet: { get_param: service_subnet } 54 | 55 | floating_ip: 56 | type: OS::Neutron::FloatingIP 57 | properties: 58 | floating_network: { get_param: external_network } 59 | port_id: { get_attr: [loadbalancer, vip_port_id ]} 60 | 61 | outputs: 62 | masters_pool: 63 | description: The load balancer pool 64 | value: {get_resource: pool} 65 | lb_masters_floatingip: 66 | value: {get_attr: [floating_ip, floating_ip_address]} 67 | lburl: 68 | value: 69 | str_replace: 70 | template: https://NAME.IP_ADDRESS.DOMAIN:PORT 71 | params: 72 | IP_ADDRESS: { get_attr: [ floating_ip, floating_ip_address ] } 73 | PORT: { get_param: lb_port } 74 | NAME: openshift 75 | DOMAIN: xip.io 76 | description: > 77 | This URL is the "external" URL to access openshift 78 | -------------------------------------------------------------------------------- /heat/lbaas_single.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | heat_template_version: 2016-10-14 3 | 4 | description: 5 | A single Load Balancer shared by the OpenShift masters and infrastructure nodes 6 | 7 | parameters: 8 | lb_port_master: 9 | type: number 10 | default: 8443 11 | description: Port used for UI/API access to OpenShift 12 | 13 | lb_port_infra_http: 14 | type: number 15 | default: 80 16 | description: Port used for app access http over infra nodes 17 | 18 | lb_port_infra_https: 19 | type: number 20 | default: 443 21 | description: Port used for app access https over infra nodes 22 | 23 | external_network: 24 | type: string 25 | description: Network used by the load balancer 26 | constraints: 27 | - custom_constraint: neutron.network 28 | 29 | service_subnet: 30 | description: > 31 | The name or ID of the internal IPv4 space 32 | type: string 33 | constraints: 34 | - custom_constraint: neutron.subnet 35 | 36 | resources: 37 | # MASTERS 38 | monitor_master: 39 | type: OS::Neutron::LBaaS::HealthMonitor 40 | properties: 41 | delay: 3 42 | type: HTTPS 43 | timeout: 3 44 | max_retries: 3 45 | pool: { get_resource: pool_master } 46 | 47 | pool_master: 48 | type: OS::Neutron::LBaaS::Pool 49 | properties: 50 | lb_algorithm: ROUND_ROBIN 51 | protocol: HTTPS 52 | listener: { get_resource: listener_master } 53 | 54 | listener_master: 55 | type: OS::Neutron::LBaaS::Listener 56 | properties: 57 | loadbalancer: { get_resource: loadbalancer } 58 | protocol: HTTPS 59 | protocol_port: { get_param: lb_port_master } 60 | 61 | # HTTP INFRA 62 | http_monitor_infra: 63 | type: OS::Neutron::LBaaS::HealthMonitor 64 | properties: 65 | delay: 3 66 | type: PING 67 | timeout: 3 68 | max_retries: 3 69 | pool: { get_resource: http_pool_infra } 70 | 71 | http_pool_infra: 72 | type: OS::Neutron::LBaaS::Pool 73 | properties: 74 | lb_algorithm: LEAST_CONNECTIONS 75 | protocol: HTTP 76 | listener: { get_resource: http_listener_infra } 77 | 78 | http_listener_infra: 79 | type: OS::Neutron::LBaaS::Listener 80 | properties: 81 | loadbalancer: { get_resource: loadbalancer } 82 | protocol: HTTP 83 | protocol_port: { get_param: lb_port_infra_http } 84 | 85 | # HTTPS INFRA 86 | https_monitor_infra: 87 | type: OS::Neutron::LBaaS::HealthMonitor 88 | properties: 89 | delay: 3 90 | type: PING 91 | timeout: 3 92 | max_retries: 3 93 | pool: { get_resource: https_pool_infra } 94 | 95 | https_pool_infra: 96 | type: OS::Neutron::LBaaS::Pool 97 | properties: 98 | lb_algorithm: LEAST_CONNECTIONS 99 | protocol: HTTPS 100 | listener: {
get_resource: https_listener_infra } 101 | 102 | https_listener_infra: 103 | type: OS::Neutron::LBaaS::Listener 104 | properties: 105 | loadbalancer: { get_resource: loadbalancer } 106 | protocol: HTTPS 107 | protocol_port: { get_param: lb_port_infra_https } 108 | 109 | # Load Balancer 110 | loadbalancer: 111 | type: OS::Neutron::LBaaS::LoadBalancer 112 | properties: 113 | vip_subnet: { get_param: service_subnet } 114 | 115 | floating_ip: 116 | type: OS::Neutron::FloatingIP 117 | properties: 118 | floating_network: { get_param: external_network } 119 | port_id: { get_attr: [loadbalancer, vip_port_id ]} 120 | 121 | outputs: 122 | masters_pool: 123 | description: The load balancer pool 124 | value: {get_resource: pool_master} 125 | lburl: 126 | value: 127 | str_replace: 128 | template: https://NAME.IP_ADDRESS.DOMAIN:PORT 129 | params: 130 | IP_ADDRESS: { get_attr: [ floating_ip, floating_ip_address ] } 131 | PORT: { get_param: lb_port_master } 132 | NAME: openshift 133 | DOMAIN: xip.io 134 | description: > 135 | This URL is the "external" URL to access openshift 136 | infras_pool_http: 137 | description: The load balancer pool 138 | value: {get_resource: http_pool_infra} 139 | infras_pool_https: 140 | description: The load balancer pool 141 | value: {get_resource: https_pool_infra} 142 | lb_floatingip: 143 | value: {get_attr: [floating_ip, floating_ip_address]} 144 | -------------------------------------------------------------------------------- /heat/master.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | heat_template_version: 2016-10-14 3 | 4 | description: 5 | An instance for a OpenShift master node 6 | The instances are created with Heat and then configured with Ansible 7 | 8 | parameters: 9 | masters_pool: 10 | type: string 11 | description: The masters lbaas pool 12 | 13 | masters_app_port: 14 | type: number 15 | default: 8443 16 | description: Port used by the servers 17 | 18 | external_network: 19 | type: string 20 | description: > 21 | The external network that provides floating IP addresses for the nodes 22 | constraints: 23 | - custom_constraint: neutron.network 24 | 25 | service_network: 26 | description: > 27 | The name or ID of the internal network 28 | type: string 29 | constraints: 30 | - custom_constraint: neutron.network 31 | 32 | service_subnet: 33 | description: > 34 | The name or ID of the internal IPv4 space 35 | type: string 36 | constraints: 37 | - custom_constraint: neutron.subnet 38 | 39 | security_group: 40 | description: > 41 | Allow OpenShift access to instances 42 | type: string 43 | constraints: 44 | - custom_constraint: neutron.security_group 45 | 46 | # Host Characteristics 47 | hostname: 48 | description: The prefix for master hostnames 49 | type: string 50 | 51 | domain_name: 52 | description: The prefix for master node domain 53 | type: string 54 | 55 | server_group: 56 | description: > 57 | ID of a server group containing all of the master hosts 58 | type: string 59 | 60 | image: 61 | description: > 62 | The Glance image to use as a base for master nodes 63 | type: string 64 | constraints: 65 | - custom_constraint: glance.image 66 | 67 | flavor: 68 | description: > 69 | The name of the OpenStack instance flavor to use for master nodes 70 | type: string 71 | constraints: 72 | - custom_constraint: nova.flavor 73 | 74 | # Access to the VMs 75 | ssh_user: 76 | type: string 77 | description: > 78 | The SSH user available on all nodes. 
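# Each instance of this template registers itself with the masters load
# balancer via the pool_member resource below: its fixed IP joins
# masters_pool on masters_app_port (8443), so scaling master_count in
# masters.yaml grows or shrinks the pool automatically. Membership can be
# checked after a deploy with the Neutron LBaaS v2 CLI, assuming it is
# installed (pool ID taken from the stack outputs):
#
#   $ neutron lbaas-member-list <pool_id>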
79 | 80 | ssh_key_name: 81 | type: string 82 | description: Name of the SSH keypair registered with Nova 83 | constraints: 84 | - custom_constraint: nova.keypair 85 | 86 | resources: 87 | host: 88 | type: OS::Nova::Server 89 | properties: 90 | name: 91 | str_replace: 92 | template: "HOSTNAME" 93 | params: 94 | HOSTNAME: {get_param: hostname} 95 | user_data_format: SOFTWARE_CONFIG 96 | user_data: {get_resource: init} 97 | image: {get_param: image} 98 | flavor: {get_param: flavor} 99 | admin_user: {get_param: ssh_user} 100 | key_name: {get_param: ssh_key_name} 101 | networks: 102 | - port: {get_resource: port} 103 | scheduler_hints: 104 | group: {get_param: server_group} 105 | block_device_mapping: 106 | - device_name: vda 107 | volume_id: { get_resource: root_volume } 108 | delete_on_termination: true 109 | 110 | port: 111 | type: OS::Neutron::Port 112 | properties: 113 | security_groups: 114 | - {get_param: security_group} 115 | network: {get_param: service_network} 116 | fixed_ips: 117 | - subnet: {get_param: service_subnet} 118 | replacement_policy: AUTO 119 | 120 | pool_member: 121 | type: OS::Neutron::LBaaS::PoolMember 122 | properties: 123 | pool: { get_param: masters_pool } 124 | address: { get_attr: [ host, first_address ]} 125 | protocol_port: { get_param: masters_app_port } 126 | subnet: { get_param: service_subnet } 127 | 128 | root_volume: 129 | type: OS::Cinder::Volume 130 | properties: 131 | size: 30 132 | image: {get_param: image} 133 | availability_zone: nova 134 | 135 | docker_volume: 136 | type: OS::Cinder::Volume 137 | properties: 138 | size: 25 139 | availability_zone: nova 140 | 141 | volume_attachment: 142 | type: OS::Cinder::VolumeAttachment 143 | properties: 144 | volume_id: { get_resource: docker_volume } 145 | instance_uuid: { get_resource: host } 146 | mountpoint: /dev/vdc 147 | 148 | init: 149 | type: OS::Heat::MultipartMime 150 | properties: 151 | parts: 152 | - config: {get_resource: set_hostname} 153 | 154 | set_hostname: 155 | type: OS::Heat::CloudConfig 156 | properties: 157 | cloud_config: 158 | hostname: {get_param: hostname} 159 | fqdn: 160 | str_replace: 161 | template: "HOSTNAME.DOMAIN" 162 | params: 163 | HOSTNAME: {get_param: hostname} 164 | DOMAIN: {get_param: domain_name} 165 | 166 | outputs: 167 | entry: 168 | value: 169 | name: {get_param: hostname} 170 | address: {get_attr: [port, fixed_ips, 0, ip_address]} 171 | -------------------------------------------------------------------------------- /heat/master_octavia.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | heat_template_version: 2016-10-14 3 | 4 | description: 5 | An instance for an OpenShift master node. 6 | The instances are created with Heat and then configured with Ansible. 7 | 8 | parameters: 9 | masters_pool: 10 | type: string 11 | description: The masters lbaas pool 12 | 13 | masters_app_port: 14 | type: number 15 | default: 8443 16 | description: Port used by the servers 17 | 18 | external_network: 19 | type: string 20 | description: > 21 | The external network that provides floating IP addresses for the nodes 22 | constraints: 23 | - custom_constraint: neutron.network 24 | 25 | service_network: 26 | description: > 27 | The name or ID of the internal network 28 | type: string 29 | constraints: 30 | - custom_constraint: neutron.network 31 | 32 | service_subnet: 33 | description: > 34 | The name or ID of the internal IPv4 space 35 | type: string 36 | constraints: 37 | - custom_constraint: neutron.subnet 38 | 39 | security_group: 40 | description:
> 41 | Allow OpenShift access to instances 42 | type: string 43 | constraints: 44 | - custom_constraint: neutron.security_group 45 | 46 | # Host Characteristics 47 | hostname: 48 | description: The prefix for master hostnames 49 | type: string 50 | 51 | domain_name: 52 | description: The prefix for master node domain 53 | type: string 54 | 55 | server_group: 56 | description: > 57 | ID of a server group containing all of the master hosts 58 | type: string 59 | 60 | image: 61 | description: > 62 | The Glance image to use as a base for master nodes 63 | type: string 64 | constraints: 65 | - custom_constraint: glance.image 66 | 67 | flavor: 68 | description: > 69 | The name of the OpenStack instance flavor to use for master nodes 70 | type: string 71 | constraints: 72 | - custom_constraint: nova.flavor 73 | 74 | # Access to the VMs 75 | ssh_user: 76 | type: string 77 | description: > 78 | The SSH user available on all nodes. 79 | 80 | ssh_key_name: 81 | type: string 82 | description: Name of the SSH keypair registered with Nova 83 | constraints: 84 | - custom_constraint: nova.keypair 85 | 86 | resources: 87 | host: 88 | type: OS::Nova::Server 89 | properties: 90 | name: 91 | str_replace: 92 | template: "HOSTNAME" 93 | params: 94 | HOSTNAME: {get_param: hostname} 95 | user_data_format: SOFTWARE_CONFIG 96 | user_data: {get_resource: init} 97 | image: {get_param: image} 98 | flavor: {get_param: flavor} 99 | admin_user: {get_param: ssh_user} 100 | key_name: {get_param: ssh_key_name} 101 | networks: 102 | - port: {get_resource: port} 103 | scheduler_hints: 104 | group: {get_param: server_group} 105 | block_device_mapping: 106 | - device_name: vda 107 | volume_id: { get_resource: root_volume } 108 | delete_on_termination: true 109 | 110 | port: 111 | type: OS::Neutron::Port 112 | properties: 113 | security_groups: 114 | - {get_param: security_group} 115 | network: {get_param: service_network} 116 | fixed_ips: 117 | - subnet: {get_param: service_subnet} 118 | replacement_policy: AUTO 119 | 120 | pool_member: 121 | type: OS::Octavia::PoolMember 122 | properties: 123 | pool: { get_param: masters_pool } 124 | address: { get_attr: [ host, first_address ]} 125 | protocol_port: { get_param: masters_app_port } 126 | subnet: { get_param: service_subnet } 127 | 128 | root_volume: 129 | type: OS::Cinder::Volume 130 | properties: 131 | size: 30 132 | image: {get_param: image} 133 | availability_zone: nova 134 | 135 | docker_volume: 136 | type: OS::Cinder::Volume 137 | properties: 138 | size: 25 139 | availability_zone: nova 140 | 141 | volume_attachment: 142 | type: OS::Cinder::VolumeAttachment 143 | properties: 144 | volume_id: { get_resource: docker_volume } 145 | instance_uuid: { get_resource: host } 146 | mountpoint: /dev/vdc 147 | 148 | init: 149 | type: OS::Heat::MultipartMime 150 | properties: 151 | parts: 152 | - config: {get_resource: set_hostname} 153 | 154 | set_hostname: 155 | type: OS::Heat::CloudConfig 156 | properties: 157 | cloud_config: 158 | hostname: {get_param: hostname} 159 | fqdn: 160 | str_replace: 161 | template: "HOSTNAME.DOMAIN" 162 | params: 163 | HOSTNAME: {get_param: hostname} 164 | DOMAIN: {get_param: domain_name} 165 | 166 | outputs: 167 | entry: 168 | value: 169 | name: {get_param: hostname} 170 | address: {get_attr: [port, fixed_ips, 0, ip_address]} 171 | -------------------------------------------------------------------------------- /heat/masters.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | heat_template_version: 
2016-10-14 3 | 4 | description: 5 | A group of OpenShift master nodes. 6 | The instances are created with Heat and then configured with Ansible. 7 | 8 | parameters: 9 | masters_pool: 10 | type: string 11 | description: The masters lbaas pool 12 | 13 | masters_app_port: 14 | type: number 15 | default: 8443 16 | description: Port used by the servers 17 | 18 | master_count: 19 | type: number 20 | description: > 21 | Number of master servers to create. 22 | default: 1 23 | 24 | hostname_prefix: 25 | description: > 26 | The default prefix for master server hostnames 27 | type: string 28 | default: "master" 29 | 30 | domain_name: 31 | description: > 32 | All VMs will be placed in this domain 33 | type: string 34 | 35 | # Connectivity 36 | external_network: 37 | type: string 38 | description: > 39 | The external network that provides floating IP addresses for the masters 40 | constraints: 41 | - custom_constraint: neutron.network 42 | 43 | service_network: 44 | description: > 45 | The name or ID of the internal network 46 | type: string 47 | constraints: 48 | - custom_constraint: neutron.network 49 | 50 | service_subnet: 51 | description: > 52 | The name or ID of the internal IPv4 space 53 | type: string 54 | constraints: 55 | - custom_constraint: neutron.subnet 56 | 57 | security_group: 58 | description: > 59 | Allow OpenShift service access to instances 60 | type: string 61 | constraints: 62 | - custom_constraint: neutron.security_group 63 | 64 | master_server_group_policies: 65 | type: comma_delimited_list 66 | description: > 67 | List of policies applied on master nodes ServerGroup. By default 68 | 'anti-affinity' policy is used to make sure that each master node 69 | is deployed on a different host. If you use a small/all-in-one openstack 70 | environment, you may need to disable this e.g. by passing 71 | '-P master_server_group_policies=affinity'. 72 | default: ['anti-affinity'] 73 | 74 | image: 75 | description: > 76 | The Glance image to use as a base for OpenShift servers 77 | type: string 78 | constraints: 79 | - custom_constraint: glance.image 80 | 81 | master_flavor: 82 | description: > 83 | The name of the OpenStack instance flavor to use for OpenShift servers 84 | type: string 85 | default: m1.medium 86 | constraints: 87 | - custom_constraint: nova.flavor 88 | 89 | # Access to the VMs 90 | ssh_user: 91 | type: string 92 | description: > 93 | The SSH user available on all nodes.
94 | default: cloud-user 95 | 96 | ssh_key_name: 97 | type: string 98 | description: Name of the SSH keypair registered with Nova 99 | constraints: 100 | - custom_constraint: nova.keypair 101 | 102 | resources: 103 | 104 | masters: 105 | type: OS::Heat::ResourceGroup 106 | properties: 107 | count: {get_param: master_count} 108 | resource_def: 109 | type: master.yaml 110 | properties: 111 | masters_pool: {get_param: masters_pool} 112 | masters_app_port: {get_param: masters_app_port} 113 | image: {get_param: image} 114 | flavor: {get_param: master_flavor} 115 | external_network: {get_param: external_network} 116 | service_network: {get_param: service_network} 117 | service_subnet: {get_param: service_subnet} 118 | security_group: {get_param: security_group} 119 | hostname: 120 | str_replace: 121 | template: "%prefix%%index%" 122 | params: 123 | '%prefix%': {get_param: hostname_prefix} 124 | domain_name: {get_param: domain_name} 125 | ssh_user: {get_param: ssh_user} 126 | ssh_key_name: {get_param: ssh_key_name} 127 | server_group: {get_resource: master_server_group} 128 | 129 | master_server_group: 130 | type: OS::Nova::ServerGroup 131 | properties: 132 | name: master_server_group 133 | policies: {get_param: master_server_group_policies} 134 | 135 | 136 | outputs: 137 | entries: 138 | value: {get_attr: [masters, entry]} 139 | -------------------------------------------------------------------------------- /heat/masters_octavia.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | heat_template_version: 2016-10-14 3 | 4 | description: 5 | An instance for a OpenShift master 6 | The instances are created with Heat and then configured with Ansible 7 | 8 | parameters: 9 | masters_pool: 10 | type: string 11 | description: The masters lbaas pool 12 | 13 | masters_app_port: 14 | type: number 15 | default: 8443 16 | description: Port used by the servers 17 | 18 | master_count: 19 | type: number 20 | description: > 21 | Number of slave servers to create. 22 | default: 1 23 | 24 | hostname_prefix: 25 | description: > 26 | The default prefix for slave server hostnames 27 | type: string 28 | default: "master" 29 | 30 | domain_name: 31 | description: > 32 | All VMs will be placed in this domain 33 | type: string 34 | 35 | # Connectivity 36 | external_network: 37 | type: string 38 | description: > 39 | The external network that provides floating IP addresses for the masters 40 | constraints: 41 | - custom_constraint: neutron.network 42 | 43 | service_network: 44 | description: > 45 | The name or ID of the internal network 46 | type: string 47 | constraints: 48 | - custom_constraint: neutron.network 49 | 50 | service_subnet: 51 | description: > 52 | The name or ID of the internal IPv4 space 53 | type: string 54 | constraints: 55 | - custom_constraint: neutron.subnet 56 | 57 | security_group: 58 | description: > 59 | Allow OpenShift service access to instances 60 | type: string 61 | constraints: 62 | - custom_constraint: neutron.security_group 63 | 64 | master_server_group_policies: 65 | type: comma_delimited_list 66 | description: > 67 | List of policies applied on master nodes ServerGroup. By default 68 | 'anti-affinity' policy is used to make sure that each master node 69 | is deployed on a different host. If you use a small/all-in-one openstack 70 | environment, you may need to disable this e.g. by passing 71 | '-P master_server_group_policies=affinity'. 
72 | default: ['anti-affinity'] 73 | 74 | image: 75 | description: > 76 | The Glance image to use as a base for OpenShift servers 77 | type: string 78 | constraints: 79 | - custom_constraint: glance.image 80 | 81 | master_flavor: 82 | description: > 83 | The name of the OpenStack instance flavor to use for OpenShift servers 84 | type: string 85 | default: m1.medium 86 | constraints: 87 | - custom_constraint: nova.flavor 88 | 89 | # Access to the VMs 90 | ssh_user: 91 | type: string 92 | description: > 93 | The SSH user available on all nodes. 94 | default: cloud-user 95 | 96 | ssh_key_name: 97 | type: string 98 | description: Name of the SSH keypair registered with Nova 99 | constraints: 100 | - custom_constraint: nova.keypair 101 | 102 | resources: 103 | 104 | masters: 105 | type: OS::Heat::ResourceGroup 106 | properties: 107 | count: {get_param: master_count} 108 | resource_def: 109 | type: master_octavia.yaml 110 | properties: 111 | masters_pool: {get_param: masters_pool} 112 | masters_app_port: {get_param: masters_app_port} 113 | image: {get_param: image} 114 | flavor: {get_param: master_flavor} 115 | external_network: {get_param: external_network} 116 | service_network: {get_param: service_network} 117 | service_subnet: {get_param: service_subnet} 118 | security_group: {get_param: security_group} 119 | hostname: 120 | str_replace: 121 | template: "%prefix%%index%" 122 | params: 123 | '%prefix%': {get_param: hostname_prefix} 124 | domain_name: {get_param: domain_name} 125 | ssh_user: {get_param: ssh_user} 126 | ssh_key_name: {get_param: ssh_key_name} 127 | server_group: {get_resource: master_server_group} 128 | 129 | master_server_group: 130 | type: OS::Nova::ServerGroup 131 | properties: 132 | name: master_server_group 133 | policies: {get_param: master_server_group_policies} 134 | 135 | 136 | outputs: 137 | entries: 138 | value: {get_attr: [masters, entry]} 139 | -------------------------------------------------------------------------------- /heat/network.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | heat_template_version: 2016-10-14 3 | 4 | description: 5 | A network to host OpenShift SDN 6 | 7 | parameters: 8 | # Networks to connect to or create 9 | external_network: 10 | type: string 11 | description: > 12 | The external network that provides floating IP addresses for the nodes 13 | constraints: 14 | - custom_constraint: neutron.network 15 | 16 | network_name_prefix: 17 | type: string 18 | description: > 19 | The name of the network to create 20 | default: osp 21 | 22 | service_subnet_cidr: 23 | type: string 24 | description: > 25 | The subnet used for instance to instance communication 26 | default: 10.0.1.0/24 27 | 28 | dns_nameservers: 29 | type: comma_delimited_list 30 | description: Addresses of a dns nameserver reachable in your environment 31 | 32 | router_id: 33 | type: string 34 | description: The existing router id 35 | 36 | resources: 37 | 38 | # Network Components 39 | service_network: 40 | type: OS::Neutron::Net 41 | properties: 42 | name: 43 | str_replace: 44 | template: "{{prefix}}-network" 45 | params: 46 | "{{prefix}}": {get_param: network_name_prefix} 47 | 48 | service_subnet: 49 | type: OS::Neutron::Subnet 50 | properties: 51 | cidr: {get_param: service_subnet_cidr} 52 | network: {get_resource: service_network} 53 | dns_nameservers: {get_param: dns_nameservers} 54 | name: 55 | str_replace: 56 | template: "{{prefix}}-subnet" 57 | params: 58 | "{{prefix}}": {get_param: network_name_prefix} 59 | 60 | 
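# The subnet created above is attached to a pre-existing router (router_id)
# by the resource below, rather than to a router owned by this stack, so one
# router can serve several deployments. Such a router can be prepared up
# front, e.g. (names illustrative):
#
#   $ openstack router create osp-router
#   $ openstack router set --external-gateway <external_network> osp-router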
external_router_interface: 61 | type: OS::Neutron::RouterInterface 62 | properties: 63 | #router_id: {get_resource: external_router} 64 | router_id: {get_param: router_id} 65 | subnet: {get_resource: service_subnet} 66 | 67 | outputs: 68 | openshift_network: 69 | description: The network that carries OpenShift traffic 70 | value: {get_resource: service_network} 71 | 72 | openshift_subnet: 73 | description: The subnet that carries OpenShift traffic 74 | value: {get_resource: service_subnet} 75 | -------------------------------------------------------------------------------- /heat/node.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | heat_template_version: 2016-10-14 3 | 4 | description: 5 | An instance for an OpenShift node 6 | The instances are created with Heat and then configured with Ansible 7 | 8 | parameters: 9 | # Connectivity 10 | external_network: 11 | type: string 12 | description: > 13 | The external network that provides floating IP addresses for the nodes 14 | constraints: 15 | - custom_constraint: neutron.network 16 | 17 | service_network: 18 | description: > 19 | The name or ID of the internal network 20 | type: string 21 | constraints: 22 | - custom_constraint: neutron.network 23 | 24 | service_subnet: 25 | description: > 26 | The name or ID of the internal IPv4 space 27 | type: string 28 | constraints: 29 | - custom_constraint: neutron.subnet 30 | 31 | security_group: 32 | description: > 33 | Allow OpenShift service access to instances 34 | type: string 35 | constraints: 36 | - custom_constraint: neutron.security_group 37 | 38 | # Host Characteristics 39 | hostname: 40 | description: The hostname of the node 41 | type: string 42 | 43 | domain_name: 44 | description: All VMs will be placed in this domain 45 | type: string 46 | 47 | server_group: 48 | description: > 49 | ID of a server group containing all of the node hosts 50 | type: string 51 | 52 | image: 53 | description: > 54 | The Glance image to use as a base for OpenShift nodes 55 | type: string 56 | constraints: 57 | - custom_constraint: glance.image 58 | 59 | flavor: 60 | description: > 61 | The name of the OpenStack instance flavor to use for OpenShift nodes 62 | type: string 63 | constraints: 64 | - custom_constraint: nova.flavor 65 | 66 | # Access to the VMs 67 | ssh_user: 68 | type: string 69 | description: > 70 | The SSH user available on all nodes.
71 | 72 | ssh_key_name: 73 | type: string 74 | description: Name of the SSH keypair registered with Nova 75 | constraints: 76 | - custom_constraint: nova.keypair 77 | 78 | resources: 79 | 80 | host: 81 | type: OS::Nova::Server 82 | properties: 83 | name: 84 | str_replace: 85 | template: "HOSTNAME" 86 | params: 87 | HOSTNAME: {get_param: hostname} 88 | user_data_format: SOFTWARE_CONFIG 89 | user_data: {get_resource: init} 90 | image: {get_param: image} 91 | flavor: {get_param: flavor} 92 | admin_user: {get_param: ssh_user} 93 | key_name: {get_param: ssh_key_name} 94 | networks: 95 | - port: {get_resource: port} 96 | scheduler_hints: 97 | group: {get_param: server_group} 98 | block_device_mapping: 99 | - device_name: vda 100 | volume_id: { get_resource: root_volume } 101 | delete_on_termination: true 102 | 103 | port: 104 | type: OS::Neutron::Port 105 | properties: 106 | security_groups: 107 | - {get_param: security_group} 108 | network: {get_param: service_network} 109 | fixed_ips: 110 | - subnet: {get_param: service_subnet} 111 | replacement_policy: AUTO 112 | 113 | root_volume: 114 | type: OS::Cinder::Volume 115 | properties: 116 | size: 30 117 | image: {get_param: image} 118 | availability_zone: nova 119 | 120 | docker_volume: 121 | type: OS::Cinder::Volume 122 | properties: 123 | size: 25 124 | availability_zone: nova 125 | 126 | volume_attachment: 127 | type: OS::Cinder::VolumeAttachment 128 | properties: 129 | volume_id: { get_resource: docker_volume } 130 | instance_uuid: { get_resource: host } 131 | mountpoint: /dev/vdc 132 | 133 | init: 134 | type: OS::Heat::MultipartMime 135 | properties: 136 | parts: 137 | - config: {get_resource: set_hostname} 138 | 139 | set_hostname: 140 | type: OS::Heat::CloudConfig 141 | properties: 142 | cloud_config: 143 | hostname: {get_param: hostname} 144 | fqdn: 145 | str_replace: 146 | template: "HOSTNAME.DOMAIN" 147 | params: 148 | HOSTNAME: {get_param: hostname} 149 | DOMAIN: {get_param: domain_name} 150 | 151 | outputs: 152 | entry: 153 | value: 154 | name: {get_param: hostname} 155 | address: {get_attr: [port, fixed_ips, 0, ip_address]} 156 | -------------------------------------------------------------------------------- /heat/nodes.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | heat_template_version: 2016-10-14 3 | 4 | description: 5 | An instance for an OpenShift node 6 | The instances are created with Heat and then configured with Ansible 7 | 8 | parameters: 9 | node_count: 10 | type: number 11 | description: > 12 | Number of node servers to create.
13 | default: 1 14 | 15 | hostname_prefix: 16 | description: > 17 | The default prefix for node server hostnames 18 | type: string 19 | default: "node" 20 | 21 | domain_name: 22 | description: > 23 | All VMs will be placed in this domain 24 | type: string 25 | 26 | # Connectivity 27 | external_network: 28 | type: string 29 | description: > 30 | The external network that provides floating IP addresses for the nodes 31 | constraints: 32 | - custom_constraint: neutron.network 33 | 34 | service_network: 35 | description: > 36 | The name or ID of the internal network 37 | type: string 38 | constraints: 39 | - custom_constraint: neutron.network 40 | 41 | service_subnet: 42 | description: > 43 | The name or ID of the internal IPv4 space 44 | type: string 45 | constraints: 46 | - custom_constraint: neutron.subnet 47 | 48 | security_group: 49 | description: > 50 | Allow OpenShift service access to instances 51 | type: string 52 | constraints: 53 | - custom_constraint: neutron.security_group 54 | 55 | node_server_group_policies: 56 | type: comma_delimited_list 57 | description: > 58 | List of policies applied on nodes ServerGroup. By default 59 | 'anti-affinity' policy is used to make sure that each node 60 | is deployed on a different host. If you use a small/all-in-one openstack 61 | environment, you may need to disable this e.g. by passing 62 | '-P node_server_group_policies=affinity'. 63 | default: ['anti-affinity'] 64 | 65 | image: 66 | description: > 67 | The Glance image to use as a base for OpenShift servers 68 | type: string 69 | constraints: 70 | - custom_constraint: glance.image 71 | 72 | node_flavor: 73 | description: > 74 | The name of the OpenStack instance flavor to use for OpenShift servers 75 | type: string 76 | default: m1.medium 77 | constraints: 78 | - custom_constraint: nova.flavor 79 | 80 | # Access to the VMs 81 | ssh_user: 82 | type: string 83 | description: > 84 | The SSH user available on all nodes.
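# 'cloud-user' below is the default account on RHEL cloud images; CentOS images typically use 'centos', so override this parameter to match your image.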
85 | default: cloud-user 86 | 87 | ssh_key_name: 88 | type: string 89 | description: Name of the SSH keypair registered with Nova 90 | constraints: 91 | - custom_constraint: nova.keypair 92 | 93 | resources: 94 | 95 | nodes: 96 | type: OS::Heat::ResourceGroup 97 | properties: 98 | count: {get_param: node_count} 99 | resource_def: 100 | type: node.yaml 101 | properties: 102 | image: {get_param: image} 103 | flavor: {get_param: node_flavor} 104 | external_network: {get_param: external_network} 105 | service_network: {get_param: service_network} 106 | service_subnet: {get_param: service_subnet} 107 | security_group: {get_param: security_group} 108 | hostname: 109 | str_replace: 110 | template: "%prefix%%index%" 111 | params: 112 | '%prefix%': {get_param: hostname_prefix} 113 | domain_name: {get_param: domain_name} 114 | ssh_user: {get_param: ssh_user} 115 | ssh_key_name: {get_param: ssh_key_name} 116 | server_group: {get_resource: node_server_group} 117 | 118 | node_server_group: 119 | type: OS::Nova::ServerGroup 120 | properties: 121 | name: node_server_group 122 | policies: {get_param: node_server_group_policies} 123 | 124 | outputs: 125 | entries: 126 | value: {get_attr: [nodes, entry]} 127 | -------------------------------------------------------------------------------- /heat/openshift.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | heat_template_version: 2016-10-14 3 | 4 | description: 5 | An OpenShift cluster made up of master, infra, node and bastion servers 6 | The instances are created with Heat and then configured with Ansible 7 | 8 | parameters: 9 | 10 | external_network: 11 | type: string 12 | description: > 13 | The external network that provides floating IP addresses for the nodes 14 | constraints: 15 | - custom_constraint: neutron.network 16 | 17 | service_network: 18 | description: > 19 | The name or ID of the internal network 20 | type: string 21 | constraints: 22 | - custom_constraint: neutron.network 23 | 24 | service_subnet: 25 | description: > 26 | The name or ID of the internal IPv4 space 27 | type: string 28 | constraints: 29 | - custom_constraint: neutron.subnet 30 | 31 | bastion_hostname: 32 | description: > 33 | The default prefix for bastion server hostnames 34 | type: string 35 | default: "bastion" 36 | 37 | master_hostname_prefix: 38 | description: > 39 | The default prefix for master server hostnames 40 | type: string 41 | default: "master" 42 | 43 | infra_hostname_prefix: 44 | description: > 45 | The default prefix for infra server hostnames 46 | type: string 47 | default: "infra" 48 | 49 | node_hostname_prefix: 50 | description: > 51 | The default prefix for node server hostnames 52 | type: string 53 | default: "node" 54 | 55 | domain_name: 56 | description: > 57 | All VMs will be placed in this domain 58 | type: string 59 | 60 | master_count: 61 | type: number 62 | description: > 63 | Number of master servers to create. 64 | default: 1 65 | 66 | infra_count: 67 | type: number 68 | description: > 69 | Number of infra servers to create. 70 | default: 1 71 | 72 | node_count: 73 | type: number 74 | description: > 75 | Number of node servers to create. 76 | default: 2 77 | 78 | master_server_group_policies: 79 | type: comma_delimited_list 80 | description: > 81 | List of policies applied on master nodes ServerGroup. By default 82 | 'anti-affinity' policy is used to make sure that each master node 83 | is deployed on a different host.
If you use a small/all-in-one openstack 84 | environment, you may need to disable this e.g. by passing 85 | '-P master_server_group_policies=affinity'. 86 | default: ['anti-affinity'] 87 | 88 | infra_server_group_policies: 89 | type: comma_delimited_list 90 | description: > 91 | List of policies applied on infra nodes ServerGroup. By default 92 | 'anti-affinity' policy is used to make sure that each infra node 93 | is deployed on a different host. If you use a small/all-in-one openstack 94 | environment, you may need to disable this e.g. by passing 95 | '-P infra_server_group_policies=affinity'. 96 | default: ['anti-affinity'] 97 | 98 | node_server_group_policies: 99 | type: comma_delimited_list 100 | description: > 101 | List of policies applied on nodes ServerGroup. By default 102 | 'anti-affinity' policy is used to make sure that each node 103 | is deployed on a different host. If you use a small/all-in-one openstack 104 | environment, you may need to disable this e.g. by passing 105 | '-P node_server_group_policies=affinity'. 106 | default: ['anti-affinity'] 107 | 108 | image: 109 | description: > 110 | The Glance image to use as a base for OpenShift servers 111 | type: string 112 | constraints: 113 | - custom_constraint: glance.image 114 | 115 | bastion_flavor: 116 | description: > 117 | The name of the OpenStack instance flavor to use for bastion servers 118 | type: string 119 | default: m1.small 120 | constraints: 121 | - custom_constraint: nova.flavor 122 | 123 | master_flavor: 124 | description: > 125 | The name of the OpenStack instance flavor to use for master servers 126 | type: string 127 | default: m1.medium 128 | constraints: 129 | - custom_constraint: nova.flavor 130 | 131 | infra_flavor: 132 | description: > 133 | The name of the OpenStack instance flavor to use for infra servers 134 | type: string 135 | default: m1.large 136 | constraints: 137 | - custom_constraint: nova.flavor 138 | 139 | node_flavor: 140 | description: > 141 | The name of the OpenStack instance flavor to use for node servers 142 | type: string 143 | default: m1.medium 144 | constraints: 145 | - custom_constraint: nova.flavor 146 | 147 | # Access to the VMs 148 | ssh_user: 149 | type: string 150 | description: > 151 | The SSH user available on all nodes.
152 | default: cloud-user 153 | 154 | ssh_key_name: 155 | type: string 156 | description: Name of the SSH keypair registered with Nova 157 | constraints: 158 | - custom_constraint: nova.keypair 159 | 160 | resources: 161 | 162 | lbaas_masters: 163 | type: lbaas_masters.yaml 164 | properties: 165 | external_network: {get_param: external_network} 166 | service_subnet: {get_param: service_subnet} 167 | 168 | lbaas_infras: 169 | type: lbaas_infras.yaml 170 | properties: 171 | external_network: {get_param: external_network} 172 | service_subnet: {get_param: service_subnet} 173 | 174 | hosts: 175 | type: hosts.yaml 176 | properties: 177 | external_network: {get_param: external_network} 178 | service_network: {get_param: service_network} 179 | service_subnet: {get_param: service_subnet} 180 | masters_pool: {get_attr: [lbaas_masters, masters_pool]} 181 | infras_pool_http: {get_attr: [lbaas_infras, infras_pool_http]} 182 | infras_pool_https: {get_attr: [lbaas_infras, infras_pool_https]} 183 | image: {get_param: image} 184 | bastion_flavor: {get_param: bastion_flavor} 185 | master_flavor: {get_param: master_flavor} 186 | infra_flavor: {get_param: infra_flavor} 187 | node_flavor: {get_param: node_flavor} 188 | bastion_hostname: {get_param: bastion_hostname} 189 | master_hostname_prefix: {get_param: master_hostname_prefix} 190 | infra_hostname_prefix: {get_param: infra_hostname_prefix} 191 | node_hostname_prefix: {get_param: node_hostname_prefix} 192 | master_server_group_policies: {get_param: master_server_group_policies} 193 | infra_server_group_policies: {get_param: infra_server_group_policies} 194 | node_server_group_policies: {get_param: node_server_group_policies} 195 | domain_name: {get_param: domain_name} 196 | master_count: {get_param: master_count} 197 | infra_count: {get_param: infra_count} 198 | node_count: {get_param: node_count} 199 | ssh_user: {get_param: ssh_user} 200 | ssh_key_name: {get_param: ssh_key_name} 201 | 202 | outputs: 203 | ip_address: 204 | description: The floating IP address of the OpenShift servers 205 | value: 206 | bastion: 207 | name: {get_param: bastion_hostname} 208 | address: {get_attr: [hosts, bastion_ip_address]} 209 | masters: {get_attr: [hosts, master_entries]} 210 | infras: {get_attr: [hosts, infra_entries]} 211 | nodes: {get_attr: [hosts, node_entries]} 212 | lb_master: 213 | name: lb_master 214 | address: {get_attr: [lbaas_masters, lb_masters_floatingip]} 215 | lb_infra: 216 | name: lb_infra 217 | address: {get_attr: [lbaas_infras, lb_infras_floatingip]} 218 | openshift_master_url: 219 | value: {get_attr: [lbaas_masters, lburl]} 220 | -------------------------------------------------------------------------------- /heat/openshift_octavia.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | heat_template_version: 2016-10-14 3 | 4 | description: 5 | An OpenShift cluster made up of master, infra, node and bastion servers 6 | The instances are created with Heat and then configured with Ansible 7 | 8 | parameters: 9 | 10 | external_network: 11 | type: string 12 | description: > 13 | The external network that provides floating IP addresses for the nodes 14 | constraints: 15 | - custom_constraint: neutron.network 16 | 17 | service_network: 18 | description: > 19 | The name or ID of the internal network 20 | type: string 21 | constraints: 22 | - custom_constraint: neutron.network 23 | 24 | service_subnet: 25 | description: > 26 | The name or ID of the internal IPv4 space 27 | type: string 28 |
constraints: 29 | - custom_constraint: neutron.subnet 30 | 31 | bastion_hostname: 32 | description: > 33 | The default prefix for bastion server hostnames 34 | type: string 35 | default: "bastion" 36 | 37 | master_hostname_prefix: 38 | description: > 39 | The default prefix for master server hostnames 40 | type: string 41 | default: "master" 42 | 43 | infra_hostname_prefix: 44 | description: > 45 | The default prefix for infra server hostnames 46 | type: string 47 | default: "infra" 48 | 49 | node_hostname_prefix: 50 | description: > 51 | The default prefix for node server hostnames 52 | type: string 53 | default: "node" 54 | 55 | domain_name: 56 | description: > 57 | All VMs will be placed in this domain 58 | type: string 59 | 60 | master_count: 61 | type: number 62 | description: > 63 | Number of master servers to create. 64 | default: 1 65 | 66 | infra_count: 67 | type: number 68 | description: > 69 | Number of infra servers to create. 70 | default: 1 71 | 72 | node_count: 73 | type: number 74 | description: > 75 | Number of node servers to create. 76 | default: 2 77 | 78 | master_server_group_policies: 79 | type: comma_delimited_list 80 | description: > 81 | List of policies applied on master nodes ServerGroup. By default 82 | 'anti-affinity' policy is used to make sure that each master node 83 | is deployed on a different host. If you use a small/all-in-one openstack 84 | environment, you may need to disable this e.g. by passing 85 | '-P master_server_group_policies=affinity'. 86 | default: ['anti-affinity'] 87 | 88 | infra_server_group_policies: 89 | type: comma_delimited_list 90 | description: > 91 | List of policies applied on infra nodes ServerGroup. By default 92 | 'anti-affinity' policy is used to make sure that each infra node 93 | is deployed on a different host. If you use a small/all-in-one openstack 94 | environment, you may need to disable this e.g. by passing 95 | '-P infra_server_group_policies=affinity'. 96 | default: ['anti-affinity'] 97 | 98 | node_server_group_policies: 99 | type: comma_delimited_list 100 | description: > 101 | List of policies applied on nodes ServerGroup. By default 102 | 'anti-affinity' policy is used to make sure that each node 103 | is deployed on a different host. If you use a small/all-in-one openstack 104 | environment, you may need to disable this e.g. by passing 105 | '-P node_server_group_policies=affinity'.
106 | default: ['anti-affinity'] 107 | 108 | image: 109 | description: > 110 | The Glance image to use as a base for OpenShift servers 111 | type: string 112 | constraints: 113 | - custom_constraint: glance.image 114 | 115 | bastion_flavor: 116 | description: > 117 | The name of the OpenStack instance flavor to use for bastion servers 118 | type: string 119 | default: m1.small 120 | constraints: 121 | - custom_constraint: nova.flavor 122 | 123 | master_flavor: 124 | description: > 125 | The name of the OpenStack instance flavor to use for master servers 126 | type: string 127 | default: m1.medium 128 | constraints: 129 | - custom_constraint: nova.flavor 130 | 131 | infra_flavor: 132 | description: > 133 | The name of the OpenStack instance flavor to use for infra servers 134 | type: string 135 | default: m1.large 136 | constraints: 137 | - custom_constraint: nova.flavor 138 | 139 | node_flavor: 140 | description: > 141 | The name of the OpenStack instance flavor to use for node servers 142 | type: string 143 | default: m1.medium 144 | constraints: 145 | - custom_constraint: nova.flavor 146 | 147 | # Access to the VMs 148 | ssh_user: 149 | type: string 150 | description: > 151 | The SSH user available on all nodes. 152 | default: cloud-user 153 | 154 | ssh_key_name: 155 | type: string 156 | description: Name of the SSH keypair registered with Nova 157 | constraints: 158 | - custom_constraint: nova.keypair 159 | 160 | resources: 161 | 162 | lbaas_masters: 163 | type: lbaas_masters_octavia.yaml 164 | properties: 165 | external_network: {get_param: external_network} 166 | service_subnet: {get_param: service_subnet} 167 | 168 | lbaas_infras: 169 | type: lbaas_infras_octavia.yaml 170 | properties: 171 | external_network: {get_param: external_network} 172 | service_subnet: {get_param: service_subnet} 173 | 174 | hosts: 175 | type: hosts_octavia.yaml 176 | properties: 177 | external_network: {get_param: external_network} 178 | service_network: {get_param: service_network} 179 | service_subnet: {get_param: service_subnet} 180 | masters_pool: {get_attr: [lbaas_masters, masters_pool]} 181 | infras_pool_http: {get_attr: [lbaas_infras, infras_pool_http]} 182 | infras_pool_https: {get_attr: [lbaas_infras, infras_pool_https]} 183 | image: {get_param: image} 184 | bastion_flavor: {get_param: bastion_flavor} 185 | master_flavor: {get_param: master_flavor} 186 | infra_flavor: {get_param: infra_flavor} 187 | node_flavor: {get_param: node_flavor} 188 | bastion_hostname: {get_param: bastion_hostname} 189 | master_hostname_prefix: {get_param: master_hostname_prefix} 190 | infra_hostname_prefix: {get_param: infra_hostname_prefix} 191 | node_hostname_prefix: {get_param: node_hostname_prefix} 192 | master_server_group_policies: {get_param: master_server_group_policies} 193 | infra_server_group_policies: {get_param: infra_server_group_policies} 194 | node_server_group_policies: {get_param: node_server_group_policies} 195 | domain_name: {get_param: domain_name} 196 | master_count: {get_param: master_count} 197 | infra_count: {get_param: infra_count} 198 | node_count: {get_param: node_count} 199 | ssh_user: {get_param: ssh_user} 200 | ssh_key_name: {get_param: ssh_key_name} 201 | 202 | outputs: 203 | ip_address: 204 | description: The floating IP address of the OpenShift servers 205 | value: 206 | bastion: 207 | name: {get_param: bastion_hostname} 208 | address: {get_attr: [hosts, bastion_ip_address]} 209 | masters: {get_attr: [hosts, master_entries]} 210 | infras: {get_attr: [hosts, infra_entries]} 211 | nodes: {get_attr:
[hosts, node_entries]} 212 | lb_master: 213 | name: lb_master 214 | address: {get_attr: [lbaas_masters, lb_masters_floatingip]} 215 | lb_infra: 216 | name: lb_infra 217 | address: {get_attr: [lbaas_infras, lb_infras_floatingip]} 218 | openshift_master_url: 219 | value: {get_attr: [lbaas_masters, lburl]} 220 | -------------------------------------------------------------------------------- /heat/openshift_single_lbaas.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | heat_template_version: 2016-10-14 3 | 4 | description: 5 | An OpenShift cluster made up of master, infra, node and bastion servers 6 | The instances are created with Heat and then configured with Ansible 7 | 8 | parameters: 9 | 10 | external_network: 11 | type: string 12 | description: > 13 | The external network that provides floating IP addresses for the nodes 14 | constraints: 15 | - custom_constraint: neutron.network 16 | 17 | service_network: 18 | description: > 19 | The name or ID of the internal network 20 | type: string 21 | constraints: 22 | - custom_constraint: neutron.network 23 | 24 | service_subnet: 25 | description: > 26 | The name or ID of the internal IPv4 space 27 | type: string 28 | constraints: 29 | - custom_constraint: neutron.subnet 30 | 31 | bastion_hostname: 32 | description: > 33 | The default prefix for bastion server hostnames 34 | type: string 35 | default: "bastion" 36 | 37 | master_hostname_prefix: 38 | description: > 39 | The default prefix for master server hostnames 40 | type: string 41 | default: "master" 42 | 43 | infra_hostname_prefix: 44 | description: > 45 | The default prefix for infra server hostnames 46 | type: string 47 | default: "infra" 48 | 49 | node_hostname_prefix: 50 | description: > 51 | The default prefix for node server hostnames 52 | type: string 53 | default: "node" 54 | 55 | domain_name: 56 | description: > 57 | All VMs will be placed in this domain 58 | type: string 59 | 60 | master_count: 61 | type: number 62 | description: > 63 | Number of master servers to create. 64 | default: 1 65 | 66 | infra_count: 67 | type: number 68 | description: > 69 | Number of infra servers to create. 70 | default: 1 71 | 72 | node_count: 73 | type: number 74 | description: > 75 | Number of node servers to create. 76 | default: 2 77 | 78 | master_server_group_policies: 79 | type: comma_delimited_list 80 | description: > 81 | List of policies applied on master nodes ServerGroup. By default 82 | 'anti-affinity' policy is used to make sure that each master node 83 | is deployed on a different host. If you use a small/all-in-one openstack 84 | environment, you may need to disable this e.g. by passing 85 | '-P master_server_group_policies=affinity'. 86 | default: ['anti-affinity'] 87 | 88 | infra_server_group_policies: 89 | type: comma_delimited_list 90 | description: > 91 | List of policies applied on infra nodes ServerGroup. By default 92 | 'anti-affinity' policy is used to make sure that each infra node 93 | is deployed on a different host. If you use a small/all-in-one openstack 94 | environment, you may need to disable this e.g. by passing 95 | '-P infra_server_group_policies=affinity'. 96 | default: ['anti-affinity'] 97 | 98 | node_server_group_policies: 99 | type: comma_delimited_list 100 | description: > 101 | List of policies applied on nodes ServerGroup. By default 102 | 'anti-affinity' policy is used to make sure that each node 103 | is deployed on a different host.
If you use a small/all-in-one openstack 104 | environment, you may need to disable this e.g. by passing 105 | '-P node_server_group_policies=affinity'. 106 | default: ['anti-affinity'] 107 | 108 | image: 109 | description: > 110 | The Glance image to use as a base for OpenShift servers 111 | type: string 112 | constraints: 113 | - custom_constraint: glance.image 114 | 115 | bastion_flavor: 116 | description: > 117 | The name of the OpenStack instance flavor to use for bastion servers 118 | type: string 119 | default: m1.small 120 | constraints: 121 | - custom_constraint: nova.flavor 122 | 123 | master_flavor: 124 | description: > 125 | The name of the OpenStack instance flavor to use for master servers 126 | type: string 127 | default: m1.medium 128 | constraints: 129 | - custom_constraint: nova.flavor 130 | 131 | infra_flavor: 132 | description: > 133 | The name of the OpenStack instance flavor to use for infra servers 134 | type: string 135 | default: m1.large 136 | constraints: 137 | - custom_constraint: nova.flavor 138 | 139 | node_flavor: 140 | description: > 141 | The name of the OpenStack instance flavor to use for node servers 142 | type: string 143 | default: m1.medium 144 | constraints: 145 | - custom_constraint: nova.flavor 146 | 147 | # Access to the VMs 148 | ssh_user: 149 | type: string 150 | description: > 151 | The SSH user available on all nodes. 152 | default: cloud-user 153 | 154 | ssh_key_name: 155 | type: string 156 | description: Name of the SSH keypair registered with Nova 157 | constraints: 158 | - custom_constraint: nova.keypair 159 | 160 | resources: 161 | 162 | lbaas: 163 | type: lbaas_single.yaml 164 | properties: 165 | external_network: {get_param: external_network} 166 | service_subnet: {get_param: service_subnet} 167 | 168 | hosts: 169 | type: hosts.yaml 170 | properties: 171 | external_network: {get_param: external_network} 172 | service_network: {get_param: service_network} 173 | service_subnet: {get_param: service_subnet} 174 | masters_pool: {get_attr: [lbaas, masters_pool]} 175 | infras_pool_http: {get_attr: [lbaas, infras_pool_http]} 176 | infras_pool_https: {get_attr: [lbaas, infras_pool_https]} 177 | image: {get_param: image} 178 | bastion_flavor: {get_param: bastion_flavor} 179 | master_flavor: {get_param: master_flavor} 180 | infra_flavor: {get_param: infra_flavor} 181 | node_flavor: {get_param: node_flavor} 182 | bastion_hostname: {get_param: bastion_hostname} 183 | master_hostname_prefix: {get_param: master_hostname_prefix} 184 | infra_hostname_prefix: {get_param: infra_hostname_prefix} 185 | node_hostname_prefix: {get_param: node_hostname_prefix} 186 | master_server_group_policies: {get_param: master_server_group_policies} 187 | infra_server_group_policies: {get_param: infra_server_group_policies} 188 | node_server_group_policies: {get_param: node_server_group_policies} 189 | domain_name: {get_param: domain_name} 190 | master_count: {get_param: master_count} 191 | infra_count: {get_param: infra_count} 192 | node_count: {get_param: node_count} 193 | ssh_user: {get_param: ssh_user} 194 | ssh_key_name: {get_param: ssh_key_name} 195 | 196 | outputs: 197 | ip_address: 198 | description: The floating IP address of the OpenShift servers 199 | value: 200 | bastion: 201 | name: {get_param: bastion_hostname} 202 | address: {get_attr: [hosts, bastion_ip_address]} 203 | masters: {get_attr: [hosts, master_entries]} 204 | infras: {get_attr: [hosts, infra_entries]} 205 | nodes: {get_attr: [hosts, node_entries]} 206 | lb_master: 207 | name: lb_master 208 | address:
{get_attr: [lbaas, lb_floatingip]} 209 | lb_infra: 210 | name: lb_infra 211 | address: {get_attr: [lbaas, lb_floatingip]} 212 | openshift_master_url: 213 | value: {get_attr: [lbaas, lburl]} 214 | -------------------------------------------------------------------------------- /images/one.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ktenzer/openshift-on-openstack-123/e4f99ed5173e7063fd7c63605876d743ee900b03/images/one.png -------------------------------------------------------------------------------- /images/openshift_on_openstack_ha.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ktenzer/openshift-on-openstack-123/e4f99ed5173e7063fd7c63605876d743ee900b03/images/openshift_on_openstack_ha.PNG -------------------------------------------------------------------------------- /images/openshift_on_openstack_non_ha.PNG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ktenzer/openshift-on-openstack-123/e4f99ed5173e7063fd7c63605876d743ee900b03/images/openshift_on_openstack_non_ha.PNG -------------------------------------------------------------------------------- /images/openshift_single_master.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ktenzer/openshift-on-openstack-123/e4f99ed5173e7063fd7c63605876d743ee900b03/images/openshift_single_master.png -------------------------------------------------------------------------------- /images/three.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ktenzer/openshift-on-openstack-123/e4f99ed5173e7063fd7c63605876d743ee900b03/images/three.png -------------------------------------------------------------------------------- /images/two.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ktenzer/openshift-on-openstack-123/e4f99ed5173e7063fd7c63605876d743ee900b03/images/two.png -------------------------------------------------------------------------------- /roles/all-prep/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: Copy hosts file from template 2 | template: 3 | src: templates/hosts.j2 4 | dest: /etc/hosts 5 | owner: root 6 | group: root 7 | mode: 0644 8 | 9 | - name: copy certificate authority to trusted ca path of the os 10 | copy: 11 | src: /etc/pki/ca-trust/source/anchors/cacert.pem 12 | dest: /etc/pki/ca-trust/source/anchors 13 | when: bastion_repo 14 | 15 | - name: update trusted ca redhat 16 | shell: /bin/update-ca-trust 17 | when: bastion_repo 18 | 19 | - name: Set correct repo ip if bastion repo is enabled 20 | set_fact: 21 | repo_base_url: "http://{{hostvars['bastion']['ansible_default_ipv4']['address'] }}/repo" 22 | when: bastion_repo 23 | 24 | - name: Set correct repo ip if bastion repo is not enabled 25 | set_fact: 26 | repo_base_url: "{{rhn_local_repo_base_url}}" 27 | when: not bastion_repo 28 | 29 | - name: Enable correct local repos 30 | yum_repository: 31 | name: "{{item}}" 32 | description: "{{item}}" 33 | file: openshift 34 | baseurl: "{{repo_base_url}}/{{item}}" 35 | gpgcheck: no 36 | with_items: "{{repos}}" 37 | when: openshift_deployment == "openshift-enterprise" and rhn_local_repo 38 | 39 | - name: Ensure that required packages are present on target hosts 40 
| yum: 41 | name: "{{item}}" 42 | state: latest 43 | retries: 5 44 | delay: 5 45 | with_items: "{{packages}}" 46 | 47 | - name: Ensure NetworkManager is enabled and started 48 | systemd: 49 | name: NetworkManager 50 | enabled: yes 51 | state: started 52 | -------------------------------------------------------------------------------- /roles/all-prep/templates/hosts.j2: -------------------------------------------------------------------------------- 1 | 127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4 2 | ::1 localhost localhost.localdomain localhost6 localhost6.localdomain6 3 | {% for host in play_hosts %} 4 | {% if 'ansible_eth0' in hostvars[host] %} 5 | {{ hostvars[host]['ansible_eth0']['ipv4']['address'] }} {{ hostvars[host]['ansible_nodename'] }} {{ host }}.local 6 | {% endif %} 7 | {% endfor %} 8 | -------------------------------------------------------------------------------- /roles/bastion-prep/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: Enable bastion specific repos (redhat) 2 | command: "subscription-manager repos --enable={{item}}" 3 | retries: 5 4 | delay: 5 5 | with_items: "{{repos_bastion}}" 6 | when: openshift_deployment == "openshift-enterprise" and not rhn_local_repo 7 | 8 | - name: Enable correct local repos 9 | yum_repository: 10 | name: "{{item}}" 11 | description: "{{item}}" 12 | file: openshift 13 | baseurl: "{{rhn_local_repo_base_url}}/{{item}}" 14 | gpgcheck: no 15 | with_items: 16 | - "{{repos_bastion}}" 17 | - "{{repos}}" 18 | when: openshift_deployment == "openshift-enterprise" and rhn_local_repo 19 | 20 | - name: Enable bastion specific repos (centos) 21 | yum: 22 | name: "{{item}}" 23 | state: latest 24 | retries: 5 25 | delay: 5 26 | with_items: "{{origin_repos_bastion}}" 27 | when: openshift_deployment == "origin" 28 | 29 | - name: Ensure that required packages are installed on bastion 30 | yum: 31 | name: "{{item}}" 32 | state: latest 33 | retries: 5 34 | delay: 5 35 | with_items: "{{packages}}" 36 | 37 | - name: Perform yum update 38 | yum: 39 | name: '*' 40 | state: latest 41 | 42 | - name: Install required openstack packages for bastion (redhat) 43 | yum: 44 | name: "{{item}}" 45 | state: installed 46 | # keep the OSE and fast-datapath repos out of dependency resolution here 47 | disablerepo: rhel-7-fast-datapath-rpms,rhel-7-server-ose-{{ openshift_version }}-rpms 48 | with_items: "{{packages_bastion_openstack}}" 49 | when: openshift_deployment == "openshift-enterprise" 50 | 51 | - name: Install required openstack packages for bastion (centos) 52 | yum: 53 | name: "{{item}}" 54 | state: installed 55 | with_items: "{{packages_bastion_openstack}}" 56 | when: openshift_deployment == "origin" 57 | 58 | - name: Install required installer packages for bastion (redhat) 59 | yum: 60 | name: "{{item}}" 61 | state: installed 62 | disablerepo: rhel-7-server-openstack-{{ openstack_version }}-tools-rpms 63 | with_items: "{{ packages_bastion_openshift }}" 64 | when: openshift_deployment == "openshift-enterprise" 65 | 66 | - name: Install required installer packages for bastion (centos) 67 | yum: 68 | name: "{{item}}" 69 | state: installed 70 | with_items: "{{packages_bastion_origin}}" 71 | when: openshift_deployment == "origin" 72 | 73 | - name: Disable epel repository (centos) 74 | yum_repository: 75 | name: epel 76 | baseurl: https://dl.fedoraproject.org/pub/epel/7/$basearch/ 77 |
description: "EPEL" 78 | enabled: no 79 | when: openshift_deployment == "origin" 80 | 81 | - name: Clone OKD repository 82 | git: 83 | repo: 'https://github.com/openshift/openshift-ansible' 84 | dest: /home/{{ ssh_user }}/openshift-ansible 85 | version: release-{{ openshift_version }} 86 | when: openshift_deployment == "origin" 87 | 88 | - name: Create ~/.ssh/ directory 89 | become: false 90 | file: 91 | path: ~/.ssh 92 | state: directory 93 | mode: 0700 94 | 95 | - name: Copy OpenStack ssh key to bastion 96 | become: false 97 | copy: 98 | src: "{{ ssh_key_path }}" 99 | dest: ~/.ssh/id_rsa 100 | mode: 0400 101 | owner: "{{ ssh_user }}" 102 | 103 | - name: Copy playbooks to bastion 104 | copy: 105 | src: "{{ playbook_dir }}" 106 | dest: /home/{{ ssh_user }} 107 | directory_mode: 0755 108 | mode: 0755 109 | owner: "{{ ssh_user }}" 110 | group: "{{ ssh_user }}" 111 | 112 | - name: Copy ansible.cfg to user home dir 113 | copy: 114 | src: "{{ playbook_dir }}/ansible.cfg" 115 | dest: /home/{{ ssh_user }}/.ansible.cfg 116 | mode: 0644 117 | owner: "{{ ssh_user }}" 118 | group: "{{ ssh_user }}" 119 | -------------------------------------------------------------------------------- /roles/bastion-repo/defaults/main.yml: -------------------------------------------------------------------------------- 1 | upstream_registry: registry.access.redhat.com 2 | local_registry: bastion:5000 3 | registry_tag: v3.11.16 4 | major_registry_tag: v3.11 5 | platform_infrastructure: 6 | # ose and ose-pod images are being tested with latest tag 7 | - "{{upstream_registry}}/openshift3/ose:latest" 8 | - "{{upstream_registry}}/openshift3/ose-pod:latest" 9 | - "{{upstream_registry}}/openshift3/apb-base:{{registry_tag}}" 10 | - "{{upstream_registry}}/openshift3/apb-tools:{{registry_tag}}" 11 | - "{{upstream_registry}}/openshift3/automation-broker-apb:{{registry_tag}}" 12 | - "{{upstream_registry}}/openshift3/csi-attacher:{{registry_tag}}" 13 | - "{{upstream_registry}}/openshift3/csi-driver-registrar:{{registry_tag}}" 14 | - "{{upstream_registry}}/openshift3/csi-livenessprobe:{{registry_tag}}" 15 | - "{{upstream_registry}}/openshift3/csi-provisioner:{{registry_tag}}" 16 | - "{{upstream_registry}}/openshift3/grafana:{{registry_tag}}" 17 | - "{{upstream_registry}}/openshift3/image-inspector:{{registry_tag}}" 18 | - "{{upstream_registry}}/openshift3/mariadb-apb:{{registry_tag}}" 19 | - "{{upstream_registry}}/openshift3/mediawiki:{{registry_tag}}" 20 | - "{{upstream_registry}}/openshift3/mediawiki-apb:{{registry_tag}}" 21 | - "{{upstream_registry}}/openshift3/mysql-apb:{{registry_tag}}" 22 | - "{{upstream_registry}}/openshift3/ose-ansible:{{registry_tag}}" 23 | - "{{upstream_registry}}/openshift3/ose-ansible-service-broker:{{registry_tag}}" 24 | - "{{upstream_registry}}/openshift3/ose-cli:{{registry_tag}}" 25 | - "{{upstream_registry}}/openshift3/ose-cluster-autoscaler:{{registry_tag}}" 26 | - "{{upstream_registry}}/openshift3/ose-cluster-capacity:{{registry_tag}}" 27 | - "{{upstream_registry}}/openshift3/ose-cluster-monitoring-operator:{{registry_tag}}" 28 | - "{{upstream_registry}}/openshift3/ose-console:{{registry_tag}}" 29 | - "{{upstream_registry}}/openshift3/ose-configmap-reloader:{{registry_tag}}" 30 | - "{{upstream_registry}}/openshift3/ose-control-plane:{{registry_tag}}" 31 | - "{{upstream_registry}}/openshift3/ose-deployer:{{registry_tag}}" 32 | - "{{upstream_registry}}/openshift3/ose-descheduler:{{registry_tag}}" 33 | - "{{upstream_registry}}/openshift3/ose-docker-builder:{{registry_tag}}" 34 | - 
"{{upstream_registry}}/openshift3/ose-docker-registry:{{registry_tag}}" 35 | - "{{upstream_registry}}/openshift3/ose-efs-provisioner:{{registry_tag}}" 36 | - "{{upstream_registry}}/openshift3/ose-egress-dns-proxy:{{registry_tag}}" 37 | - "{{upstream_registry}}/openshift3/ose-egress-http-proxy:{{registry_tag}}" 38 | - "{{upstream_registry}}/openshift3/ose-egress-router:{{registry_tag}}" 39 | - "{{upstream_registry}}/openshift3/ose-haproxy-router:{{registry_tag}}" 40 | - "{{upstream_registry}}/openshift3/ose-hyperkube:{{registry_tag}}" 41 | - "{{upstream_registry}}/openshift3/ose-hypershift:{{registry_tag}}" 42 | - "{{upstream_registry}}/openshift3/ose-keepalived-ipfailover:{{registry_tag}}" 43 | - "{{upstream_registry}}/openshift3/ose-kube-rbac-proxy:{{registry_tag}}" 44 | - "{{upstream_registry}}/openshift3/ose-kube-state-metrics:{{registry_tag}}" 45 | - "{{upstream_registry}}/openshift3/ose-metrics-server:{{registry_tag}}" 46 | - "{{upstream_registry}}/openshift3/ose-node:{{registry_tag}}" 47 | - "{{upstream_registry}}/openshift3/ose-node-problem-detector:{{registry_tag}}" 48 | - "{{upstream_registry}}/openshift3/ose-operator-lifecycle-manager:{{registry_tag}}" 49 | - "{{upstream_registry}}/openshift3/ose-pod:{{registry_tag}}" 50 | - "{{upstream_registry}}/openshift3/ose-prometheus-config-reloader:{{registry_tag}}" 51 | - "{{upstream_registry}}/openshift3/ose-prometheus-operator:{{registry_tag}}" 52 | - "{{upstream_registry}}/openshift3/ose-recycler:{{registry_tag}}" 53 | - "{{upstream_registry}}/openshift3/ose-service-catalog:{{registry_tag}}" 54 | - "{{upstream_registry}}/openshift3/ose-template-service-broker:{{registry_tag}}" 55 | - "{{upstream_registry}}/openshift3/ose-web-console:{{registry_tag}}" 56 | - "{{upstream_registry}}/openshift3/postgresql-apb:{{registry_tag}}" 57 | - "{{upstream_registry}}/openshift3/registry-console:{{registry_tag}}" 58 | - "{{upstream_registry}}/openshift3/snapshot-controller:{{registry_tag}}" 59 | - "{{upstream_registry}}/openshift3/snapshot-provisioner:{{registry_tag}}" 60 | - "{{upstream_registry}}/rhel7/etcd:3.2.22" 61 | 62 | platform_infrastructure_optional: 63 | - "{{upstream_registry}}/openshift3/metrics-cassandra:{{registry_tag}}" 64 | - "{{upstream_registry}}/openshift3/metrics-hawkular-metrics:{{registry_tag}}" 65 | - "{{upstream_registry}}/openshift3/metrics-hawkular-openshift-agent:{{registry_tag}}" 66 | - "{{upstream_registry}}/openshift3/metrics-heapster:{{registry_tag}}" 67 | - "{{upstream_registry}}/openshift3/oauth-proxy:{{registry_tag}}" 68 | - "{{upstream_registry}}/openshift3/ose-logging-curator5:{{registry_tag}}" 69 | - "{{upstream_registry}}/openshift3/ose-logging-elasticsearch5:{{registry_tag}}" 70 | - "{{upstream_registry}}/openshift3/ose-logging-eventrouter:{{registry_tag}}" 71 | - "{{upstream_registry}}/openshift3/ose-logging-fluentd:{{registry_tag}}" 72 | - "{{upstream_registry}}/openshift3/ose-logging-kibana5:{{registry_tag}}" 73 | - "{{upstream_registry}}/openshift3/ose-metrics-schema-installer:{{registry_tag}}" 74 | - "{{upstream_registry}}/openshift3/prometheus:{{registry_tag}}" 75 | - "{{upstream_registry}}/openshift3/prometheus-alert-buffer:{{registry_tag}}" 76 | - "{{upstream_registry}}/openshift3/prometheus-alertmanager:{{registry_tag}}" 77 | - "{{upstream_registry}}/openshift3/prometheus-node-exporter:{{registry_tag}}" 78 | - "{{upstream_registry}}/cloudforms46/cfme-openshift-postgresql" 79 | - "{{upstream_registry}}/cloudforms46/cfme-openshift-memcached" 80 | - 
"{{upstream_registry}}/cloudforms46/cfme-openshift-app-ui" 81 | - "{{upstream_registry}}/cloudforms46/cfme-openshift-app" 82 | - "{{upstream_registry}}/cloudforms46/cfme-openshift-embedded-ansible" 83 | - "{{upstream_registry}}/cloudforms46/cfme-openshift-httpd" 84 | - "{{upstream_registry}}/cloudforms46/cfme-httpd-configmap-generator" 85 | - "{{upstream_registry}}/rhgs3/rhgs-server-rhel7" 86 | - "{{upstream_registry}}/rhgs3/rhgs-volmanager-rhel7" 87 | - "{{upstream_registry}}/rhgs3/rhgs-gluster-block-prov-rhel7" 88 | - "{{upstream_registry}}/rhgs3/rhgs-s3-server-rhel7" 89 | -------------------------------------------------------------------------------- /roles/bastion-repo/files/rclonefs: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | remote=$1 3 | mountpoint=$2 4 | shift 2 5 | 6 | # Process -o parameters 7 | while getopts :o: opts; do 8 | case $opts in 9 | o) 10 | params=${OPTARG//,/ } 11 | for param in $params; do 12 | if [ "$param" == "rw" ]; then continue; fi 13 | if [ "$param" == "ro" ]; then continue; fi 14 | if [ "$param" == "dev" ]; then continue; fi 15 | if [ "$param" == "suid" ]; then continue; fi 16 | if [ "$param" == "exec" ]; then continue; fi 17 | if [ "$param" == "auto" ]; then continue; fi 18 | if [ "$param" == "nodev" ]; then continue; fi 19 | if [ "$param" == "nosuid" ]; then continue; fi 20 | if [ "$param" == "noexec" ]; then continue; fi 21 | if [ "$param" == "noauto" ]; then continue; fi 22 | if [[ $param == x-systemd.* ]]; then continue; fi 23 | trans="$trans --$param" 24 | done 25 | ;; 26 | \?) 27 | echo "Invalid option: -$OPTARG" 28 | ;; 29 | esac 30 | done 31 | 32 | # exec rclone 33 | trans="$trans $remote $mountpoint" 34 | PATH=$PATH rclone mount $trans & 35 | 36 | out=`ls -l $dst` 37 | until [ "$out" != 'total 0' ]; do 38 | out=`ls -l $dst` 39 | sleep 1 40 | done 41 | -------------------------------------------------------------------------------- /roles/bastion-repo/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: Update host file 2 | lineinfile: 3 | dest: /etc/hosts 4 | regexp: "{{ hostvars['bastion']['ansible_default_ipv4']['address'] }} bastion bastion.local" 5 | line: "{{ hostvars['bastion']['ansible_default_ipv4']['address'] }} bastion bastion.local" 6 | state: present 7 | 8 | - name: Install required installer packages for bastion (redhat) 9 | yum: 10 | name: "{{item}}" 11 | state: installed 12 | with_items: 13 | - yum-utils 14 | - createrepo 15 | - httpd.x86_64 16 | - docker-python.x86_64 17 | - python-virtualenv.noarch 18 | - fuse 19 | - docker 20 | 21 | - name: Enable and start httpd 22 | systemd: 23 | state: started 24 | name: httpd 25 | enabled: yes 26 | 27 | - name: Create repo directory 28 | file: 29 | path: /var/www/html/repo 30 | state: directory 31 | 32 | - name: Install rclone 33 | yum: 34 | name: "{{rclone_url}}" 35 | state: present 36 | 37 | - name: Create rclone config directory 38 | file: 39 | path: /root/.config/rclone 40 | state: directory 41 | 42 | - name: Create rclone config 43 | template: 44 | src: templates/rclone.conf.j2 45 | dest: /root/.config/rclone/rclone.conf 46 | 47 | - name: Copy rclonefs script 48 | copy: 49 | src: files/rclonefs 50 | dest: /bin/rclonefs 51 | mode: 0755 52 | 53 | - name: Create rclone systemd mount file 54 | template: 55 | src: templates/rclone-systemd.mount.j2 56 | dest: "/etc/systemd/system/{{swift_openshift_rhn_repo_mount[1:]| replace('/', '-')}}.mount" 57 | 58 | - name: Enable and start 
rclone systemd 59 | systemd: 60 | state: started 61 | daemon_reload: yes 62 | name: "{{swift_openshift_rhn_repo_mount[1:]| replace('/', '-')}}.mount" 63 | enabled: yes 64 | 65 | - name: Create python virtual environment 66 | pip: 67 | name: certauth 68 | virtualenv: ~/certauth 69 | virtualenv_site_packages: yes 70 | 71 | - name: Allow httpd access to fuse mount 72 | seboolean: 73 | name: httpd_use_fusefs 74 | state: yes 75 | persistent: yes 76 | 77 | - name: Check if certificates have already been generated 78 | stat: 79 | path: "{{registry_config_dir}}/certs" 80 | register: certs 81 | 82 | - name: Create CA certificate 83 | shell: | 84 | source certauth/bin/activate 85 | certauth ./rootca.pem -c "Bastion CA" 86 | certauth ./rootca.pem --hostname "{{local_registry}}" -d ./certs_dir 87 | openssl x509 -in certs_dir/{{local_registry}}.pem > fullchain.pem 88 | openssl x509 -in rootca.pem > cacert.pem 89 | cat cacert.pem >> fullchain.pem 90 | openssl rsa -in certs_dir/{{local_registry}}.pem > privkey.pem 91 | mkdir -p {{registry_config_dir}}/certs 92 | cp privkey.pem {{registry_config_dir}}/certs 93 | cp fullchain.pem {{registry_config_dir}}/certs 94 | args: 95 | chdir: "{{ lookup('env','HOME') }}" 96 | when: certs.stat.exists == False 97 | 98 | - name: Copy cacert.pem to trusted 99 | copy: 100 | src: "{{ lookup('env','HOME') }}/cacert.pem" 101 | dest: /etc/pki/ca-trust/source/anchors/cacert.pem 102 | remote_src: yes 103 | register: cacert 104 | 105 | - name: Update CA trust 106 | command: update-ca-trust 107 | when: cacert.changed 108 | 109 | - name: Create docker registry config directory 110 | file: 111 | path: "{{registry_config_dir}}" 112 | state: directory 113 | mode: 0755 114 | 115 | - name: Create docker registry config file 116 | template: 117 | src: templates/registry-config.yml.j2 118 | dest: "{{registry_config_dir}}/config.yml" 119 | 120 | - name: Check if htpasswd is already present 121 | stat: 122 | path: "{{registry_config_dir}}/htpasswd" 123 | register: p 124 | 125 | - name: Start and enable docker 126 | systemd: 127 | name: docker 128 | state: restarted 129 | enabled: yes 130 | 131 | - name: Create docker registry user/pass 132 | shell: | 133 | docker run --entrypoint htpasswd registry:2 -Bbn {{registry_username}} {{ registry_password}} > {{registry_config_dir}}/htpasswd 134 | when: p.stat.exists == False 135 | become: yes 136 | 137 | - name: Run docker registry 138 | docker_container: 139 | name: registry 140 | image: registry:2 141 | volumes: 142 | - "{{registry_config_dir}}/config.yml:/etc/docker/registry/config.yml:Z" 143 | - "{{registry_config_dir}}/htpasswd:/auth/htpasswd:Z" 144 | - "{{registry_config_dir}}/certs:/certs:Z" 145 | restart_policy: always 146 | published_ports: "{{ansible_default_ipv4.address}}:443:443" 147 | 148 | #- name: Add insecure registry 149 | # template: 150 | # src: templates/daemon.json.j2 151 | # dest: /etc/docker/daemon.json 152 | 153 | - name: Restart docker 154 | systemd: 155 | name: docker 156 | state: restarted 157 | enabled: yes 158 | 159 | - name: Log into upstream registry 160 | docker_login: 161 | registry: "{{upstream_registry}}" 162 | username: "{{rhn_username_or_org_id}}" 163 | password: "{{rhn_password_or_activation_key}}" 164 | 165 | - name: Log into private registry 166 | docker_login: 167 | registry: "{{local_registry}}" 168 | username: "{{registry_username}}" 169 | password: "{{registry_password}}" 170 | 171 | - name: Check if our registry already has images 172 | uri: 173 | user: "{{registry_username}}" 174 | password: "{{registry_password}}" 175 |
url: "https://{{local_registry}}/v2/_catalog" 176 | register: registry_catalog 177 | 178 | - name: Docker images 179 | block: 180 | - name: pull an image 181 | docker_image: 182 | name: "{{item}}" 183 | with_items: 184 | - "{{platform_infrastructure}}" 185 | - "{{platform_infrastructure_optional}}" 186 | 187 | - name: Tag images with local registry 188 | command: docker tag "{{item}}" "{{item | replace(upstream_registry,local_registry)}}" 189 | with_items: 190 | - "{{platform_infrastructure}}" 191 | - "{{platform_infrastructure_optional}}" 192 | 193 | - name: Tag images with major tag 194 | command: docker tag "{{item | replace(upstream_registry,local_registry)}}" "{{item | replace(upstream_registry,local_registry) | replace(registry_tag,major_registry_tag)}}" 195 | with_items: 196 | - "{{platform_infrastructure}}" 197 | - "{{platform_infrastructure_optional}}" 198 | 199 | - name: Push docker images with minor version 200 | command: docker push "{{item | replace(upstream_registry,local_registry)}}" 201 | with_items: 202 | - "{{platform_infrastructure}}" 203 | - "{{platform_infrastructure_optional}}" 204 | 205 | - name: Push docker images with major version 206 | command: docker push "{{item | replace(upstream_registry,local_registry) | replace(registry_tag,major_registry_tag)}}" 207 | with_items: 208 | - "{{platform_infrastructure}}" 209 | - "{{platform_infrastructure_optional}}" 210 | 211 | when: "'openshift3/ose' not in registry_catalog.json.repositories" 212 | 213 | - name: Create reposync directory 214 | file: 215 | path: "{{ lookup('env','HOME') }}/repo" 216 | state: directory 217 | 218 | - name: Check if repositories already exist 219 | stat: 220 | path: "/var/www/html/repo/{{item}}" 221 | #state: directory 222 | with_items: "{{repos}}" 223 | register: bastion_repo_dirs 224 | 225 | - name: Sync all required repositories 226 | shell: | 227 | reposync -n -p repo/ -r {{item.item}} 228 | createrepo --update repo/{{item.item}} 229 | rclone sync repo/{{item.item}} Swift1://{{swift_rhn_repo_container_name}}/{{item.item}} 230 | rm -rf repo/{{item.item}} 231 | args: 232 | chdir: "{{ lookup('env','HOME') }}" 233 | with_items: "{{ bastion_repo_dirs.results }}" 234 | when: 235 | - not item | skipped 236 | - item.stat.exists == false 237 | become: yes 238 | register: new_data 239 | 240 | - name: Refresh rclone cache 241 | shell: | 242 | kill -SIGHUP $(pidof rclone) 243 | when: new_data.changed 244 | become: yes 245 | args: 246 | executable: /bin/bash 247 | -------------------------------------------------------------------------------- /roles/bastion-repo/templates/daemon.json.j2: -------------------------------------------------------------------------------- 1 | { "insecure-registries":["bastion:5000"] } 2 | -------------------------------------------------------------------------------- /roles/bastion-repo/templates/rclone-systemd.mount.j2: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=rclone mount for Swift1://{{swift_rhn_repo_container_name}} 3 | Requires=NetworkManager.service 4 | Wants=network-online.target 5 | After=network-online.target 6 | 7 | [Mount] 8 | What=rclonefs#Swift1://{{swift_rhn_repo_container_name}} 9 | Where={{swift_openshift_rhn_repo_mount}} 10 | Type=fuse 11 | Options=auto,config=/root/.config/rclone/rclone.conf,allow-other,default-permissions,rw,fast-list,cache-writes,vfs-cache-mode=writes,cache-dir=/tmp/rclone/vfs,cache-db-path=/tmp/rclone/cache 12 | TimeoutSec=30 13 | 14 | [Install] 15 | 
WantedBy=multi-user.target 16 | -------------------------------------------------------------------------------- /roles/bastion-repo/templates/rclone.conf.j2: -------------------------------------------------------------------------------- 1 | [Swift1] 2 | type = swift 3 | env_auth = false 4 | user = {{openstack_user}} 5 | key = {{openstack_passwd}} 6 | auth = {{openstack_auth_url}} 7 | user_id = 8 | domain = default 9 | tenant = {{openstack_project}} 10 | tenant_id = 11 | tenant_domain = default 12 | region = 13 | storage_url = 14 | auth_token = 15 | auth_version = 3 16 | endpoint_type = public 17 | -------------------------------------------------------------------------------- /roles/bastion-repo/templates/registry-config.yml.j2: -------------------------------------------------------------------------------- 1 | version: 0.1 2 | log: 3 | accesslog: 4 | disabled: false 5 | level: info 6 | formatter: text 7 | fields: 8 | service: registry 9 | environment: staging 10 | storage: 11 | swift: 12 | username: {{openstack_user}} 13 | password: {{openstack_passwd}} 14 | authurl: {{openstack_auth_url}} 15 | tenant: {{openstack_project}} 16 | domain: {{openstack_domain}} 17 | insecureskipverify: true 18 | container: {{swift_rhn_registry_container_name}} 19 | rootdirectory: / 20 | delete: 21 | enabled: true 22 | redirect: 23 | disable: false 24 | auth: 25 | htpasswd: 26 | realm: basic-realm 27 | path: /auth/htpasswd 28 | http: 29 | addr: 0.0.0.0:443 30 | prefix: / 31 | host: https://{{local_registry}}:443 32 | relativeurls: false 33 | secret: asecretforlocaldevelopment 34 | tls: 35 | certificate: /certs/fullchain.pem 36 | key: /certs/privkey.pem 37 | -------------------------------------------------------------------------------- /roles/docker-prep/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: Install Docker {{ docker_version }} 2 | package: 3 | name: docker-{{ docker_version }} 4 | state: installed 5 | 6 | - name: Perform yum update 7 | yum: 8 | name=* 9 | state=latest 10 | 11 | - name: Update /etc/sysconfig/docker 12 | lineinfile: 13 | dest: /etc/sysconfig/docker 14 | regexp: '^(.*)OPTIONS(.*)$' 15 | line: "OPTIONS='--insecure-registry=172.30.0.0/16 --selinux-enabled --log-driver=journald'" 16 | backrefs: yes 17 | 18 | - name: Configure Docker Storage 19 | blockinfile: | 20 | dest=/etc/sysconfig/docker-storage-setup 21 | backup=yes 22 | content="DEVS=/dev/vdb 23 | VG=docker-vg" 24 | 25 | - name: Run Docker Storage Setup 26 | command: docker-storage-setup 27 | 28 | - name: Enable and Start Docker 29 | systemd: 30 | name: docker 31 | enabled: yes 32 | state: started 33 | -------------------------------------------------------------------------------- /roles/ocp-inventory/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: Setup OpenShift Enterprise inventory file 2 | template: 3 | src: templates/openshift-inventory-enterprise.j2 4 | dest: /home/{{ ssh_user }}/openshift-inventory 5 | owner: root 6 | group: root 7 | mode: 0755 8 | when: openshift_deployment == "openshift-enterprise" 9 | 10 | - name: Setup OpenShift OKD inventory file 11 | template: 12 | src: templates/openshift-inventory-origin.j2 13 | dest: /home/{{ ssh_user }}/openshift-inventory 14 | owner: root 15 | group: root 16 | mode: 0755 17 | when: openshift_deployment == "origin" 18 | 19 | - name: Configure masters dynamically 20 | lineinfile: dest=/home/{{ ssh_user }}/openshift-inventory 21 | line="{{ item }} 
server_type=\"master\"" 22 | insertafter="^#?\[masters\]" 23 | with_items: "{{ groups['masters'] }}" 24 | 25 | - name: Configure etcd nodes dynamically 26 | lineinfile: dest=/home/{{ ssh_user }}/openshift-inventory 27 | line="{{ item }} server_type=\"etcd\"" 28 | insertafter="^#?\[etcd\]" 29 | with_items: "{{ groups['masters'] }}" 30 | 31 | - name: Configure master nodes dynamically 32 | lineinfile: dest=/home/{{ ssh_user }}/openshift-inventory 33 | line="{{ item }} openshift_schedulable=True openshift_node_group_name='node-config-master-crio'" 34 | insertafter="^#?\[nodes\]" 35 | with_items: "{{ groups['masters'] }}" 36 | 37 | - name: Configure infra nodes dynamically 38 | lineinfile: dest=/home/{{ ssh_user }}/openshift-inventory 39 | line="{{ item }} openshift_schedulable=True openshift_node_group_name='node-config-infra-crio'" 40 | insertafter="^#?{{ master_regex }}" 41 | with_items: "{{ groups['infras'] }}" 42 | 43 | - name: Configure app nodes dynamically 44 | lineinfile: dest=/home/{{ ssh_user }}/openshift-inventory 45 | line="{{ item }} openshift_schedulable=True openshift_node_group_name='node-config-compute-crio'" 46 | insertafter="^#?{{ infra_regex }}" 47 | with_items: "{{ groups['nodes'] }}" 48 | -------------------------------------------------------------------------------- /roles/ocp-inventory/templates/openshift-inventory-enterprise.j2: -------------------------------------------------------------------------------- 1 | # Create an OSEv3 group that contains the masters and nodes groups 2 | [OSEv3:children] 3 | masters 4 | etcd 5 | nodes 6 | 7 | # Set variables common for all OSEv3 hosts 8 | [OSEv3:vars] 9 | # SSH user, this user should allow ssh based auth without requiring a password 10 | ansible_ssh_user={{ ssh_user }} 11 | ansible_become=yes 12 | 13 | # OpenShift Deployment, enterprise of course! 14 | openshift_deployment_type={{ openshift_deployment }} 15 | {% if not bastion_repo %} 16 | oreg_url=registry.redhat.io/openshift3/ose-${component}:${version} 17 | oreg_auth_user={{ rhn_username_or_org_id }} 18 | oreg_auth_password={{ rhn_password_or_activation_key }} 19 | {% else %} 20 | oreg_url={{local_registry}}/openshift3/ose-${component}:${version} 21 | oreg_auth_user={{registry_username}} 22 | oreg_auth_password={{registry_password}} 23 | openshift_examples_modify_imagestreams=true 24 | {% endif %} 25 | os_firewall_use_firewalld=true 26 | openshift_clock_enabled=true 27 | 28 | # Enable CRI-O + Docker 29 | openshift_use_crio=True 30 | openshift_use_crio_only=False 31 | openshift_crio_enable_docker_gc=True 32 | openshift_crio_var_sock='unix:///var/run/crio/crio.sock' 33 | 34 | # Set Domain for Apps 35 | openshift_master_default_subdomain=apps.{{ hostvars['infra0']['lb_infra'] }}.xip.io 36 | 37 | # default project node selector 38 | osm_default_node_selector='node-role.kubernetes.io/compute=true' 39 | 40 | # Native high availability cluster method with optional load balancer. 
41 | openshift_master_cluster_hostname=openshift.{{ hostvars['master0']['lb_master'] }}.xip.io 42 | openshift_master_cluster_public_hostname=openshift.{{ hostvars['master0']['lb_master'] }}.xip.io 43 | openshift_override_hostname_check=true 44 | 45 | # Openstack 46 | openshift_cloudprovider_kind=openstack 47 | openshift_cloudprovider_openstack_auth_url={{ openstack_auth_url }} 48 | openshift_cloudprovider_openstack_username={{ openstack_user }} 49 | openshift_cloudprovider_openstack_password='"{{ openstack_passwd }}"' 50 | openshift_cloudprovider_openstack_domain_name=Default 51 | openshift_cloudprovider_openstack_tenant_name={{ openstack_project }} 52 | openshift_cloudprovider_openstack_region=RegionOne 53 | openshift_cloudprovider_openstack_blockstorage_version=v2 54 | openshift_cloudprovider_openstack_lb_subnet_id={{ service_subnet_id }} 55 | 56 | # Enable OpenStack Keystone authentication 57 | openshift_master_identity_providers=[{'name': 'keystone', 'login': 'true', 'challenge': 'true', 'mappingMethod': 'claim', 'kind': 'KeystonePasswordIdentityProvider', 'domainName': 'default', 'url': '{{ openstack_auth_url }}'}] 58 | 59 | # Httpasswd authentication, disabled 60 | #openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider'}] 61 | #openshift_master_htpasswd_users={'{{ openshift_user }}': '{{ openshift_passwd }}'} 62 | 63 | # Set networking to multi-tenant 64 | os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant' 65 | 66 | # Configure SDN cluster network and kubernetes service CIDR blocks. These 67 | # network blocks should be private and should not conflict with network blocks 68 | # in your infrastructure that pods may require access to. Can not be changed 69 | # after deployment. 
70 | osm_cluster_network_cidr=10.30.0.0/16 71 | openshift_portal_net=172.30.0.0/16 72 | osm_host_subnet_length=8 73 | 74 | # Disable disk and memory checks 75 | openshift_disable_check=disk_availability,memory_availability,docker_storage,package_availability,package_version,docker_image_availability 76 | 77 | # Deploy router 78 | openshift_hosted_manage_router=true 79 | openshift_hosted_router_selector='node-role.kubernetes.io/infra=true' 80 | 81 | # Deploy Registry 82 | openshift_hosted_manage_registry=true 83 | openshift_registry_selector='node-role.kubernetes.io/infra=true' 84 | openshift_hosted_registry_replicas={{ registry_replicas }} 85 | 86 | # Deploy logging 87 | openshift_logging_install_logging=false 88 | openshift_logging_kibana_hostname=logging.apps.{{ hostvars['infra0']['lb_infra'] }}.xip.io 89 | openshift_logging_use_ops=false 90 | openshift_logging_master_url=https://kubernetes.default.svc.cluster.local 91 | openshift_logging_public_master_url=openshift.{{ hostvars['master0']['lb_master'] }}.xip.io 92 | openshift_logging_curator_default_days=7 93 | openshift_logging_es_memory_limit=1Gi 94 | openshift_logging_es_cpu_limit=250m 95 | openshift_logging_es_pvc_dynamic=true 96 | openshift_logging_es_pvc_size=10Gi 97 | openshift_logging_curator_nodeselector={"node-role.kubernetes.io/infra":"true"} 98 | openshift_logging_es_nodeselector={"node-role.kubernetes.io/infra":"true"} 99 | openshift_logging_fluentd_nodeselector={"fluentd":"true"} 100 | openshift_logging_kibana_nodeselector={"node-role.kubernetes.io/infra":"true"} 101 | 102 | # Deploy Metrics 103 | openshift_metrics_install_metrics=false 104 | openshift_metrics_start_cluster=true 105 | openshift_metrics_hawkular_hostname=metrics.apps.{{ hostvars['infra0']['lb_infra'] }}.xip.io 106 | openshift_metrics_cassandra_limits_memory=1Gi 107 | openshift_metrics_cassandra_requests_cpu=250m 108 | openshift_metrics_hawkular_replicas=1 109 | openshift_metrics_hawkular_limits_memory=1Gi 110 | openshift_metrics_hawkular_limits_cpu=500m 111 | openshift_metrics_hawkular_requests_memory=768Mi 112 | openshift_metrics_hawkular_requests_cpu=250m 113 | openshift_metrics_duration=5 114 | openshift_metrics_cassandra_storage_type=dynamic 115 | openshift_metrics_cassandra_pvc_size=10Gi 116 | openshift_metrics_cassandra_nodeselector={"node-role.kubernetes.io/infra":"true"} 117 | openshift_metrics_hawkular_nodeselector={"node-role.kubernetes.io/infra":"true"} 118 | openshift_metrics_heapster_nodeselector={"node-role.kubernetes.io/infra":"true"} 119 | openshift_metrics_heapster_requests_memory=256Mi 120 | openshift_metrics_heapster_limits_memory=500Mi 121 | 122 | # Deploy Prometheus 123 | openshift_cluster_monitoring_operator_install=true 124 | openshift_cluster_monitoring_operator_prometheus_storage_capacity=50Gi 125 | openshift_cluster_monitoring_operator_alertmanager_storage_capacity=2Gi 126 | openshift_cluster_monitoring_operator_node_selector={"node-role.kubernetes.io/infra":"true"} 127 | openshift_cluster_monitoring_operator_prometheus_storage_enabled=true 128 | openshift_cluster_monitoring_operator_alertmanager_storage_enabled=true 129 | 130 | # OpenShift Template Service Broker 131 | openshift_template_service_broker_namespaces=['openshift'] 132 | 133 | # Deploy Service Catalog 134 | openshift_enable_service_catalog=true 135 | 136 | # host group for masters 137 | [masters] 138 | 139 | # host group for etcd 140 | [etcd] 141 | 142 | # host group for nodes, includes region info 143 | [nodes] 144 | 
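Once the ocp-inventory role has rendered this template and appended the dynamic host entries to the [masters], [etcd], and [nodes] groups above, the inventory is consumed by the stock openshift-ansible playbooks. A minimal sketch of that step, assuming the openshift-ansible RPM layout on the bastion (these paths are the usual OCP 3.11 defaults, not something this repo installs):

    # run on the bastion as the ssh_user
    ansible-playbook -i ~/openshift-inventory /usr/share/ansible/openshift-ansible/playbooks/prerequisites.yml
    ansible-playbook -i ~/openshift-inventory /usr/share/ansible/openshift-ansible/playbooks/deploy_cluster.yml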
-------------------------------------------------------------------------------- /roles/ocp-inventory/templates/openshift-inventory-origin.j2: -------------------------------------------------------------------------------- 1 | # Create an OSEv3 group that contains the masters and nodes groups 2 | [OSEv3:children] 3 | masters 4 | etcd 5 | nodes 6 | 7 | # Set variables common for all OSEv3 hosts 8 | [OSEv3:vars] 9 | # SSH user, this user should allow ssh based auth without requiring a password 10 | ansible_ssh_user={{ ssh_user }} 11 | ansible_become=yes 12 | 13 | # OpenShift Deployment: OKD/origin 14 | openshift_deployment_type={{ openshift_deployment }} 15 | os_firewall_use_firewalld=true 16 | openshift_clock_enabled=true 17 | 18 | # Enable CRI-O + Docker 19 | openshift_use_crio=True 20 | openshift_use_crio_only=False 21 | openshift_crio_enable_docker_gc=True 22 | 23 | # OKD Additional repos in case not released yet to centos 24 | #openshift_additional_repos=[{'id': 'centos-okd-ci', 'name': 'centos-okd-ci', 'baseurl' :'https://rpms.svc.ci.openshift.org/openshift-origin-v3.11', 'gpgcheck' :'0', 'enabled' :'1'}] 25 | 26 | # Set Domain for Apps 27 | openshift_master_default_subdomain=apps.{{ hostvars['infra0']['lb_infra'] }}.xip.io 28 | 29 | # default project node selector 30 | osm_default_node_selector='node-role.kubernetes.io/compute=true' 31 | 32 | # Native high availability cluster method with optional load balancer. 33 | openshift_master_cluster_hostname=openshift.{{ hostvars['master0']['lb_master'] }}.xip.io 34 | openshift_master_cluster_public_hostname=openshift.{{ hostvars['master0']['lb_master'] }}.xip.io 35 | openshift_override_hostname_check=true 36 | 37 | # Openstack 38 | openshift_cloudprovider_kind=openstack 39 | openshift_cloudprovider_openstack_auth_url={{ openstack_auth_url }} 40 | openshift_cloudprovider_openstack_username={{ openstack_user }} 41 | openshift_cloudprovider_openstack_password='"{{ openstack_passwd }}"' 42 | openshift_cloudprovider_openstack_domain_name=Default 43 | openshift_cloudprovider_openstack_tenant_name={{ openstack_project }} 44 | openshift_cloudprovider_openstack_region=RegionOne 45 | openshift_cloudprovider_openstack_blockstorage_version=v2 46 | openshift_cloudprovider_openstack_lb_subnet_id={{ service_subnet_id }} 47 | 48 | # Enable OpenStack Keystone authentication 49 | openshift_master_identity_providers=[{'name': 'keystone', 'login': 'true', 'challenge': 'true', 'mappingMethod': 'claim', 'kind': 'KeystonePasswordIdentityProvider', 'domainName': 'default', 'url': '{{ openstack_auth_url }}'}] 50 | 51 | # Httpasswd authentication, disabled 52 | #openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider'}] 53 | #openshift_master_htpasswd_users={'{{ openshift_user }}': '{{ openshift_passwd }}'} 54 | 55 | # Set networking to multi-tenant 56 | os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant' 57 | 58 | # Configure SDN cluster network and kubernetes service CIDR blocks. These 59 | # network blocks should be private and should not conflict with network blocks 60 | # in your infrastructure that pods may require access to. Can not be changed 61 | # after deployment.
62 | osm_cluster_network_cidr=10.30.0.0/16 63 | openshift_portal_net=172.30.0.0/16 64 | osm_host_subnet_length=8 65 | 66 | # Disable disk and memory checks 67 | openshift_disable_check=disk_availability,memory_availability,docker_storage,package_availability,package_version,docker_image_availability 68 | 69 | # Deploy router 70 | openshift_hosted_manage_router=true 71 | openshift_hosted_router_selector='node-role.kubernetes.io/infra=true' 72 | 73 | # Deploy Registry 74 | openshift_hosted_manage_registry=true 75 | openshift_registry_selector='node-role.kubernetes.io/infra=true' 76 | openshift_hosted_registry_replicas={{ registry_replicas }} 77 | 78 | # Deploy logging 79 | openshift_logging_install_logging=false 80 | openshift_logging_kibana_hostname=logging.apps.{{ hostvars['infra0']['lb_infra'] }}.xip.io 81 | openshift_logging_use_ops=false 82 | openshift_logging_master_url=https://kubernetes.default.svc.cluster.local 83 | openshift_logging_public_master_url=openshift.{{ hostvars['master0']['lb_master'] }}.xip.io 84 | openshift_logging_curator_default_days=7 85 | openshift_logging_es_memory_limit=1Gi 86 | openshift_logging_es_cpu_limit=250m 87 | openshift_logging_es_pvc_dynamic=true 88 | openshift_logging_es_pvc_size=10Gi 89 | openshift_logging_curator_nodeselector={"node-role.kubernetes.io/infra":"true"} 90 | openshift_logging_es_nodeselector={"node-role.kubernetes.io/infra":"true"} 91 | openshift_logging_fluentd_nodeselector={"fluentd":"true"} 92 | openshift_logging_kibana_nodeselector={"node-role.kubernetes.io/infra":"true"} 93 | 94 | # Deploy Metrics 95 | openshift_metrics_install_metrics=false 96 | openshift_metrics_start_cluster=true 97 | openshift_metrics_hawkular_hostname=metrics.apps.{{ hostvars['infra0']['lb_infra'] }}.xip.io 98 | openshift_metrics_cassandra_limits_memory=1Gi 99 | openshift_metrics_cassandra_requests_cpu=250m 100 | openshift_metrics_hawkular_replicas=1 101 | openshift_metrics_hawkular_limits_memory=1Gi 102 | openshift_metrics_hawkular_limits_cpu=500m 103 | openshift_metrics_hawkular_requests_memory=768Mi 104 | openshift_metrics_hawkular_requests_cpu=250m 105 | openshift_metrics_duration=5 106 | openshift_metrics_cassandra_storage_type=dynamic 107 | openshift_metrics_cassandra_pvc_size=10Gi 108 | openshift_metrics_cassandra_nodeselector={"node-role.kubernetes.io/infra":"true"} 109 | openshift_metrics_hawkular_nodeselector={"node-role.kubernetes.io/infra":"true"} 110 | openshift_metrics_heapster_nodeselector={"node-role.kubernetes.io/infra":"true"} 111 | openshift_metrics_heapster_requests_memory=256Mi 112 | openshift_metrics_heapster_limits_memory=500Mi 113 | 114 | # Deploy Prometheus 115 | openshift_cluster_monitoring_operator_install=true 116 | openshift_cluster_monitoring_operator_prometheus_storage_capacity=50Gi 117 | openshift_cluster_monitoring_operator_alertmanager_storage_capacity=2Gi 118 | openshift_cluster_monitoring_operator_node_selector={"node-role.kubernetes.io/infra":"true"} 119 | openshift_cluster_monitoring_operator_prometheus_storage_enabled=true 120 | openshift_cluster_monitoring_operator_alertmanager_storage_enabled=true 121 | 122 | # OpenShift Template Service Broker 123 | openshift_template_service_broker_namespaces=['openshift'] 124 | 125 | # Deploy Service Catalog 126 | openshift_enable_service_catalog=true 127 | 128 | # host group for masters 129 | [masters] 130 | 131 | # host group for etcd 132 | [etcd] 133 | 134 | # host group for nodes, includes region info 135 | [nodes] 136 | 
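The OKD template is consumed the same way; only the packaging differs. A hedged sketch for CentOS 7, assuming the CentOS PaaS SIG repos for Origin 3.11 (the package names are an assumption, not something this repo configures):

    # assumption: CentOS PaaS SIG packaging for Origin 3.11
    yum -y install centos-release-openshift-origin311 openshift-ansible
    ansible-playbook -i ~/openshift-inventory /usr/share/ansible/openshift-ansible/playbooks/deploy_cluster.yml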
-------------------------------------------------------------------------------- /roles/ocp-inventory/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | master_regex: "master{{ master_count - 1 }}.{{ domain_name }} openshift_schedulable=False" 3 | infra_regex: "infra{{ infra_count - 1 }}.{{ domain_name }} openshift_schedulable=True" 4 | -------------------------------------------------------------------------------- /roles/ocp-setup-project/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: Set facts for free tier 2 | set_fact: 3 | num_pods: 4 4 | cpu_requests: 1 5 | cpu_limits: 2 6 | mem_requests: 1Gi 7 | mem_limits: 2Gi 8 | pod_max_cpu: 1 9 | pod_min_cpu: 100m 10 | pod_max_mem: 1Gi 11 | pod_min_mem: 100Mi 12 | container_max_cpu: 1 13 | container_min_cpu: 100m 14 | container_max_mem: 1Gi 15 | container_min_mem: 100Mi 16 | when: tier == "free" 17 | 18 | - name: Set facts for small tier 19 | set_fact: 20 | num_pods: 8 21 | cpu_requests: 2 22 | cpu_limits: 4 23 | mem_requests: 2Gi 24 | mem_limits: 4Gi 25 | pod_max_cpu: 2 26 | pod_min_cpu: 100m 27 | pod_max_mem: 2Gi 28 | pod_min_mem: 100Mi 29 | container_max_cpu: 2 30 | container_min_cpu: 100m 31 | container_max_mem: 2Gi 32 | container_min_mem: 100Mi 33 | when: tier == "small" 34 | 35 | - name: Set facts for medium tier 36 | set_fact: 37 | num_pods: 16 38 | cpu_requests: 4 39 | cpu_limits: 8 40 | mem_requests: 4Gi 41 | mem_limits: 8Gi 42 | pod_max_cpu: 4 43 | pod_min_cpu: 100m 44 | pod_max_mem: 4Gi 45 | pod_min_mem: 100Mi 46 | container_max_cpu: 4 47 | container_min_cpu: 100m 48 | container_max_mem: 4Gi 49 | container_min_mem: 100Mi 50 | when: tier == "medium" 51 | 52 | - name: Set facts for large tier 53 | set_fact: 54 | num_pods: 32 55 | cpu_requests: 8 56 | cpu_limits: 16 57 | mem_requests: 8Gi 58 | mem_limits: 16Gi 59 | pod_max_cpu: 8 60 | pod_min_cpu: 100m 61 | pod_max_mem: 8Gi 62 | pod_min_mem: 100Mi 63 | container_max_cpu: 8 64 | container_min_cpu: 100m 65 | container_max_mem: 8Gi 66 | container_min_mem: 100Mi 67 | when: tier == "large" 68 | 69 | - name: Authenticate to OpenShift via Token 70 | shell: oc login {{ url }} --insecure-skip-tls-verify=true --token {{ token }} 71 | 72 | - name: Create OpenShift Project 73 | shell: oc new-project {{ project }} 74 | when: new_project == "True" 75 | 76 | - name: Add users and roles to project 77 | shell: oc policy add-role-to-user {{ role }} {{ user }} -n {{ project }} 78 | 79 | - name: Copy Compute Quota Template to /tmp 80 | template: 81 | src: templates/compute-resources.j2 82 | dest: /tmp/compute-resources.yml 83 | mode: 0644 84 | 85 | - name: Copy Limit Range Template to /tmp 86 | template: 87 | src: templates/limit-ranges.j2 88 | dest: /tmp/limit-ranges.yml 89 | mode: 0644 90 | 91 | - name: Set Project Quota 92 | shell: oc create -f /tmp/compute-resources.yml -n {{ project }} 93 | when: new_project == "True" 94 | 95 | - name: Set Project Limit Ranges 96 | shell: oc create -f /tmp/limit-ranges.yml -n {{ project }} 97 | when: new_project == "True" 98 | 99 | - name: Label Project Namespace 100 | shell: oc label namespace {{ project }} box={{ label }} -n {{ project }} 101 | when: new_project == "True" 102 | -------------------------------------------------------------------------------- /roles/ocp-setup-project/templates/compute-resources.j2: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: ResourceQuota 3 | metadata: 4 
| name: compute-resources 5 | spec: 6 | hard: 7 | pods: "{{ num_pods }}" 8 | requests.cpu: "{{ cpu_requests }}" 9 | requests.memory: {{ mem_requests }} 10 | limits.cpu: "{{ cpu_limits }}" 11 | limits.memory: {{ mem_limits }} 12 | -------------------------------------------------------------------------------- /roles/ocp-setup-project/templates/limit-ranges.j2: -------------------------------------------------------------------------------- 1 | apiVersion: "v1" 2 | kind: "LimitRange" 3 | metadata: 4 | name: "resource-limits" 5 | spec: 6 | limits: 7 | - type: "Pod" 8 | max: 9 | cpu: "{{ pod_max_cpu }}" 10 | memory: "{{ pod_max_mem }}" 11 | min: 12 | cpu: "{{ pod_min_cpu }}" 13 | memory: "{{ pod_min_mem }}" 14 | - type: "Container" 15 | max: 16 | cpu: "{{ container_max_cpu }}" 17 | memory: "{{ container_max_mem }}" 18 | min: 19 | cpu: "{{ container_min_cpu }}" 20 | memory: "{{ container_min_mem }}" 21 | default: 22 | cpu: "300m" 23 | memory: "200Mi" 24 | defaultRequest: 25 | cpu: "200m" 26 | memory: "100Mi" 27 | maxLimitRequestRatio: 28 | cpu: "10" 29 | -------------------------------------------------------------------------------- /roles/osp-inventory/library/os_stack.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | 3 | from ansible.module_utils.basic import * 4 | 5 | import subprocess 6 | 7 | 8 | def main(): 9 | module = AnsibleModule(argument_spec=dict( 10 | name=dict(required=True, type='str'), 11 | template=dict(required=True, type='str'), 12 | parameters=dict(required=False, type='dict'), 13 | )) 14 | 15 | stack_name = module.params.get('name') 16 | 17 | command = [ 18 | 'openstack', 'stack', 'create', 19 | '--wait', 20 | '-t', module.params.get('template') 21 | ] 22 | 23 | parameters = module.params.get('parameters') or {} 24 | 25 | for key, value in parameters.items(): 26 | command.append('--parameter') 27 | if isinstance(value, list): 28 | value = ",".join(value) 29 | command.append("{}={}".format(key, value)) 30 | 31 | command.append(stack_name) 32 | 33 | process = subprocess.Popen(command, 34 | stdout=subprocess.PIPE, 35 | stderr=subprocess.PIPE) 36 | stdout, stderr = process.communicate() 37 | exit_code = process.wait() 38 | if exit_code == 0: 39 | module.exit_json( 40 | msg="Stack '{}' deployed successfully.".format(stack_name), 41 | stdout=stdout, 42 | stderr=stderr, 43 | rc=exit_code, 44 | changed=True) 45 | else: 46 | module.fail_json( 47 | msg="Stack '{}' failed.".format(stack_name), 48 | stdout=stdout, 49 | stderr=stderr, 50 | rc=exit_code) 51 | 52 | if __name__ == '__main__': 53 | main() 54 | -------------------------------------------------------------------------------- /roles/osp-inventory/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: Check whether the stack exists already 2 | command: "openstack stack show {{ stack_name }}" 3 | register: stack_check 4 | 5 | - name: Register stack output 6 | command: > 7 | openstack stack output show -f json -c output_value 8 | {{ stack_name }} ip_address 9 | register: stack_output_raw 10 | 11 | - name: print debug 12 | debug: 13 | msg: "here {{ stack_output_raw.stdout }}" 14 | 15 | - set_fact: 16 | stack_output: "{{ (stack_output_raw.stdout|from_json).output_value }}" 17 | 18 | - name: Add the bastion to the inventory 19 | add_host: 20 | name: "{{ stack_output.bastion.name }}" 21 | groups: bastion,all 22 | zone: "{{ domain_name }}" 23 | domain_name: "{{ domain_name }}" 24 | contact: "{{ contact }}" 25 | 
ansible_user: "{{ ssh_user }}" 26 | ansible_ssh_host: "{{ stack_output.bastion.address }}" 27 | 28 | - name: Add the masters to the inventory 29 | add_host: 30 | name: "{{ item.name }}" 31 | groups: masters,all,openshift 32 | zone: "{{ domain_name }}" 33 | domain_name: "{{ domain_name }}" 34 | contact: "{{ contact }}" 35 | ansible_user: "{{ ssh_user }}" 36 | ansible_ssh_host: "{{ item.address }}" 37 | lb_master: "{{ stack_output.lb_master.address }}" 38 | with_items: "{{ stack_output.masters }}" 39 | 40 | - name: Add the infras to the inventory 41 | add_host: 42 | name: "{{ item.name }}" 43 | groups: infras,all,openshift 44 | zone: "{{ domain_name }}" 45 | domain_name: "{{ domain_name }}" 46 | contact: "{{ contact }}" 47 | ansible_user: "{{ ssh_user }}" 48 | ansible_ssh_host: "{{ item.address }}" 49 | lb_infra: "{{ stack_output.lb_infra.address }}" 50 | with_items: "{{ stack_output.infras }}" 51 | 52 | - name: Add the nodes to the inventory 53 | add_host: 54 | name: "{{ item.name }}" 55 | groups: nodes,all,openshift 56 | zone: "{{ domain_name }}" 57 | domain_name: "{{ domain_name }}" 58 | contact: "{{ contact }}" 59 | ansible_user: "{{ ssh_user }}" 60 | ansible_ssh_host: "{{ item.address }}" 61 | with_items: "{{ stack_output.nodes }}" 62 | -------------------------------------------------------------------------------- /roles/osp-setup-project/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: Create OpenStack Project 2 | shell: openstack project create {{ project }} 3 | register: openstack_project_output 4 | 5 | - name: Add OpenStack User 6 | shell: openstack user create --project {{ project }} --password {{ passwd }} {{ user }} 7 | when: openstack_project_output.rc == 0 8 | 9 | - name: Add Role to User 10 | shell: openstack role add --project {{ project }} --user {{ user }} {{ role }} 11 | -------------------------------------------------------------------------------- /roles/osp-stack-create/library/stack_create.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | 3 | from ansible.module_utils.basic import * 4 | 5 | import subprocess 6 | 7 | 8 | def main(): 9 | module = AnsibleModule(argument_spec=dict( 10 | name=dict(required=True, type='str'), 11 | template=dict(required=True, type='str'), 12 | parameters=dict(required=False, type='dict'), 13 | )) 14 | 15 | stack_name = module.params.get('name') 16 | 17 | command = [ 18 | 'openstack', 'stack', 'create', 19 | '--wait', 20 | '-t', module.params.get('template') 21 | ] 22 | 23 | parameters = module.params.get('parameters') or {} 24 | 25 | for key, value in parameters.items(): 26 | command.append('--parameter') 27 | if isinstance(value, list): 28 | value = ",".join(value) 29 | command.append("{}={}".format(key, value)) 30 | 31 | command.append(stack_name) 32 | 33 | process = subprocess.Popen(command, 34 | stdout=subprocess.PIPE, 35 | stderr=subprocess.PIPE) 36 | stdout, stderr = process.communicate() 37 | exit_code = process.wait() 38 | if exit_code == 0: 39 | module.exit_json( 40 | msg="Stack '{}' deployed successfully.".format(stack_name), 41 | stdout=stdout, 42 | stderr=stderr, 43 | rc=exit_code, 44 | changed=True) 45 | else: 46 | module.fail_json( 47 | msg="Stack '{}' failed.".format(stack_name), 48 | stdout=stdout, 49 | stderr=stderr, 50 | rc=exit_code) 51 | 52 | if __name__ == '__main__': 53 | main() 54 | -------------------------------------------------------------------------------- 
/roles/osp-stack-create/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - include_tasks: validate-parameters.yml 2 | 3 | - name: Check if {{ stack_name }} stack exists 4 | command: "openstack stack show {{ stack_name }}" 5 | ignore_errors: yes 6 | register: stack_check 7 | 8 | - name: Create heat stack {{ stack_name }} 9 | stack_create: 10 | name: "{{ stack_name }}" 11 | template: "{{ heat_template_path }}" 12 | parameters: 13 | domain_name: "{{ domain_name }}" 14 | external_network: "{{ external_network }}" 15 | service_network: "{{ service_network }}" 16 | service_subnet: "{{ service_subnet_id }}" 17 | ssh_key_name: "{{ ssh_key_name }}" 18 | image: "{{ image }}" 19 | bastion_flavor: "{{ bastion_flavor }}" 20 | master_flavor: "{{ master_flavor }}" 21 | infra_flavor: "{{ infra_flavor }}" 22 | node_flavor: "{{ node_flavor }}" 23 | 24 | master_count: "{{ master_count }}" 25 | infra_count: "{{ infra_count }}" 26 | node_count: "{{ node_count }}" 27 | master_server_group_policies: "{{ master_server_group_policies }}" 28 | infra_server_group_policies: "{{ infra_server_group_policies }}" 29 | node_server_group_policies: "{{ node_server_group_policies }}" 30 | register: stack_output 31 | when: stack_check.rc != 0 32 | 33 | - name: Register stack output 34 | command: > 35 | openstack stack output show -f json -c output_value 36 | {{ stack_name }} ip_address 37 | register: stack_output_raw 38 | 39 | - set_fact: 40 | #stack_output: "{{ stack_output_raw.stdout|from_json }}" 41 | stack_output: "{{ stack_output_raw }}" 42 | 43 | - name: Wait 300 seconds for port 22 to become open and contain "OpenSSH" 44 | wait_for: 45 | port: 22 46 | host: '{{ (ansible_ssh_host|default(ansible_host))|default(inventory_hostname) }}' 47 | search_regex: OpenSSH 48 | delay: 10 49 | vars: 50 | ansible_connection: local 51 | -------------------------------------------------------------------------------- /roles/osp-stack-create/tasks/validate-parameters.yml: -------------------------------------------------------------------------------- 1 | - name: Validate masters 2 | assert: 3 | that: 4 | - (master_count == 1) or (master_count == 3) 5 | msg: "Master count is currently {{ master_count }} but must be 1 or 3" 6 | 7 | - name: Validate infras 8 | assert: 9 | that: 10 | - (infra_count >= 1) 11 | - (infra_count <= 3) 12 | msg: "Infra count is currently {{ infra_count }} but must be between 1 and 3" 13 | 14 | - name: Validate nodes 15 | assert: 16 | that: 17 | - (node_count >= 1) 18 | msg: "Node count {{ node_count }} must be >= 1" 19 | 20 | - name: Validate OpenShift HA 21 | assert: 22 | that: 23 | - (master_count == 3) and (infra_count > 1) 24 | msg: "OpenShift HA requires 3 masters and at least 2 infra nodes" 25 | when: openshift_ha 26 | 27 | #- name: Validate Registry HA 28 | # assert: 29 | # that: 30 | # - (registry_replicas > 1 ) 31 | # msg: "OpenShift HA requires at least 2 registry replicas" 32 | # when: openshift_ha 33 | -------------------------------------------------------------------------------- /roles/osp-stack-update/library/stack_update.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | 3 | from
ansible.module_utils.basic import * 4 | 5 | import subprocess 6 | 7 | 8 | def main(): 9 | module = AnsibleModule(argument_spec=dict( 10 | name=dict(required=True, type='str'), 11 | template=dict(required=True, type='str'), 12 | parameters=dict(required=False, type='dict'), 13 | )) 14 | 15 | stack_name = module.params.get('name') 16 | 17 | command = [ 18 | 'openstack', 'stack', 'update', 19 | '--wait', 20 | '-t', module.params.get('template') 21 | ] 22 | 23 | parameters = module.params.get('parameters') or {} 24 | 25 | for key, value in parameters.items(): 26 | command.append('--parameter') 27 | if isinstance(value, list): 28 | value = ",".join(value) 29 | command.append("{}={}".format(key, value)) 30 | 31 | command.append(stack_name) 32 | 33 | process = subprocess.Popen(command, 34 | stdout=subprocess.PIPE, 35 | stderr=subprocess.PIPE) 36 | stdout, stderr = process.communicate() 37 | exit_code = process.wait() 38 | if exit_code == 0: 39 | module.exit_json( 40 | msg="Stack '{}' updated successfully.".format(stack_name), 41 | stdout=stdout, 42 | stderr=stderr, 43 | rc=exit_code, 44 | changed=True) 45 | else: 46 | module.fail_json( 47 | msg="Stack '{}' update failed.".format(stack_name), 48 | stdout=stdout, 49 | stderr=stderr, 50 | rc=exit_code) 51 | 52 | if __name__ == '__main__': 53 | main() 54 | -------------------------------------------------------------------------------- /roles/osp-stack-update/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - include_tasks: validate-parameters.yml 2 | 3 | - name: Check if {{ stack_name }} stack exists 4 | command: "openstack stack show {{ stack_name }}" 5 | ignore_errors: yes 6 | register: stack_check 7 | 8 | - name: Update heat stack {{ stack_name }} 9 | stack_update: 10 | name: "{{ stack_name }}" 11 | template: "{{ heat_template_path }}" 12 | parameters: 13 | domain_name: "{{ domain_name }}" 14 | external_network: "{{ external_network }}" 15 | service_network: "{{ service_network }}" 16 | service_subnet: "{{ service_subnet_id }}" 17 | ssh_key_name: "{{ ssh_key_name }}" 18 | image: "{{ image }}" 19 | bastion_flavor: "{{ bastion_flavor }}" 20 | master_flavor: "{{ master_flavor }}" 21 | infra_flavor: "{{ infra_flavor }}" 22 | node_flavor: "{{ node_flavor }}" 23 | 24 | master_count: "{{ master_count }}" 25 | infra_count: "{{ infra_count }}" 26 | node_count: "{{ node_count }}" 27 | master_server_group_policies: "{{ master_server_group_policies }}" 28 | infra_server_group_policies: "{{ infra_server_group_policies }}" 29 | node_server_group_policies: "{{ node_server_group_policies }}" 30 | register: stack_output 31 | when: stack_check.rc == 0 32 | 33 | - name: Register stack output 34 | command: > 35 | openstack stack output show -f json -c output_value 36 | {{ stack_name }} ip_address 37 | register: stack_output_raw 38 | 39 | - set_fact: 40 | #stack_output: "{{ stack_output_raw.stdout|from_json }}" 41 | stack_output: "{{ stack_output_raw }}" 42 | 43 | - name: Wait 300 seconds for port 22 to become open and contain "OpenSSH" 44 | wait_for: 45 | port: 22 46 | host: '{{ (ansible_ssh_host|default(ansible_host))|default(inventory_hostname) }}' 47 | search_regex: OpenSSH 48 | delay: 10 49 | vars: 50 | ansible_connection: local 51 | 52 |
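In practice this update path is how the cluster is scaled: raise node_count (or change a flavor) in vars.yml and re-run the update playbook, which drives an openstack stack update --wait with the new parameter set. A minimal sketch (values illustrative):

    # in vars.yml: node_count: 5   (was 3)
    ./update-openstack-infra.yml
    # Heat typically grows the node ResourceGroup in place, leaving existing instances untouched.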
-------------------------------------------------------------------------------- /roles/osp-stack-update/tasks/validate-parameters.yml: -------------------------------------------------------------------------------- 1 | - name: Validate masters 2 | assert: 3 | that: 4 | - (master_count == 1) or (master_count == 3) 5 | msg: "Master count is currently {{ master_count }} but must be 1 or 3" 6 | 7 | - name: Validate infras 8 | assert: 9 | that: 10 | - (infra_count >= 1) 11 | - (infra_count <= 3) 12 | msg: "Infra count is currently {{ infra_count }} but must be between 1 and 3" 13 | 14 | - name: Validate nodes 15 | assert: 16 | that: 17 | - (node_count >= 1) 18 | msg: "Node count {{ node_count }} must be >= 1" 19 | 20 | - name: Validate OpenShift HA 21 | assert: 22 | that: 23 | - (master_count == 3) and (infra_count > 1) 24 | msg: "OpenShift HA requires 3 masters and at least 2 infra nodes" 25 | when: openshift_ha 26 | 27 | #- name: Validate Registry HA 28 | # assert: 29 | # that: 30 | # - (registry_replicas > 1 ) 31 | # msg: "OpenShift HA requires at least 2 registry replicas" 32 | # when: openshift_ha 33 | -------------------------------------------------------------------------------- /roles/redhat_subscription/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: Check if host already registered 2 | command: subscription-manager list --available --match-installed --matches=*Openshift* --pool-only 3 | register: subs_result 4 | failed_when: false 5 | changed_when: "'This system is not yet registered' in subs_result.stderr" 6 | 7 | - name: Subscribe to Red Hat subscription using username 8 | redhat_subscription: 9 | state: present 10 | username: "{{ rhn_username_or_org_id }}" 11 | password: "{{ rhn_password_or_activation_key }}" 12 | pool: "{{ rhn_pool }}" 13 | #force_register: yes 14 | register: subscribe_user 15 | until: subscribe_user is succeeded 16 | retries: 5 17 | delay: 5 18 | when: subscription_use_username == True and subs_result.changed 19 | 20 | - name: Subscribe to Red Hat using activation key 21 | redhat_subscription: 22 | state: present 23 | org_id: "{{ rhn_username_or_org_id }}" 24 | activationkey: "{{ rhn_password_or_activation_key }}" 25 | pool: "{{ rhn_pool }}" 26 | #force_register: yes 27 | register: subscribe_activation_key 28 | until: subscribe_activation_key is succeeded 29 | retries: 5 30 | delay: 5 31 | when: subscription_use_username == False and subs_result.changed 32 | 33 | - debug: 34 | msg: "username subscription {{ subscribe_user.changed }}" 35 | 36 | - debug: 37 | msg: "activation key subscription {{ subscribe_activation_key.changed }}" 38 | 39 | - name: Disable all repos 40 | shell: | 41 | subscription-manager repos --disable=* 42 | retries: 5 43 | delay: 5 44 | when: subscribe_user.changed or subscribe_activation_key.changed 45 | 46 | - name: Enable correct repos 47 | command: "subscription-manager repos --enable={{item}}" 48 | when: subscribe_user.changed or subscribe_activation_key.changed 49 | retries: 5 50 | delay: 5 51 | with_items: "{{repos}}" 52 | -------------------------------------------------------------------------------- /sample_vars.yml: -------------------------------------------------------------------------------- 1 | --- 2 | ### OpenStack Settings ### 3 | openstack_user: admin 4 | openstack_passwd: 5 | openstack_auth_url: 6 | openstack_project: 7 | domain_name: ocp3.lab 8 | external_network: public 9 | service_network: mgmt 10 | service_subnet_id: 718f3675-55b9-498a-8c7b-e4eb2e41dde4 11 | image: rhel74 12 | ssh_user: cloud-user 13 | ssh_key_path: /root/admin.pem 14 | ssh_key_name:
admin 15 | stack_name: openshift 16 | openstack_release: queens 17 | openstack_version: "13" 18 | contact: admin@ocp3.lab 19 | heat_template_path: /root/openshift-on-openstack-123/heat/openshift_single_lbaas.yaml 20 | 21 | ### OpenShift Settings ### 22 | ### Deployment can be openshift-enterprise or origin for OKD ### 23 | openshift_deployment: openshift-enterprise 24 | openshift_repoversion: "311" 25 | openshift_version: "3.11" 26 | docker_version: "1.13.1" 27 | openshift_ha: false 28 | registry_replicas: 1 29 | openshift_user: admin 30 | openshift_passwd: 31 | 32 | ### Red Hat Subscription ### 33 | ### This is only needed for openshift-enterprise ### 34 | subscription_use_username: True 35 | rhn_username_or_org_id: 36 | rhn_password_or_activation_key: 37 | rhn_pool: 38 | 39 | ### OpenStack Instance Count ### 40 | master_count: 1 41 | infra_count: 1 42 | node_count: 3 43 | 44 | ### OpenStack Instance Group Policies ### 45 | ### Set to 'anti-affinity' if running on multiple compute nodes ### 46 | master_server_group_policies: "['soft-anti-affinity']" 47 | infra_server_group_policies: "['soft-anti-affinity']" 48 | node_server_group_policies: "['soft-anti-affinity']" 49 | 50 | ### OpenStack Instance Flavors ### 51 | ### These flavors need to exist in OpenStack ### 52 | bastion_flavor: ocp.bastion 53 | master_flavor: ocp.master 54 | infra_flavor: ocp.infra 55 | node_flavor: ocp.node 56 | 57 | ### Disconnected Installation Settings ### 58 | ### Only needed when there is no internet access ### 59 | ### Ignore the settings below if you have internet access ### 60 | bastion_repo: False 61 | rhn_local_repo: False 62 | rhn_local_repo_base_url: http:// 63 | 64 | ### Bastion repo for disconnected install ### 65 | upstream_registry: registry.redhat.io 66 | local_registry: bastion.local 67 | registry_tag: v3.11.16 68 | major_registry_tag: "v{{openshift_version}}" 69 | rclone_url: https://downloads.rclone.org/v1.44/rclone-v1.44-linux-amd64.rpm 70 | 71 | ### Required swift containers for disconnected install ### 72 | swift_rhn_repo_container_name: openshift_rhn_repo 73 | swift_rhn_registry_container_name: openshift_rhn_registry 74 | swift_openshift_rhn_repo_mount: /var/www/html/repo 75 | 76 | ### Bastion local registry details for disconnected install ### 77 | registry_config_dir: /registry 78 | registry_username: docker 79 | registry_password: registry 80 | -------------------------------------------------------------------------------- /update-openstack-infra.yml: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env ansible-playbook 2 | --- 3 | - name: Update the OpenShift Cluster Infrastructure 4 | hosts: localhost 5 | connection: local 6 | become: no 7 | gather_facts: no 8 | environment: 9 | OS_USERNAME: "{{ openstack_user }}" 10 | OS_PASSWORD: "{{ openstack_passwd }}" 11 | OS_AUTH_URL: "{{ openstack_auth_url }}" 12 | OS_PROJECT_NAME: "{{ openstack_project }}" 13 | OS_USER_DOMAIN_NAME: Default 14 | OS_PROJECT_DOMAIN_NAME: Default 15 | OS_IDENTITY_API_VERSION: 3 16 | OS_INTERFACE: public 17 | 18 | tasks: 19 | - name: Include vars.yml 20 | include_vars: 21 | file: vars.yml 22 | 23 | - name: Set ansible_ssh_private_key_file 24 | set_fact: 25 | ansible_ssh_private_key_file: "{{ ssh_key_path }}" 26 | 27 | - import_role: 28 | name: osp-stack-update 29 | 30 | - import_role: 31 | name: osp-inventory 32 | 33 | - name: Configure OpenStack Client on Bastion 34 | hosts: bastion 35 | become: true 36 | 37 | tasks: 38 | - name: Include vars.yml 39 | include_vars: 40 | file: vars.yml 41 | 42 | - import_role: 43 | name: bastion-prep 44 | 45 |
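For completeness, the intended end-to-end flow of these playbooks (each is directly executable via its ansible-playbook shebang; copying sample_vars.yml to vars.yml is an assumption inferred from the include_vars tasks above):

    cp sample_vars.yml vars.yml        # then edit for your environment
    ./01_deploy-openstack-infra.yml    # create the Heat stack and prepare the bastion
    ./02_prepare-openshift.yml         # register nodes, prep Docker, write the OpenShift inventory
    ./update-openstack-infra.yml       # later: apply stack parameter changes such as scaling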
--------------------------------------------------------------------------------