├── LICENSE.txt ├── README.adoc ├── README_bugs.adoc ├── README_debugging.adoc ├── bastion.yaml ├── collect-config-setup ├── fragments │ ├── configure_config_agent.sh │ ├── install_config_agent_yum.sh │ └── start_config_agent.sh └── install_config_agent_centos_yum.yaml ├── customize-disk-image ├── env_aop.yaml ├── env_flannel.yaml ├── env_ipfailover_keepalived.yaml ├── env_ipfailover_none.yaml ├── env_ldap.yaml ├── env_loadbalancer_dedicated.yaml ├── env_loadbalancer_external.yaml ├── env_loadbalancer_neutron.yaml ├── env_loadbalancer_none.yaml ├── env_origin.yaml ├── env_registry_ephemeral.yaml ├── env_registry_persistent.yaml ├── env_sdn_flannel.yaml ├── fragments ├── add_dns_record.sh ├── bastion-ansible.sh ├── bastion-boot.sh ├── bastion-node-add.sh ├── bastion-node-cleanup.sh ├── ca_cert.sh ├── common_functions.sh ├── common_openshift_functions.sh ├── host-update.sh ├── ifcfg-eth ├── infra-boot.sh ├── lb-boot.sh ├── master-boot.sh ├── merge_dict.py ├── node-boot.sh ├── retry.sh ├── rhn-register.sh ├── set-extra-docker-repos.sh ├── set-extra-repos.sh ├── tune-ansible.sh └── update_dns.py ├── graphics ├── architecture.png └── architecture.svg ├── heat-docker-agent ├── Dockerfile └── configure_container_agent.sh ├── infra.yaml ├── ipfailover_keepalived.yaml ├── ipfailover_none.yaml ├── loadbalancer_dedicated.yaml ├── loadbalancer_external.yaml ├── loadbalancer_neutron.yaml ├── loadbalancer_none.yaml ├── master.yaml ├── node.yaml ├── openshift-on-openstack.spec ├── openshift.yaml ├── registry_ephemeral.yaml ├── registry_persistent.yaml ├── sdn_flannel.yaml ├── sdn_openshift_sdn.yaml ├── templates └── var │ └── lib │ └── ansible │ ├── group_vars │ ├── OSv3.yml │ ├── masters.yml │ └── nodes.yml │ ├── host_vars │ └── loadbalancer.yml │ ├── inventory │ ├── playbooks │ ├── flannel.yml │ ├── haproxy.yml │ ├── ipfailover.yml │ ├── main.yml │ ├── quota.yml │ ├── registry.yml │ ├── scaledown.yml │ └── scaleup.yml │ ├── roles │ ├── fstab_mount_options │ │ └── tasks │ │ │ 
└── main.yml │ ├── reboot │ │ └── tasks │ │ │ └── main.yml │ └── xfs_grub_quota │ │ └── tasks │ │ └── main.yml │ └── templates │ └── etc │ └── haproxy │ └── router.cfg.j2 ├── tests ├── filter_plugins │ └── oo_filters.py ├── main.yml ├── roles │ ├── checkstatus │ │ └── tasks │ │ │ └── main.yml │ └── deploy │ │ └── tasks │ │ └── main.yml └── scaling.yml ├── volume_attachment_docker.yaml ├── volume_attachment_noop.yaml ├── volume_docker.yaml └── volume_noop.yaml /LICENSE.txt: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 
30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 
62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. 
You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 
123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. 
In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | 179 | APPENDIX: How to apply the Apache License to your work. 180 | 181 | To apply the Apache License to your work, attach the following 182 | boilerplate notice, with the fields enclosed by brackets "[]" 183 | replaced with your own identifying information. (Don't include 184 | the brackets!) The text should be enclosed in the appropriate 185 | comment syntax for the file format. 
We also recommend that a 186 | file or class name and description of purpose be included on the 187 | same "printed page" as the copyright notice for easier 188 | identification within third-party archives. 189 | 190 | Copyright [yyyy] [name of copyright owner] 191 | 192 | Licensed under the Apache License, Version 2.0 (the "License"); 193 | you may not use this file except in compliance with the License. 194 | You may obtain a copy of the License at 195 | 196 | http://www.apache.org/licenses/LICENSE-2.0 197 | 198 | Unless required by applicable law or agreed to in writing, software 199 | distributed under the License is distributed on an "AS IS" BASIS, 200 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 201 | See the License for the specific language governing permissions and 202 | limitations under the License. 203 | -------------------------------------------------------------------------------- /README_bugs.adoc: -------------------------------------------------------------------------------- 1 | = Known Bugs 2 | 3 | == Stack creation times out 4 | 5 | By default heat client timeout is set to 60 minutes, if a stack operation 6 | doesn't finish in the specified timeframe Heat sets the stack in FAILED state. 7 | In some cases timeout is caused by a reason which can not be fixed (e.g. slow 8 | hardware or network). Then it's possible to increase timeout and give Heat 9 | more time to finish a stack operation. You can increase timeout by passing 10 | `-t ` when running `heat stack-create` command. 11 | 12 | Also make sure you increase keystone token timeout in `/etc/keystone/keystone.conf`: 13 | 14 | ``` 15 | [token] 16 | expiration = # e.g. 7200 for two hours 17 | ``` 18 | 19 | And restart keystone service. Otherwise token expires sooner than heat 20 | finishes openshift-ansible run and fails to create some resources (e.g. 21 | ceilometer alarms). 
22 | 23 | 24 | == Stack is stuck in DELETE_IN_PROGRESS 25 | 26 | It should not take more than few minutes to delete a stack. There is couple of 27 | operations done for each node on delete: 28 | 29 | * existing pods are evacuated from OpenShift nodes 30 | * each node is unregistered with subscription-manager 31 | 32 | These operations are represented by `deployment_*_cleanup` Software Deployments 33 | in heat templates for each type of node and are executed by 34 | `os-collect-config` service. Heat waits for finishing cleanup script 35 | on each node until heat-agent sends signal back to Heat that the script has 36 | finished. If the script is not executed or gets stuck for some reason (e.g. 37 | os-collect-config service is not running on `bastion` host or 38 | `bastion` host has been deleted), then Heat timeouts 39 | when waiting on the signal from heat-agent. You can check 40 | `journalctl -u os-collect-config` on `bastion` host for details, if it doesn't 41 | help you can manually send signal for the hanging resource: 42 | 43 | ``` 44 | $ # check what resources are in progress 45 | $ heat resource-list -n 2 mystack|grep PROGRESS 46 | | openshift_infra_nodes | 9d7ba190-81dc-4ea2-b99e-627ac045fb92 | OS::Heat::ResourceGroup | DELETE_IN_PROGRESS | 2016-09-29T01:58:05 | mystack | 47 | | openshift_masters | 2f90c3a1-ac7c-4314-aa99-c9dd77062f26 | OS::Heat::ResourceGroup | DELETE_IN_PROGRESS | 2016-09-29T01:58:05 | mystack | 48 | | 0 | 867696e5-cbcc-4b1d-82f7-f5a94ae6f6f2 | file:///root/openshift-on-openstack/master.yaml | DELETE_IN_PROGRESS | 2016-09-29T02:16:01 | mystack-openshift_masters-bum3pew2mbex | 49 | | 0 | d87be046-5b90-4939-9492-076e6e82c31c | file:///root/openshift-on-openstack/infra.yaml | DELETE_IN_PROGRESS | 2016-09-29T02:16:03 | mystack-openshift_infra_nodes-v4p5h5354avk | 50 | | deployment_bastion_node_cleanup | 9b76a4af-d35d-4777-8236-88609a4cd2f0 | OS::Heat::SoftwareDeployment | DELETE_IN_PROGRESS | 2016-09-29T02:16:06 | 
mystack-openshift_masters-bum3pew2mbex-0-jfs2liw5l3yq | 51 | | deployment_bastion_node_cleanup | 18849c7e-b9fc-4f49-a4ce-5bce5f0b52a4 | OS::Heat::SoftwareDeployment | DELETE_IN_PROGRESS | 2016-09-29T02:16:07 | mystack-openshift_infra_nodes-v4p5h5354avk-0-a3ieqspeho2m | 52 | ``` 53 | 54 | In the above output we can see two deployment_bastion_node_cleanup resources. 55 | We can send signal manually by: 56 | 57 | ``` 58 | $ # heat resource-signal 59 | $ heat resource-signal mystack-openshift_infra_nodes-v4p5h5354avk-0-a3ieqspeho2m deployment_bastion_node_cleanup 60 | $ heat resource-signal mystack-openshift_masters-bum3pew2mbex-0-jfs2liw5l3yq deployment_bastion_node_cleanup 61 | ``` 62 | 63 | This command works when the stack is in DELETE_IN_PROGRESS state. If it's in 64 | DELETE_FAILED then run `heat stack-delete mystack` at first. 65 | -------------------------------------------------------------------------------- /README_debugging.adoc: -------------------------------------------------------------------------------- 1 | = Debugging 2 | 3 | == Deployment flow 4 | 5 | Heat templates for OpenShift deployment are quite complex and many resources 6 | are created during the deployment. 7 | `heat resource-list -n 2 oshift|grep PROGRESS` command can be used to monitor 8 | progress of the deployment. Resources are created in this order (very 9 | high-level flow): 10 | 11 | * neutron networks/subnets/router 12 | * ceilometer alarms 13 | * bastion node 14 | * master nodes 15 | * load balancer 16 | * openshift nodes 17 | * openshift-ansible 18 | 19 | It usually takes 30-60 minutes for creating heat stack (COMPLETE state) 20 | depending on number of nodes and OpenStack environment. Then the stack should 21 | either in COMPLETE or FAILED state. All nodes should be up and running in 22 | 30 minutes - you should see all of them with `nova list` command. If you see 23 | only bastion node or master nodes for a long time it usually means that 24 | cloud-init on some node failed. 
25 | 26 | == Heat stack 27 | 28 | If you hit FAILED state then in most cases 29 | you can find the issue by inspecting stack events and resources: 30 | 31 | ```bash 32 | $ heat event-list -n 2 oshift 33 | ... 34 | | 2 | bec1a588-f74c-4f92-adff-dd389b37258e | OverLimit: resources[2].resources.docker_volume: VolumeSizeExceedsAvailableQuota: Requested volume or snapshot exceeds allowed gigabytes quota. Requested 600G, quota is 1000G and 629G has been consumed. (HTTP 413) (Request-ID: req-602fa6f8-c796-433b-9288- | CREATE_FAILED | 2016-05-18T11:58:12 | oshift-openshift_masters-ug2z3lp2cvni | 35 | ``` 36 | 37 | We can see that the deployment failed in this case because of exceeded volume 38 | quota. 39 | 40 | Another handy command which can tell us what went wrong is checking heat 41 | resources: 42 | 43 | ```bash 44 | $ heat resource-list -n 2 atomic|grep FAIL 45 | | openshift_nodes | 6f7fe203-b4ae-4fd5-8327-f5f56c232805 | OS::Heat::AutoScalingGroup | CREATE_FAILED | 2016-05-18T10:53:07 | atomic | 46 | | rwgtfhmcflar | 5ebddb44-85e3-4373-a6a6-0730cf04ecbc | file:///root/openshift-on-openstack/node.yaml | CREATE_FAILED | 2016-05-18T10:57:54 | atomic-openshift_nodes-7alhsm5n4gqz | 47 | | deployment_run_ansible | f031008f-11d6-4d54-b88b-699cee85b1da | OS::Heat::SoftwareDeployment | CREATE_FAILED | 2016-05-18T10:57:57 | atomic-openshift_nodes-7alhsm5n4gqz-rwgtfhmcflar-lgghn54kifhm | 48 | ``` 49 | 50 | In this case we can see that there are three FAILED resources, the important 51 | is `deployment_run_ansible`. Both `rwgtfhmcflar` and `openshift_nodes` 52 | are parent resources of `deployment_run_ansible` and if a nested resource fails 53 | parent resources automatically fail too. 
54 | 55 | Because `deployment_run_ansible` is `OS::Heat::SoftwareDeployment` we can use 56 | another heat command to inspect the resource (resource unique ID from the second column 57 | in the listing above is used in this case): 58 | ```bash 59 | $ heat deployment-show f031008f-11d6-4d54-b88b-699cee85b1da 60 | ... 61 | "output_values": { 62 | "deploy_stdout": "", 63 | "deploy_stderr": " 64 | ... 65 | Failed to run 'ansible-playbook -vvvv --inventory /var/lib/ansible/inventory /var/lib/ansible/playbooks/main.yml', full log is in atomic-bastion.example.com:/var/log/ansible.11656\n+ exit 1\n 66 | ", 67 | "deploy_status_code": 1 68 | }, 69 | "creation_time": "2016-05-18T10:59:02", 70 | "updated_time": "2016-05-18T11:12:54", 71 | "input_values": {}, 72 | "action": "CREATE", 73 | "status_reason": "deploy_status_code : Deployment exited with non-zero status code: 1", 74 | "id": "f031008f-11d6-4d54-b88b-699cee85b1da" 75 | } 76 | ``` 77 | 78 | From the output we found out that an error occurred when running openshift-ansible 79 | and we can find more details in openshift-ansible logfile (on bastion host). 80 | 81 | Note that we used `-n 2` parameter in all heat commands, the reason is that 82 | we use nested stacks. 83 | 84 | == Cloud-init 85 | 86 | Cloud-init scripts run when a node boots up, it contains quite a lot of logic: 87 | basic config files, subcribing node into RHN, updating/installing basic RPM 88 | packages... It might happen that something goes wrong during this process, 89 | in most cases Heat is notified about the error and heat stack is marked as 90 | FAILED immediately. Then you can usually find the issue by using heat commands 91 | above. In some cases the failure may not be reported back to Heat and then 92 | heat stack remains in IN_PROGRESS state until in times out (and then FAILED 93 | state is set), default timeout is set to 60 minutes. 94 | 95 | In that case you can find the error in `/var/log/cloud-init.log` on the failed 96 | node. 
97 | 98 | == Os-collect-config 99 | 100 | During cloud-init setup os-collect-config tool is installed on bastion node. 101 | Os-collect-config polls Heat server for new metadata and runs a script if 102 | metadata change. 103 | 104 | Os-collect-config supports various sources for fetching metadata, so you can 105 | see warnings like this in your logs on bastion node: 106 | 107 | ```bash 108 | May 18 13:09:59 flannel-bastion os-collect-config[19193]: 2016-05-18 13:09:59.586 19193 WARNING os-collect-config [-] Source [request] Unavailable. 109 | May 18 13:09:59 flannel-bastion os-collect-config[19193]: 2016-05-18 13:09:59.586 19193 WARNING os_collect_config.local [-] /var/lib/os-collect-config/local-data not found. Skipping 110 | May 18 13:09:59 flannel-bastion os-collect-config[19193]: 2016-05-18 13:09:59.587 19193 WARNING os_collect_config.local [-] No local metadata found (['/var/lib/os-collect-config/local-data']) 111 | May 18 13:09:59 flannel-bastion os-collect-config[19193]: 2016-05-18 13:09:59.587 19193 WARNING os_collect_config.zaqar [-] No auth_url configured. 112 | ``` 113 | 114 | These are not important as we use heat-api-cfn service, what is important are 115 | errors related to cfn: 116 | 117 | ```bash 118 | May 18 13:09:17 localhost cloud-init: 2016-05-18 13:09:17.476 18977 INFO os_collect_config.cfn [-] No metadata_url configured. 
119 | 120 | ``` 121 | 122 | At first you can check if correct metadata URL is set in 123 | `/etc/os-collect-config.conf`: 124 | 125 | ```bash 126 | [root@flannel-bastion ~]# cat /etc/os-collect-config.conf 127 | [DEFAULT] 128 | command = os-refresh-config 129 | 130 | [cfn] 131 | metadata_url = http://10.16.66.83:8000/v1/ 132 | stack_name = flannel-bastion_host-25ppv7is7f7q 133 | secret_access_key = bf9ada46ac8d43ada7a498e30d5b7bf8 134 | access_key_id = 5bf23697b1f4407286f3079778631316 135 | path = host.Metadata 136 | 137 | ``` 138 | 139 | And if you can GET the URL: 140 | ```bash 141 | [root@flannel-bastion ~]# curl http://10.16.66.83:8000/ 142 | {"versions": [{"status": "CURRENT", "id": "v1.0", "links": [{"href": "http://10.16.66.83:8000/v1/", "rel": "self"}]}]} 143 | ``` 144 | 145 | If metadata fetching works you should see similar lines in journalctl: 146 | ```bash 147 | [root@flannel-bastion ~]# journalctl -u os-collect-config 148 | 149 | -- Logs begin at Wed 2016-05-18 13:04:51 UTC, end at Wed 2016-05-18 13:19:05 UTC. -- 150 | May 18 13:09:55 flannel-bastion systemd[1]: Started Collect metadata and run hook commands.. 151 | May 18 13:09:55 flannel-bastion systemd[1]: Starting Collect metadata and run hook commands.... 152 | May 18 13:09:59 flannel-bastion os-collect-config[19193]: 2016-05-18 13:09:59.586 19193 WARNING os-collect-config [-] Source [request] Unavailable. 153 | May 18 13:09:59 flannel-bastion os-collect-config[19193]: 2016-05-18 13:09:59.586 19193 WARNING os_collect_config.local [-] /var/lib/os-collect-config/local-data not found. Skipping 154 | May 18 13:09:59 flannel-bastion os-collect-config[19193]: 2016-05-18 13:09:59.587 19193 WARNING os_collect_config.local [-] No local metadata found (['/var/lib/os-collect-config/local-data']) 155 | May 18 13:09:59 flannel-bastion os-collect-config[19193]: 2016-05-18 13:09:59.587 19193 WARNING os_collect_config.zaqar [-] No auth_url configured. 
156 | May 18 13:09:59 flannel-bastion os-collect-config[19193]: WARNING:root:Base directory /opt/stack/os-config-refresh is deprecated. The recommended base directory is /usr/libexec/os-refresh-con 157 | May 18 13:09:59 flannel-bastion os-collect-config[19193]: [2016-05-18 13:09:59,640] (os-refresh-config) [INFO] Starting phase pre-configure 158 | May 18 13:09:59 flannel-bastion os-collect-config[19193]: INFO:os-refresh-config:Starting phase pre-configure 159 | May 18 13:09:59 flannel-bastion os-collect-config[19193]: ----------------------- PROFILING ----------------------- 160 | May 18 13:09:59 flannel-bastion os-collect-config[19193]: Target: pre-configure.d 161 | May 18 13:09:59 flannel-bastion os-collect-config[19193]: Script Seconds 162 | May 18 13:09:59 flannel-bastion os-collect-config[19193]: --------------------------------------- ---------- 163 | May 18 13:09:59 flannel-bastion os-collect-config[19193]: --------------------- END PROFILING --------------------- 164 | ``` 165 | 166 | If a SoftwareDeployment script failed, you can fine the error in journalctl too: 167 | 168 | ```bash 169 | Failed to run 'ansible-playbook -vvvv --inventory /var/lib/ansible/inventory /var/lib/ansible/playbooks/main.yml', full log is in atomic-bastion.example.com:/var/log/ansible.11656 170 | + exit 1 171 | 172 | [2016-05-18 11:12:52,787] (heat-config) [ERROR] Error running /var/lib/heat-config/heat-config-script/ee5bdeea-5991-4f27-aaba-786efeaa2b2b. [1] 173 | ``` 174 | -------------------------------------------------------------------------------- /bastion.yaml: -------------------------------------------------------------------------------- 1 | heat_template_version: 2016-10-14 2 | 3 | 4 | description: > 5 | A host used for running openshift-ansible playbooks and other optional services. 6 | 7 | 8 | parameters: 9 | 10 | node_count: 11 | type: number 12 | description: > 13 | Number of non-master nodes to create. 
14 | 15 | # What version of OpenShift Container Platform to install 16 | # This value is used to select the RPM repo for the OCP release to install 17 | ocp_version: 18 | type: string 19 | description: > 20 | The version of OpenShift Container Platform to deploy 21 | 22 | # What version of OpenStack Platform to install 23 | # This value is used to select the RPM repo for the OSP release to install 24 | osp_version: 25 | type: string 26 | default: "10" 27 | description: > 28 | The version of OpenStack Platform to use to collect data 29 | 30 | # Allow the caller to specify the version of ansible 31 | ansible_version: 32 | type: string 33 | description: > 34 | Set the RPM version of Ansible that will run on the bastion 35 | If unset, use current version 36 | 37 | key_name: 38 | description: > 39 | A pre-submitted SSH key to access the VM hosts 40 | type: string 41 | constraints: 42 | - custom_constraint: nova.keypair 43 | 44 | image: 45 | description: > 46 | Select a base image to use for the bastion server 47 | type: string 48 | constraints: 49 | - custom_constraint: glance.image 50 | 51 | flavor: 52 | description: > 53 | Define the hardware characteristics for the VMs: CPU, Memory, base disk 54 | type: string 55 | constraints: 56 | - custom_constraint: nova.flavor 57 | 58 | hostname: 59 | description: > 60 | The Infrastructure hostname portion of the FQDN 61 | type: string 62 | constraints: 63 | - allowed_pattern: '[a-z0-9\-]*' 64 | description: Hostname must contain only characters [a-z0-9\-]. 
65 | 66 | domain_name: 67 | description: > 68 | All VMs will be placed in this domain 69 | type: string 70 | 71 | # Software Subscription information 72 | rhn_username: 73 | description: > 74 | A valid user with entitlements to RHEL and OpenShift software repos 75 | type: string 76 | 77 | rhn_password: 78 | description: > 79 | The password for the RHN user 80 | type: string 81 | hidden: true 82 | 83 | # Red Hat satellite subscription parameters 84 | sat6_hostname: 85 | type: string 86 | description: > 87 | The hostname of the Satellite 6 server which will provide software updates 88 | default: '' 89 | 90 | sat6_organization: 91 | type: string 92 | description: > 93 | An organization string provided by Sat6 to group subscriptions 94 | default: '' 95 | 96 | sat6_activationkey: 97 | type: string 98 | description: > 99 | An activation key string provided by Sat6 to enable subscriptions 100 | default: '' 101 | 102 | rhn_pool: 103 | description: > 104 | A subscription pool containing the RHEL and OpenShift software repos 105 | OPTIONAL 106 | type: string 107 | hidden: true 108 | 109 | extra_rhn_pools: 110 | type: comma_delimited_list 111 | description: List of rhn pools which will be installed on each node. 
112 | default: '' 113 | 114 | ssh_user: 115 | description: > 116 | The user for SSH access to the VM hosts 117 | type: string 118 | 119 | fixed_network: 120 | description: > 121 | The name or ID of the internal network 122 | type: string 123 | constraints: 124 | - custom_constraint: neutron.network 125 | 126 | fixed_subnet: 127 | description: > 128 | The name or ID of the internal IPv4 space 129 | type: string 130 | constraints: 131 | - custom_constraint: neutron.subnet 132 | 133 | ansible_public_key: 134 | description: > 135 | The SSH public key that Ansible will use to access master and node hosts 136 | This will be placed on each VM host in /root/.ssh/authorized_keys 137 | type: string 138 | 139 | ansible_private_key: 140 | description: > 141 | The private key that Ansible will use to access master and node hosts 142 | This file will be placed on the bastion host and protected 143 | type: string 144 | 145 | docker_volume_size: 146 | description: > 147 | The size of a cinder volume in GB to allocate to docker for 148 | container/image storage 149 | type: number 150 | default: 25 151 | 152 | floating_ip: 153 | description: > 154 | TBD: What is this a floating IP for? For the DNS server? 155 | type: string 156 | 157 | port: 158 | description: Neutron port (with a floating IP address) to assign to the DNS Nova Server 159 | type: string 160 | 161 | # LDAP 162 | # For external and/or common user information with OpenStack 163 | ldap_ip: 164 | description: > 165 | The IP address of the LDAP server providing OpenShift user information 166 | type: string 167 | default: '' 168 | 169 | ldap_hostname: 170 | description: > 171 | The hostname of the LDAP server providing OpenShift user information 172 | type: string 173 | default: '' 174 | 175 | # Delay openshift installation until the master is ready to accept 176 | timeout: 177 | description: Time to wait until the master setup is ready. 
178 | type: number 179 | default: 4000 180 | 181 | system_update: 182 | type: boolean 183 | 184 | openshift_ansible_git_url: 185 | description: > 186 | The location of the OpenShift Ansible playbooks. A Git repository URL 187 | type: string 188 | 189 | openshift_ansible_git_rev: 190 | description: > 191 | A specific revision of the Ansible playbooks to use for installation 192 | type: string 193 | 194 | extra_repository_urls: 195 | type: comma_delimited_list 196 | description: List of repository URLs which will be installed on each node. 197 | default: '' 198 | 199 | extra_docker_repository_urls: 200 | type: comma_delimited_list 201 | description: List of docker repository URLs which will be installed on each node, if a repo is insecure use '#insecure' suffix. 202 | default: '' 203 | 204 | ca_cert: 205 | type: string 206 | description: Certificate Authority Certificate to be added to trust chain 207 | 208 | resources: 209 | 210 | # A VM to provide host based orchestration and other sub-services 211 | host: 212 | type: OS::Nova::Server 213 | properties: 214 | name: 215 | str_replace: 216 | template: "HOST.DOMAIN" 217 | params: 218 | HOST: {get_param: hostname} 219 | DOMAIN: {get_param: domain_name} 220 | admin_user: {get_param: ssh_user} 221 | image: {get_param: image} 222 | flavor: {get_param: flavor} 223 | key_name: {get_param: key_name} 224 | networks: 225 | - port: {get_param: port} 226 | user_data_format: SOFTWARE_CONFIG 227 | user_data_update_policy: IGNORE 228 | user_data: {get_resource: init} 229 | 230 | # Install, configure and enable the Heat configuration agent 231 | config_agent: 232 | type: collect-config-setup/install_config_agent_centos_yum.yaml 233 | properties: 234 | osp_version: {get_param: osp_version} 235 | 236 | # Collect the results from a set of resources 237 | init: 238 | type: OS::Heat::MultipartMime 239 | properties: 240 | parts: 241 | - config: {get_resource: set_hostname} 242 | - config: {get_resource: included_files} 243 | - config: 
{get_resource: update_ca_cert} 244 | - config: {get_resource: rhn_register} 245 | - config: {get_resource: set_extra_repos} 246 | - config: {get_resource: set_extra_docker_repos} 247 | - config: {get_resource: host_update} 248 | - config: {get_attr: [config_agent, config]} 249 | type: multipart 250 | - config: {get_resource: bastion_boot} 251 | 252 | # Install the DNS server and retrieve the Ansible playbooks for OpenShift 253 | bastion_boot: 254 | type: OS::Heat::SoftwareConfig 255 | properties: 256 | group: script 257 | config: 258 | str_replace: 259 | params: 260 | $OPENSHIFT_ANSIBLE_GIT_URL: {get_param: openshift_ansible_git_url} 261 | $OPENSHIFT_ANSIBLE_GIT_REV: {get_param: openshift_ansible_git_rev} 262 | $DOCKER_VOLUME_ID: {get_resource: docker_volume} 263 | $ANSIBLE_VERSION: {get_param: ansible_version} 264 | template: {get_file: fragments/bastion-boot.sh} 265 | 266 | # Compose the FQDN for cloud-init 267 | set_hostname: 268 | type: OS::Heat::CloudConfig 269 | properties: 270 | cloud_config: 271 | hostname: {get_param: hostname} 272 | fqdn: 273 | str_replace: 274 | template: "HOST.DOMAIN" 275 | params: 276 | HOST: {get_param: hostname} 277 | DOMAIN: {get_param: domain_name} 278 | 279 | # Place host configuration files via cloud-init 280 | included_files: 281 | type: OS::Heat::CloudConfig 282 | properties: 283 | cloud_config: 284 | write_files: 285 | - path: /usr/local/bin/retry 286 | permissions: 0755 287 | content: {get_file: fragments/retry.sh} 288 | - path: /usr/local/bin/merge_dict 289 | permissions: 0755 290 | content: {get_file: fragments/merge_dict.py} 291 | - path: /usr/local/share/openshift-on-openstack/common_functions.sh 292 | permissions: 0755 293 | content: 294 | str_replace: 295 | params: 296 | $WC_NOTIFY: { get_attr: ['wait_handle', 'curl_cli'] } 297 | template: {get_file: fragments/common_functions.sh} 298 | - path: /root/.ssh/id_rsa 299 | permissions: 0600 300 | content: {get_param: ansible_private_key} 301 | - path: /root/.ssh/id_rsa.pub 
302 | permissions: 0600 303 | content: {get_param: ansible_public_key} 304 | - path: /etc/pki/ca-trust/source/anchors/ca.crt 305 | permissions: 0600 306 | content: {get_param: ca_cert} 307 | ssh_authorized_keys: 308 | - {get_param: ansible_public_key} 309 | 310 | # Add CA Cert to trust chain 311 | update_ca_cert: 312 | type: OS::Heat::SoftwareConfig 313 | properties: 314 | config: {get_file: fragments/ca_cert.sh} 315 | 316 | # Register the host with RHN for access to software packages 317 | rhn_register: 318 | type: OS::Heat::SoftwareConfig 319 | properties: 320 | config: 321 | str_replace: 322 | params: 323 | $OCP_VERSION: {get_param: ocp_version} 324 | $RHN_USERNAME: {get_param: rhn_username} 325 | $RHN_PASSWORD: {get_param: rhn_password} 326 | $SAT6_HOSTNAME: {get_param: sat6_hostname} 327 | $SAT6_ORGANIZATION: {get_param: sat6_organization} 328 | $SAT6_ACTIVATIONKEY: {get_param: sat6_activationkey} 329 | $POOL_ID: {get_param: rhn_pool} 330 | $EXTRA_POOL_IDS: 331 | list_join: 332 | - " --pool=" 333 | - {get_param: extra_rhn_pools} 334 | template: {get_file: fragments/rhn-register.sh} 335 | 336 | # Enable any extra repositories 337 | set_extra_repos: 338 | type: OS::Heat::SoftwareConfig 339 | properties: 340 | config: 341 | str_replace: 342 | params: 343 | $REPOLIST: 344 | list_join: 345 | - " " 346 | - {get_param: extra_repository_urls} 347 | template: {get_file: fragments/set-extra-repos.sh} 348 | 349 | set_extra_docker_repos: 350 | type: OS::Heat::SoftwareConfig 351 | properties: 352 | config: 353 | str_replace: 354 | params: 355 | $REPOLIST: 356 | list_join: 357 | - " " 358 | - {get_param: extra_docker_repository_urls} 359 | template: {get_file: fragments/set-extra-docker-repos.sh} 360 | 361 | # Make sure the host software is current 362 | host_update: 363 | type: OS::Heat::SoftwareConfig 364 | properties: 365 | config: 366 | str_replace: 367 | params: 368 | $SYSTEM_UPDATE: {get_param: system_update} 369 | template: {get_file: fragments/host-update.sh} 370 | 
371 | write_templates: 372 | type: OS::Heat::SoftwareConfig 373 | properties: 374 | group: script 375 | config: 376 | list_join: 377 | - "\n" 378 | - - "#!/bin/bash" 379 | - "set -eux" 380 | - get_file: templates/var/lib/ansible/group_vars/masters.yml 381 | - get_file: templates/var/lib/ansible/group_vars/nodes.yml 382 | - get_file: templates/var/lib/ansible/host_vars/loadbalancer.yml 383 | - get_file: templates/var/lib/ansible/group_vars/OSv3.yml 384 | - get_file: templates/var/lib/ansible/playbooks/registry.yml 385 | - get_file: templates/var/lib/ansible/playbooks/main.yml 386 | - get_file: templates/var/lib/ansible/playbooks/scaleup.yml 387 | - get_file: templates/var/lib/ansible/playbooks/scaledown.yml 388 | - get_file: templates/var/lib/ansible/inventory 389 | - get_file: templates/var/lib/ansible/playbooks/haproxy.yml 390 | - get_file: templates/var/lib/ansible/templates/etc/haproxy/router.cfg.j2 391 | - get_file: templates/var/lib/ansible/playbooks/ipfailover.yml 392 | - get_file: templates/var/lib/ansible/playbooks/flannel.yml 393 | - get_file: templates/var/lib/ansible/playbooks/quota.yml 394 | - get_file: templates/var/lib/ansible/roles/reboot/tasks/main.yml 395 | - get_file: templates/var/lib/ansible/roles/fstab_mount_options/tasks/main.yml 396 | - get_file: templates/var/lib/ansible/roles/xfs_grub_quota/tasks/main.yml 397 | 398 | update_node_count: 399 | type: OS::Heat::SoftwareConfig 400 | properties: 401 | group: script 402 | inputs: 403 | - name: node_count 404 | config: | 405 | #!/bin/bash 406 | set -eux 407 | mkdir -p /var/lib/ansible 408 | echo "$node_count" > /var/lib/ansible/node_count 409 | 410 | deployment_update_node_count: 411 | depends_on: wait_condition 412 | type: OS::Heat::SoftwareDeployment 413 | properties: 414 | config: 415 | get_resource: update_node_count 416 | server: 417 | get_resource: host 418 | input_values: 419 | node_count: {get_param: node_count} 420 | 421 | deployment_write_templates: 422 | depends_on: wait_condition 423 | 
type: OS::Heat::SoftwareDeployment 424 | properties: 425 | config: 426 | get_resource: write_templates 427 | server: 428 | get_resource: host 429 | 430 | # Apply ansible performance tuning values 431 | tune_ansible: 432 | type: OS::Heat::SoftwareConfig 433 | properties: 434 | group: script 435 | config: 436 | get_file: fragments/tune-ansible.sh 437 | 438 | # Execute the tuning operation on a host 439 | deployment_tune_ansible: 440 | depends_on: wait_condition 441 | type: OS::Heat::SoftwareDeployment 442 | properties: 443 | actions: ['CREATE'] 444 | config: 445 | get_resource: tune_ansible 446 | server: 447 | get_resource: host 448 | 449 | node_cleanup: 450 | type: OS::Heat::SoftwareConfig 451 | properties: 452 | group: script 453 | config: | 454 | #!/bin/bash 455 | set -eux 456 | (subscription-manager unregister && subscription-manager clean) || true 457 | 458 | deployment_bastion_node_cleanup: 459 | depends_on: [host, wait_condition] 460 | type: OS::Heat::SoftwareDeployment 461 | properties: 462 | actions: ['DELETE'] 463 | config: 464 | get_resource: node_cleanup 465 | server: 466 | get_resource: host 467 | 468 | # Additional space for Docker container and image storage 469 | docker_volume: 470 | type: OS::Cinder::Volume 471 | properties: 472 | size: {get_param: docker_volume_size} 473 | 474 | # Bind docker file space to the host filesystem 475 | docker_volume_attach: 476 | type: OS::Cinder::VolumeAttachment 477 | properties: 478 | instance_uuid: {get_resource: host} 479 | volume_id: {get_resource: docker_volume} 480 | 481 | # Wait until the cloud-init process ends and reports or times out 482 | wait_condition: 483 | type: OS::Heat::WaitCondition 484 | properties: 485 | handle: {get_resource: wait_handle} 486 | timeout: {get_param: timeout} 487 | 488 | # This is passed to bastion_init cloud-init script as a curl CLI command 489 | # When the cloud-init process ends it queries the URL with 490 | # a message which ends the wait and returns the value 491 | 
wait_handle: 492 | type: OS::Heat::WaitConditionHandle 493 | 494 | outputs: 495 | instance_ip: 496 | description: Instance private IP (used by other nodes for DNS queries). 497 | value: {get_attr: [host, first_address]} 498 | -------------------------------------------------------------------------------- /collect-config-setup/fragments/configure_config_agent.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -eux 3 | 4 | # this file should be included in write_files section for the node 5 | source /usr/local/share/openshift-on-openstack/common_functions.sh 6 | 7 | # on Atomic host os-collect-config runs inside a container which is 8 | # fetched&started in another step 9 | [ -e /run/ostree-booted ] && exit 0 10 | 11 | # os-apply-config templates directory 12 | oac_templates=/usr/libexec/os-apply-config/templates 13 | mkdir -p $oac_templates/etc 14 | 15 | # initial /etc/os-collect-config.conf 16 | cat <<EOF >/etc/os-collect-config.conf 17 | [DEFAULT] 18 | command = os-refresh-config 19 | EOF 20 | 21 | # template for building os-collect-config.conf for polling heat 22 | cat <<EOF >$oac_templates/etc/os-collect-config.conf 23 | [DEFAULT] 24 | {{^os-collect-config.command}} 25 | command = os-refresh-config 26 | {{/os-collect-config.command}} 27 | {{#os-collect-config}} 28 | {{#command}} 29 | command = {{command}} 30 | {{/command}} 31 | {{#polling_interval}} 32 | polling_interval = {{polling_interval}} 33 | {{/polling_interval}} 34 | {{#cachedir}} 35 | cachedir = {{cachedir}} 36 | {{/cachedir}} 37 | {{#collectors}} 38 | collectors = {{.}} 39 | {{/collectors}} 40 | 41 | {{#cfn}} 42 | [cfn] 43 | {{#metadata_url}} 44 | metadata_url = {{metadata_url}} 45 | {{/metadata_url}} 46 | stack_name = {{stack_name}} 47 | secret_access_key = {{secret_access_key}} 48 | access_key_id = {{access_key_id}} 49 | path = {{path}} 50 | {{/cfn}} 51 | 52 | {{#heat}} 53 | [heat] 54 | auth_url = {{auth_url}} 55 | user_id = {{user_id}} 56 | password = 
{{password}} 57 | project_id = {{project_id}} 58 | stack_id = {{stack_id}} 59 | resource_name = {{resource_name}} 60 | {{/heat}} 61 | 62 | {{#request}} 63 | [request] 64 | {{#metadata_url}} 65 | metadata_url = {{metadata_url}} 66 | {{/metadata_url}} 67 | {{/request}} 68 | 69 | {{/os-collect-config}} 70 | EOF 71 | mkdir -p $oac_templates/var/run/heat-config 72 | 73 | # template for writing heat deployments data to a file 74 | echo "{{deployments}}" > $oac_templates/var/run/heat-config/heat-config 75 | 76 | # os-refresh-config scripts directory 77 | orc_scripts=/usr/libexec/os-refresh-config 78 | for d in pre-configure.d configure.d migration.d post-configure.d; do 79 | install -m 0755 -o root -g root -d $orc_scripts/$d 80 | done 81 | 82 | # os-refresh-config script for running os-apply-config 83 | cat <<EOF >$orc_scripts/configure.d/20-os-apply-config 84 | #!/bin/bash 85 | set -ue 86 | 87 | exec os-apply-config 88 | EOF 89 | chmod 700 $orc_scripts/configure.d/20-os-apply-config 90 | 91 | ln -s /usr/share/openstack-heat-templates/software-config/elements/heat-config/os-refresh-config/configure.d/55-heat-config $orc_scripts/configure.d/55-heat-config 92 | 93 | # config hook for shell scripts 94 | hooks_dir=/var/lib/heat-config/hooks 95 | mkdir -p $hooks_dir 96 | 97 | # install hook for configuring with shell scripts 98 | ln -s /usr/share/openstack-heat-templates/software-config/heat-container-agent/scripts/hooks/script $hooks_dir/script 99 | 100 | # install heat-config-notify command 101 | ln -s /usr/share/openstack-heat-templates/software-config/elements/heat-config/bin/heat-config-notify /usr/bin/heat-config-notify 102 | 103 | # run once to write out /etc/os-collect-config.conf 104 | # use notify_failure from common_functions.sh to 105 | # make sure cloud-init reports failure 106 | os-collect-config --one-time --debug || 107 | notify_failure "failed to run os-collect-config" 108 | 109 | # check that a valid metadata_url was set 110 | curl "$(grep metadata_url 
/etc/os-collect-config.conf |sed 's/metadata_url = //')" || 111 | notify_failure "failed to connect to os-collect-config metadata_url" 112 | 113 | cat /etc/os-collect-config.conf 114 | -------------------------------------------------------------------------------- /collect-config-setup/fragments/install_config_agent_yum.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -eux 3 | 4 | # OSP_VERSION is set by ENV or by heat string replacement 5 | [ -n "$OSP_VERSION" ] || (echo "Missing required value OSP_VERSION" ; exit 1) 6 | 7 | # on Atomic host os-collect-config runs inside a container which is 8 | # fetched&started in another step 9 | [ -e /run/ostree-booted ] && exit 0 10 | 11 | if ! yum info os-collect-config; then 12 | # if os-collect-config package is not available, first check if 13 | # the repo is available but disabled, otherwise install the package 14 | # from epel 15 | if yum repolist disabled|grep rhel-7-server-openstack-$OSP_VERSION-rpms; then 16 | subscription-manager repos --enable="rhel-7-server-openstack-$OSP_VERSION-rpms" 17 | if [ "$OSP_VERSION" -lt 10 ] ; then 18 | subscription-manager repos --enable="rhel-7-server-openstack-$OSP_VERSION-director-rpms" 19 | fi 20 | else 21 | yum -y install centos-release-openstack-liberty 22 | fi 23 | fi 24 | yum -y install os-collect-config python-zaqarclient os-refresh-config os-apply-config openstack-heat-templates python-oslo-log python-psutil 25 | #yum-config-manager --disable 'epel*' 26 | -------------------------------------------------------------------------------- /collect-config-setup/fragments/start_config_agent.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -eux 3 | 4 | # on Atomic host os-collect-config runs inside a container which is 5 | # fetched&started in another step 6 | [ -e /run/ostree-booted ] && exit 0 7 | 8 | # enable and start service to poll for deployment changes 9 | 
systemctl enable os-collect-config 10 | systemctl start --no-block os-collect-config 11 | -------------------------------------------------------------------------------- /collect-config-setup/install_config_agent_centos_yum.yaml: -------------------------------------------------------------------------------- 1 | heat_template_version: 2016-10-14 2 | 3 | parameters: 4 | 5 | # What version of OpenStack Platform to install 6 | # This value is used to select the RPM repo for the OSP release to install 7 | osp_version: 8 | type: string 9 | default: "10" 10 | description: > 11 | The version of OpenStack Platform to use to collect data 12 | 13 | resources: 14 | 15 | install_config_agent_yum: 16 | type: "OS::Heat::SoftwareConfig" 17 | properties: 18 | group: ungrouped 19 | config: 20 | str_replace: 21 | params: 22 | $OSP_VERSION: {get_param: osp_version} 23 | template: {get_file: fragments/install_config_agent_yum.sh} 24 | 25 | configure_config_agent: 26 | type: "OS::Heat::SoftwareConfig" 27 | properties: 28 | group: ungrouped 29 | config: {get_file: fragments/configure_config_agent.sh} 30 | 31 | start_config_agent: 32 | type: "OS::Heat::SoftwareConfig" 33 | properties: 34 | group: ungrouped 35 | config: {get_file: fragments/start_config_agent.sh} 36 | 37 | install_config_agent: 38 | type: "OS::Heat::MultipartMime" 39 | properties: 40 | parts: 41 | - config: {get_resource: install_config_agent_yum} 42 | - config: {get_resource: configure_config_agent} 43 | - config: {get_resource: start_config_agent} 44 | 45 | outputs: 46 | config: 47 | value: {get_resource: install_config_agent} 48 | -------------------------------------------------------------------------------- /customize-disk-image: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | 3 | import argparse 4 | import distutils.spawn 5 | import os 6 | import subprocess 7 | import sys 8 | import urllib2 9 | 10 | description = 'Customize disk image for Openshift.' 
11 | 12 | parser = argparse.ArgumentParser(description=description) 13 | 14 | parser.add_argument('--disk', dest='disk', required=True, 15 | help='disk image to modify') 16 | parser.add_argument('--deployment-type', dest='deployment', default='origin', 17 | help='disk image to modify') 18 | parser.add_argument('--memsize', dest='memsize', default='512', 19 | help='memory size to be used by virt-customize') 20 | parser.add_argument('--package', dest="packages", action='append', 21 | default=['deltarpm', 22 | 'git', 23 | 'httpd-tools', 24 | 'iptables', 25 | 'iptables-services', 26 | 'PyYAML', 27 | 'ceph-common', 28 | 'glusterfs-fuse', 29 | 'nfs-utils', 30 | 'libselinux-python', 31 | 'firewalld', 32 | 'logrotate', 33 | 'pcs', 34 | 'bash-completion'], 35 | help='additional package to install') 36 | parser.add_argument('--sm-credentials', dest="sm_creds", 37 | help='subscription-manager credentials (user:password)') 38 | parser.add_argument('--sm-pool', dest="sm_pool", 39 | help='subscription-manager pool to attach') 40 | parser.add_argument('--sm-repo', dest="sm_repos", action='append', 41 | default=['rhel-7-server-rpms', 42 | 'rhel-7-server-extras-rpms', 43 | 'rhel-7-server-optional-rpms', 44 | 'rhel-7-server-ose-3.2-rpms'], 45 | help='subscription-manager repository to enable') 46 | parser.add_argument('--no-update', dest="update", action='store_false', 47 | default=True, help='update packages') 48 | parser.add_argument('--repos', dest="repos", action='append', 49 | default=[], help='custom additional YUM repositories') 50 | parser.add_argument('-v', '--verbose', dest='verbose', action='store_true', 51 | help='verbose mode') 52 | 53 | args = parser.parse_args() 54 | 55 | if not distutils.spawn.find_executable('virt-customize'): 56 | print "virt-customize must be installed on the system" 57 | sys.exit(1) 58 | 59 | cmd = ["virt-customize", "-a", args.disk] 60 | 61 | if args.verbose: 62 | cmd += ["-v"] 63 | 64 | # Create set of packages to install 65 | if 
args.deployment == "enterprise": 66 | args.packages += ["atomic-openshift-master", "atomic-openshift-node", 67 | "tuned-profiles-atomic-openshift-node"] 68 | elif args.deployment == "origin": 69 | args.packages += ["origin-master", "origin-node", 70 | "tuned-profiles-origin-node"] 71 | 72 | maxamillion_copr = "https://copr.fedoraproject.org/coprs/maxamillion/" \ 73 | "origin-next/repo/epel-7/" \ 74 | "maxamillion-origin-next-epel-7.repo" 75 | cmd += ["--write", "/etc/yum.repos.d/maxamillion-origin-next.repo:" + 76 | urllib2.urlopen(maxamillion_copr).read()] 77 | 78 | # add custom repositories 79 | for repo in args.repos: 80 | name = os.path.basename(repo) 81 | cmd += ["--write", "/etc/yum.repos.d/%s:" % (name) + 82 | urllib2.urlopen(repo).read()] 83 | 84 | # Activate Red Hat subscriptions 85 | if args.sm_creds: 86 | username, password = args.sm_creds.split(':') 87 | 88 | cmd += ["--run-command", "subscription-manager register --username %s " 89 | "--password %s" % (username, password)] 90 | 91 | if args.sm_pool: 92 | attach_args = "--pool " + args.sm_pool 93 | else: 94 | attach_args = "--auto" 95 | 96 | cmd += ["--run-command", "subscription-manager attach " + attach_args, 97 | "--run-command", "subscription-manager repos --disable='*'", 98 | "--run-command", "subscription-manager repos " + 99 | " ".join(map(lambda repo: '--enable=' + repo, args.sm_repos))] 100 | 101 | # Run a system update 102 | if args.update: 103 | cmd += ["--update"] 104 | 105 | # Set up EPEL 106 | cmd += ["--install", "https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm", 107 | "--run-command", "sed -i -e 's/^enabled=1/enabled=0/' /etc/yum.repos.d/epel.repo"] 108 | 109 | # Install ansible 110 | cmd += ["--run-command", "yum -y --enablerepo=epel install ansible"] 111 | 112 | # Install required packages 113 | cmd += ["--install", ",".join(args.packages)] 114 | 115 | if args.sm_creds: 116 | cmd += ["--run-command", "subscription-manager remove --all", 117 | "--run-command", 
"subscription-manager unregister"] 118 | 119 | if args.verbose: 120 | print "Running command " + " ".join(cmd) 121 | 122 | subprocess.call(cmd) 123 | 124 | cmd = ["virt-sysprep", "-a", args.disk, "--selinux-relabel"] 125 | 126 | if args.verbose: 127 | print "Running command " + " ".join(cmd) 128 | 129 | subprocess.call(cmd) 130 | -------------------------------------------------------------------------------- /env_aop.yaml: -------------------------------------------------------------------------------- 1 | parameters: 2 | ssh_key_name: default 3 | bastion_image: rhel72 4 | bastion_flavor: m1.medium 5 | master_image: rhel72 6 | master_flavor: m1.medium 7 | infra_image: rhel72 8 | infra_flavor: m1.medium 9 | node_image: rhel72 10 | node_flavor: m1.medium 11 | loadbalancer_image: rhel72 12 | loadbalancer_flavor: m1.medium 13 | 14 | external_network: ext_net 15 | dns_nameserver: 8.8.4.4,8.8.8.8 16 | node_count: 2 17 | 18 | rhn_username: "Your RHN Username" 19 | rhn_password: "Your RHN Password" 20 | sat6_hostname: "" 21 | sat6_organization: "" 22 | sat6_activationkey: "" 23 | rhn_pool: '' 24 | 25 | deployment_type: openshift-enterprise 26 | domain_name: "example.com" 27 | master_hostname: "openshift-master" 28 | node_hostname: "openshift-node" 29 | ssh_user: cloud-user 30 | master_docker_volume_size_gb: 25 31 | infra_docker_volume_size_gb: 25 32 | node_docker_volume_size_gb: 25 33 | 34 | resource_registry: 35 | OOShift::LoadBalancer: loadbalancer_neutron.yaml 36 | OOShift::ContainerPort: sdn_openshift_sdn.yaml 37 | OOShift::IPFailover: ipfailover_keepalived.yaml 38 | OOShift::DockerVolume: volume_docker.yaml 39 | OOShift::DockerVolumeAttachment: volume_attachment_docker.yaml 40 | OOShift::RegistryVolume: registry_ephemeral.yaml 41 | -------------------------------------------------------------------------------- /env_flannel.yaml: -------------------------------------------------------------------------------- 1 | parameters: 2 | openshift_sdn: flannel 3 | 4 | 
resource_registry: 5 | OOShift::ContainerPort: sdn_flannel.yaml 6 | -------------------------------------------------------------------------------- /env_ipfailover_keepalived.yaml: -------------------------------------------------------------------------------- 1 | resource_registry: 2 | OOShift::IPFailover: ipfailover_keepalived.yaml 3 | -------------------------------------------------------------------------------- /env_ipfailover_none.yaml: -------------------------------------------------------------------------------- 1 | resource_registry: 2 | OOShift::IPFailover: ipfailover_none.yaml 3 | -------------------------------------------------------------------------------- /env_ldap.yaml: -------------------------------------------------------------------------------- 1 | parameter_defaults: 2 | ldap_hostname: ldap.example.com 3 | ldap_ip: 192.168.0.1 4 | ldap_url: ldap://ldap.example.com:389/cn=users,cn=compat,dc=example,dc=com?uid 5 | ldap_insecure: true 6 | -------------------------------------------------------------------------------- /env_loadbalancer_dedicated.yaml: -------------------------------------------------------------------------------- 1 | parameters: 2 | loadbalancer_type: dedicated 3 | 4 | resource_registry: 5 | OOShift::LoadBalancer: loadbalancer_dedicated.yaml 6 | OOShift::IPFailover: ipfailover_none.yaml 7 | -------------------------------------------------------------------------------- /env_loadbalancer_external.yaml: -------------------------------------------------------------------------------- 1 | parameters: 2 | loadbalancer_type: 'external' 3 | 4 | resource_registry: 5 | OOShift::LoadBalancer: loadbalancer_external.yaml 6 | OOShift::IPFailover: ipfailover_none.yaml 7 | -------------------------------------------------------------------------------- /env_loadbalancer_neutron.yaml: -------------------------------------------------------------------------------- 1 | parameters: 2 | loadbalancer_type: 'neutron' 3 | 4 | 
resource_registry: 5 | OOShift::LoadBalancer: loadbalancer_neutron.yaml 6 | OOShift::IPFailover: ipfailover_keepalived.yaml 7 | -------------------------------------------------------------------------------- /env_loadbalancer_none.yaml: -------------------------------------------------------------------------------- 1 | parameters: 2 | loadbalancer_type: 'none' 3 | 4 | resource_registry: 5 | OOShift::LoadBalancer: loadbalancer_none.yaml 6 | OOShift::IPFailover: ipfailover_keepalived.yaml 7 | -------------------------------------------------------------------------------- /env_origin.yaml: -------------------------------------------------------------------------------- 1 | parameters: 2 | ssh_key_name: default 3 | bastion_image: centos72 4 | bastion_flavor: m1.medium 5 | master_image: centos72 6 | master_flavor: m1.medium 7 | infra_image: centos72 8 | infra_flavor: m1.medium 9 | node_image: centos72 10 | node_flavor: m1.medium 11 | loadbalancer_image: centos72 12 | loadbalancer_flavor: m1.medium 13 | external_network: ext_net 14 | dns_nameserver: 8.8.4.4,8.8.8.8 15 | node_count: 2 16 | 17 | rhn_username: "" 18 | rhn_password: "" 19 | sat6_hostname: "" 20 | sat6_organization: "" 21 | sat6_activationkey: "" 22 | rhn_pool: '' 23 | 24 | deployment_type: origin 25 | domain_name: "example.com" 26 | master_hostname: "origin-master" 27 | node_hostname: "origin-node" 28 | ssh_user: centos 29 | master_docker_volume_size_gb: 25 30 | infra_docker_volume_size_gb: 25 31 | node_docker_volume_size_gb: 25 32 | openshift_ansible_git_url: https://github.com/openshift/openshift-ansible.git 33 | openshift_ansible_git_rev: master 34 | 35 | resource_registry: 36 | OOShift::LoadBalancer: loadbalancer_neutron.yaml 37 | OOShift::ContainerPort: sdn_openshift_sdn.yaml 38 | OOShift::IPFailover: ipfailover_keepalived.yaml 39 | OOShift::DockerVolume: volume_docker.yaml 40 | OOShift::DockerVolumeAttachment: volume_attachment_docker.yaml 41 | OOShift::RegistryVolume: registry_ephemeral.yaml 42 | 
-------------------------------------------------------------------------------- /env_registry_ephemeral.yaml: -------------------------------------------------------------------------------- 1 | parameters: 2 | prepare_registry: true 3 | 4 | resource_registry: 5 | OOShift::RegistryVolume: registry_ephemeral.yaml 6 | -------------------------------------------------------------------------------- /env_registry_persistent.yaml: -------------------------------------------------------------------------------- 1 | resource_registry: 2 | OOShift::RegistryVolume: registry_persistent.yaml 3 | -------------------------------------------------------------------------------- /env_sdn_flannel.yaml: -------------------------------------------------------------------------------- 1 | parameters: 2 | openshift_sdn: flannel 3 | 4 | resource_registry: 5 | OOShift::ContainerPort: sdn_flannel.yaml 6 | -------------------------------------------------------------------------------- /fragments/add_dns_record.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Update the DNS server with a record for this host 3 | 4 | set -eu 5 | set -x 6 | set -o pipefail 7 | 8 | DNS_UPDATE_KEY="%DNS_UPDATE_KEY%" 9 | 10 | if [ -z "$DNS_UPDATE_KEY" ]; then 11 | echo "Skipping the DNS update because the key is empty." 12 | exit 13 | fi 14 | 15 | if yum info python-dns; then 16 | retry yum -y install python-dns 17 | else 18 | retry yum -y install python2-dns 19 | fi 20 | 21 | 22 | NAME="%DNS_ENTRY%" 23 | 24 | # If we didn't get an explicit name, use this server's hostname 25 | if [ -n "$NAME" -a "${NAME:0:1}" = "%" -a "${NAME: -1}" = "%" ]; then 26 | NAME="$(hostname)" 27 | fi 28 | 29 | # NOTE: the dot after the hostname is necessary 30 | /usr/local/bin/update_dns -z "%ZONE%" -s "%DNS_SERVER%" -k "$DNS_UPDATE_KEY" "$NAME." 
"%IP_ADDRESS%" 31 | -------------------------------------------------------------------------------- /fragments/bastion-ansible.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Create an Ansible inventory file if it doesn't already exist 3 | 4 | set -eu 5 | set -x 6 | set -o pipefail 7 | 8 | ANSDIR=/var/lib/ansible 9 | INVENTORY=$ANSDIR/inventory 10 | NODESFILE=$ANSDIR/node_list 11 | 12 | function get_new_nodes() { 13 | # compare old and new list of nodes and return all newly added nodes 14 | # separated by comma instead of newline 15 | if [ -e ${ANSDIR}.deployed ]; then 16 | str=$(comm -13 <(sort ${ANSDIR}.deployed/node_list) <(sort ${ANSDIR}/node_list) | sed ':a;N;$!ba;s/\n/","/g') 17 | [ -z "$str" ] && echo '' || echo "\"$str\"" 18 | else 19 | echo '' 20 | fi 21 | } 22 | 23 | function create_metadata_json() { 24 | # $1 - metadata filename 25 | infra_arr=($all_infra_nodes) 26 | infra_count=${#infra_arr[@]} 27 | master_arr=($all_master_nodes) 28 | master_count=${#master_arr[@]} 29 | new_nodes=$(get_new_nodes) 30 | if [ -n "$os_username" ] && [ -n "$os_password" ] && \ 31 | [ -n "$os_auth_url" ] && [ -n "$os_tenant_name" ]; then 32 | openstack_cloud_provider=true 33 | else 34 | openstack_cloud_provider=false 35 | fi 36 | deploy_router_or_registry=$([ "$deploy_router" == "True" -o \ 37 | "$deploy_registry" == "True" ] && echo true || echo false) 38 | 39 | cat << EOF > $1 40 | { 41 | "openshift_use_openshift_sdn": $([ "$openshift_sdn" == "openshift-sdn" ] && echo true || echo false), 42 | "openshift_use_flannel": $([ "$openshift_sdn" == "flannel" ] && echo true || echo false), 43 | "master_ha": $([ "$lb_type" != "none" -a $master_count -gt 1 ] && echo true || echo false), 44 | "master_ip": "$master_ip", 45 | "openstack_cloud_provider": $openstack_cloud_provider, 46 | "os_username":"$os_username", 47 | "os_password":"$os_password", 48 | "os_auth_url":"$os_auth_url", 49 | "os_tenant_name":"$os_tenant_name", 50 | 
"os_region_name":"$os_region_name", 51 | "os_domain_name":"$os_domain_name", 52 | "dedicated_lb": $([ "$lb_type" == "dedicated" ] && echo true || echo false), 53 | "no_lb": $([ "$lb_type" == "none" ] && echo true || echo false), 54 | "external_lb": $([ "$lb_type" == "external" ] && echo true || echo false), 55 | "masters": ["$(echo "$all_master_nodes" | sed 's/ /","/g')"], 56 | "infra_nodes": ["$(echo "$all_infra_nodes" | sed 's/ /","/g')"], 57 | "infra_count": $infra_count, 58 | "nodes": ["$(sed ':a;N;$!ba;s/\n/","/g' $NODESFILE)"], 59 | "new_nodes": [$new_nodes], 60 | "deploy_router_or_registry": $deploy_router_or_registry, 61 | "domainname": "$domainname", 62 | "app_subdomain": "${app_subdomain:-"cloudapps.$domainname"}", 63 | "lb_hostname": "$lb_hostname", 64 | "short_lb_hostname": "${lb_hostname%%.$domainname}", 65 | "deploy_router": $([ "$deploy_router" == "True" ] && echo true || echo false), 66 | "deploy_registry": $([ "$deploy_registry" == "True" ] && echo true || echo false), 67 | "registry_volume_fs": "$registry_volume_fs", 68 | "registry_volume_id": "$registry_volume_id", 69 | "registry_volume_size": "$registry_volume_size", 70 | "prepare_registry": $([ "$prepare_registry" == "True" ] && echo true || echo false), 71 | "heat_outputs_path": "$heat_outputs_path", 72 | "ssh_user": "$ssh_user", 73 | "deployment_type": "$deployment_type", 74 | "lb_ip": "$lb_ip", 75 | "dns_forwarders": "$dns_forwarders", 76 | "ldap_url": "$ldap_url", 77 | "ldap_bind_dn": "$ldap_bind_dn", 78 | "ldap_bind_password": "$ldap_bind_password", 79 | "ldap_ca": "$ldap_ca", 80 | "ldap_insecure": "$ldap_insecure", 81 | "ldap_url": "$ldap_url", 82 | "ldap_preferred_username": "$ldap_preferred_username", 83 | "bastion_instance_id": "$bastion_instance_id", 84 | "ansible_first_run": $([ -e ${ANSDIR}.deployed ] && echo false || echo true), 85 | "router_vip": "$router_vip", 86 | "volume_quota": $volume_quota 87 | } 88 | EOF 89 | } 90 | 91 | function create_global_vars() { 92 | if [ -n 
function create_global_vars() {
    # Merge any user-supplied extra openshift-ansible parameters (JSON) into
    # the generated group_vars/OSv3.yml.  No-op when none were provided.
    if [ -n "$extra_openshift_ansible_params" ]; then
        cat << EOF > /tmp/extra_openshift_ansible_params.json
$extra_openshift_ansible_params
EOF
        /usr/local/bin/merge_dict /tmp/extra_openshift_ansible_params.json \
            /var/lib/ansible/group_vars/OSv3.yml
        rm /tmp/extra_openshift_ansible_params.json
    fi
}

function create_master_node_vars() {
    # $1 - node name (short, without domain)
    # Write host_vars for a master.  With no load balancer the master's own
    # FQDN is the public name; otherwise the LB hostname is used.
    if [ "$lb_type" == "none" ]; then
        public_name="$1.$domainname"
    else
        public_name="$lb_hostname"
    fi

    cat << EOF > /var/lib/ansible/host_vars/$1.$domainname.yml
openshift_hostname: $1.$domainname
openshift_public_hostname: $public_name
openshift_master_public_console_url: https://$public_name:8443/console
openshift_master_public_api_url: https://$public_name:8443
EOF
}

function create_openshift_node_vars() {
    # $1 - node name (already fully qualified)
    cat << EOF > /var/lib/ansible/host_vars/$1.yml
openshift_hostname: $1
openshift_public_hostname: $1
EOF
}

function is_scaleup() {
    # check if there are only new openshift nodes added - then we can play the
    # scaleup playbook, otherwise we run the main playbook
    # Returns 0 (scaleup) only when a previous deploy exists AND the inventory
    # diff contains nothing but node-related lines.
    [ -e ${ANSDIR}.deployed ] || return 1
    # check if diff between old and new inventory file contains only
    # node changes (ignore 'new_nodes' changes because nodes
    # are removed from [new_nodes] on the next stack-update run
    # NOTE(review): if the filtered diff output is non-empty this is NOT a
    # pure scaleup, hence the inverted return values.
    (diff $ANSDIR/inventory ${ANSDIR}.deployed/inventory | grep '^[<>]' |
        grep -v new_nodes | grep -v '[<>] $' |
        grep -v '.*-node') && return 1 || return 0
}

function backup_ansdir() {
    # Promote the in-progress ansible dir snapshot to the "deployed" state,
    # replacing the previous one.
    [ -e ${ANSDIR}.deployed ] && rm -rf ${ANSDIR}.deployed
    mv ${ANSDIR}.started ${ANSDIR}.deployed
}

# Nothing to do unless ansible preparation was requested by the stack.
[ "$prepare_ansible" == "False" ] && exit 0

mkdir -p /var/lib/ansible/group_vars
mkdir -p /var/lib/ansible/host_vars

touch $NODESFILE
# ----------------------------------------------------------------------------
# MAIN: decide whether to run ansible at all, pick main vs scaleup playbook,
# run it, and archive the ansible dir on success.
# ----------------------------------------------------------------------------

existing=$(wc -l < $NODESFILE)
if [ -e /var/lib/ansible/node_count ]; then
    node_count=$(cat /var/lib/ansible/node_count)
    # Without autoscaling, wait until every expected node has registered.
    if [ $existing -lt $node_count -a "$autoscaling" != "True" ]; then
        echo "skipping ansible run - only $existing of $node_count is registered"
        exit 0
    fi
fi

create_metadata_json /var/lib/ansible/metadata.json

# generate ansible files from templates (located
# in /var/lib/os-apply-config/templates/)
os-apply-config -m /var/lib/ansible/metadata.json -t /var/lib/os-apply-config/templates

for node in $all_master_nodes; do
    create_master_node_vars $node
done

for node in $all_infra_nodes; do
    create_openshift_node_vars $node.$domainname
done

for node in `cat $NODESFILE`; do
    create_openshift_node_vars $node
done

create_global_vars

# Serialize with any ansible-playbook already in flight.
while pidof -x /bin/ansible-playbook /usr/bin/ansible-playbook; do
    echo "waiting for another ansible-playbook to finish"
    sleep 10
done

if [ -e ${ANSDIR}.deployed ] &&
    diff $ANSDIR/inventory ${ANSDIR}.deployed/inventory; then
    echo "inventory file has not changed since last ansible run, no need to re-run"
    exit 0
fi

[ -e ${ANSDIR}.started ] && rm -rf ${ANSDIR}.started
cp -a ${ANSDIR} ${ANSDIR}.started

# crond was stopped in cloud-init before yum update, make sure it's running
systemctl status crond && systemctl restart crond

export HOME=/root
export ANSIBLE_ROLES_PATH=/usr/share/ansible/openshift-ansible/roles:/var/lib/ansible/roles
export ANSIBLE_HOST_KEY_CHECKING=False

logfile=/var/log/ansible.$$
if is_scaleup; then
    # BUGFIX: quote the command substitution - an unquoted $(get_new_nodes)
    # makes the test depend on word splitting of the function output.
    if [ -z "$(get_new_nodes)" ]; then
        echo "There are no new nodes, not running scaleup playbook"
        backup_ansdir
        exit 0
    fi
    cmd="ansible-playbook -vvvv --inventory /var/lib/ansible/inventory \
        /var/lib/ansible/playbooks/scaleup.yml"
else
    cmd="ansible-playbook -vvvv --inventory /var/lib/ansible/inventory \
        /var/lib/ansible/playbooks/main.yml"
fi

if [ "$execute_ansible" == True ] ; then
    if ! $cmd > $logfile 2>&1; then
        tail -20 $logfile >&2
        echo "Failed to run '$cmd', full log is in $(hostname):$logfile" >&2
        exit 1
    else
        backup_ansdir
    fi
else
    echo "INFO: ansible execution disabled"
    echo "INFO: command = $cmd"
fi
Host systems 38 | # ---------------------------------------------------------------------------- 39 | 40 | # check if this is an Atomic host 41 | function is_atomic_host() { 42 | [ -e /run/ostree-booted ] 43 | } 44 | 45 | # remove the docker storage setup service link and re-load the systemd config 46 | function systemd_docker_disable_storage_setup() { 47 | mv /etc/systemd/system/multi-user.target.wants/docker-storage-setup.service /root 48 | systemctl daemon-reload 49 | } 50 | 51 | # 52 | # --- OpenShift Auxiliary Service Containers 53 | # 54 | 55 | function start_heat_agent_container() { 56 | # HEAT_AGENT_CONTAINER_IMAGE=$1 57 | docker pull $1 || 58 | notify_failure "failed to pull heat agent docker image: $1" 59 | docker run \ 60 | --name heat-agent \ 61 | --detach \ 62 | --privileged \ 63 | --ipc=host \ 64 | --net=host \ 65 | --pid=host \ 66 | -e HOST=/host \ 67 | -e NAME=rhel-tools \ 68 | --volume /run:/run \ 69 | --volume /var/log:/var/log \ 70 | --volume /etc/localtime:/etc/localtime \ 71 | --volume ~/.ssh:/root/.ssh \ 72 | --volume /:/host \ 73 | --volume /etc/ansible:/etc/ansible \ 74 | --volume /var/lib/heat-cfntools:/var/lib/heat-cfntools \ 75 | --volume /var/lib/os-apply-config:/var/lib/os-apply-config \ 76 | $1 || 77 | notify_failure "failed to run heat-agent docker image: $1" 78 | } 79 | 80 | # ---------------------------------------------------------------------------- 81 | # Functions for RPM based systems 82 | # ---------------------------------------------------------------------------- 83 | 84 | function verify_os_collect_config_is_installed() { 85 | systemctl is-enabled os-collect-config || 86 | notify_failure "os-collect-config service is not installed or enabled" 87 | } 88 | 89 | function install_epel_repos_disabled() { 90 | # EPEL_RELEASE=$1 - hyphen delimiter 91 | # NOTE: install the right Ansible version on RHEL7.1 and Centos 7.1: 92 | local EPEL_REPO_URL=http://dl.fedoraproject.org/pub/epel/7/x86_64 93 | if ! 
rpm -q epel-release-$1 94 | then 95 | yum -y install \ 96 | ${EPEL_REPO_URL}/e/epel-release-$1.noarch.rpm || 97 | echo "Failed to find epel-release-$1. Installing epel-release-latest-7." 98 | fi 99 | 100 | # If it fails, get the latest 101 | if ! rpm -q epel-release-$1 102 | then 103 | yum -y install \ 104 | https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm || 105 | notify_failure "could not install EPEL release $1 NOR the latest." 106 | fi 107 | sed -i -e "s/^enabled=1/enabled=0/" /etc/yum.repos.d/epel.repo 108 | } 109 | 110 | # 111 | # Check out the Ansible playbooks from a Git repository 112 | # 113 | function clone_openshift_ansible() { 114 | # GIT_URL=$1 115 | # GIT_REV=$2 116 | git clone "$1" /usr/share/ansible/openshift-ansible \ 117 | || notify_failure "could not clone openshift-ansible: $1" 118 | cd /usr/share/ansible/openshift-ansible 119 | git checkout "$2" || 120 | notify_failure "could not check out openshift-ansible rev $2" 121 | } 122 | 123 | sudo_enable_from_ssh 124 | 125 | if is_atomic_host 126 | then 127 | systemd_docker_disable_storage_setup 128 | 129 | docker_set_storage_device $VOLUME_ID 130 | 131 | systemctl enable lvm2-lvmetad 132 | systemctl start lvm2-lvmetad 133 | 134 | docker-storage-setup || notify_failure "docker storage setup failed" 135 | 136 | systemctl start docker --ignore-dependencies || 137 | notify_failure "docker service failed to start" 138 | 139 | start_heat_agent_container $HEAT_AGENT_CONTAINER_IMAGE 140 | 141 | else 142 | verify_os_collect_config_is_installed 143 | 144 | retry yum -y install git httpd-tools || 145 | notify_failure "could not install httpd-tools" 146 | 147 | # ensure openssl is installed on CentOS 148 | retry yum -y install pyOpenSSL || 149 | notify_failure "could not install pyOpenSSL" 150 | 151 | extra_opts="" 152 | # Install the EPEL repository, but leave it disabled 153 | # Used only to install Ansible 154 | if [ -e /etc/centos-release ]; then 155 | install_epel_repos_disabled 
$EPEL_RELEASE_VERSION 156 | extra_opts="--enablerepo=epel" 157 | fi 158 | if [ -z "$ANSIBLE_VERSION" ] ; then 159 | ANSIBLE_RPM="ansible" 160 | else 161 | ANSIBLE_RPM="ansible-$ANSIBLE_VERSION" 162 | fi 163 | retry yum -y $extra_opts install ${ANSIBLE_RPM} || 164 | notify_failure "could not install ansible" 165 | 166 | if [ -n "$OPENSHIFT_ANSIBLE_GIT_URL" -a -n "$OPENSHIFT_ANSIBLE_GIT_REV" ]; then 167 | clone_openshift_ansible \ 168 | $OPENSHIFT_ANSIBLE_GIT_URL \ 169 | $OPENSHIFT_ANSIBLE_GIT_REV 170 | else 171 | retry yum -y install openshift-ansible-roles openshift-ansible-playbooks \ 172 | || notify_failure "could not install openshift-ansible" 173 | fi 174 | fi 175 | 176 | notify_success "OpenShift node has been prepared for running ansible." 177 | -------------------------------------------------------------------------------- /fragments/bastion-node-add.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -eux 4 | 5 | # ENVVARS 6 | # node_etc_host = " " 7 | 8 | # 9 | # FILES 10 | # /etc/hosts - the host database file on RPM based host 11 | # 12 | 13 | # ============================================================================ 14 | # MAIN 15 | # ============================================================================ 16 | 17 | NODESFILE=/var/lib/ansible/${node_type}_list 18 | mkdir -p /var/lib/ansible/ 19 | touch $NODESFILE 20 | grep -q "$node_hostname" $NODESFILE || echo $node_hostname >> $NODESFILE 21 | -------------------------------------------------------------------------------- /fragments/bastion-node-cleanup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # ENVVARS 3 | # node_name = "" 4 | 5 | # Exit on fail or bad VAR expansion 6 | set -eux 7 | 8 | # ============================================================================ 9 | # MAIN 10 | # ============================================================================ 11 | 12 | # 
used by ansible for setting ControlPath ssh param 13 | export HOME=/root 14 | 15 | INVENTORY=/var/lib/ansible/inventory 16 | 17 | # evacuate all the pods and remove the node from the openshift service 18 | # using the first master 19 | if [ -e $INVENTORY -a "$node_type" == node ]; then 20 | export ANSIBLE_ROLES_PATH=/usr/share/ansible/openshift-ansible/roles 21 | export ANSIBLE_HOST_KEY_CHECKING=False 22 | 23 | ansible-playbook -vvvv -e node=$node_name \ 24 | --inventory /var/lib/ansible/inventory \ 25 | /var/lib/ansible/playbooks/scaledown.yml &>> /var/log/ansible-scaledown.$$ || true 26 | fi 27 | 28 | # remove from the local list 29 | NODESFILE=/var/lib/ansible/${node_type}_list 30 | if [ -e $NODESFILE ]; then 31 | cp $NODESFILE{,.bkp} 32 | grep -v "$node_name" ${NODESFILE}.bkp > $NODESFILE || true 33 | fi 34 | 35 | # unregister the node if registered with subscription-manager 36 | [ -e $INVENTORY ] && ansible $node_name -m shell \ 37 | -u $ssh_user --sudo -i $INVENTORY \ 38 | -a "subscription-manager unregister && subscription-manager clean" || true 39 | 40 | echo "Deleted node $node_name" 41 | -------------------------------------------------------------------------------- /fragments/ca_cert.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Register with subscription manager and enable required RPM respositories 4 | # 5 | # ENVVARS: 6 | # CA_CERT - a ca certificate to be added to trust chain 7 | 8 | # Exit on command fail 9 | set -eu 10 | set -x 11 | 12 | # Return the final non-zero exit code of a failed pipe (or 0 for success) 13 | set -o pipefail 14 | 15 | # ============================================================================= 16 | # MAIN 17 | # ============================================================================= 18 | 19 | if [ -f /etc/pki/ca-trust/source/anchors/ca.crt ] ; then 20 | update-ca-trust enable 21 | update-ca-trust extract 22 | else 23 | exit 0 24 | fi 25 | 
# Send success status to OpenStack WaitCondition
# $1 - human-readable reason, also reported as the wait condition data
# Exits the calling script with status 0.
function notify_success() {
    $WC_NOTIFY --data-binary \
        "{\"status\": \"SUCCESS\", \"reason\": \"$1\", \"data\": \"$1\"}"
    exit 0
}

# Send failure status to OpenStack WaitCondition
# (previous comment wrongly said "success")
# $1 - human-readable reason, also reported as the wait condition data
# Exits the calling script with status 1.
function notify_failure() {
    $WC_NOTIFY --data-binary \
        "{\"status\": \"FAILURE\", \"reason\": \"$1\", \"data\": \"$1\"}"
    exit 1
}

function sudo_enable_from_ssh() {
    # Required for SSH pipelining
    # Comment out any 'requiretty' defaults so ansible can sudo over ssh.
    sed -i "/requiretty/s/^/#/" /etc/sudoers
}
[ -b "$docker_dev" ]; then 42 | notify_failure "docker volume device $docker_dev does not exist" 43 | fi 44 | 45 | cat << EOF > /etc/sysconfig/docker-storage-setup 46 | DEVS=$docker_dev 47 | VG=docker-vg 48 | EOF 49 | } 50 | 51 | function docker_set_storage_quota() { 52 | echo "EXTRA_DOCKER_STORAGE_OPTIONS=\"--storage-opt dm.basesize=$1G\"" \ 53 | >> /etc/sysconfig/docker-storage-setup 54 | echo "DOCKER_STORAGE_OPTIONS=\"--storage-opt dm.basesize=$1G\"" \ 55 | > /etc/sysconfig/docker-storage 56 | } 57 | -------------------------------------------------------------------------------- /fragments/common_openshift_functions.sh: -------------------------------------------------------------------------------- 1 | # workaround for openshift-ansible - symlinks are created in /usr/local/bin but 2 | # this path is not by default in sudo secure_path so ansible fails 3 | function sudo_set_secure_path() { 4 | # SECURE_PATH=$1 5 | sed -i "/secure_path = /s|=.*|= $1|" /etc/sudoers 6 | } 7 | 8 | # 9 | # - docker --------------------------------------------------------- 10 | # 11 | 12 | function docker_install_and_enable() { 13 | if ! rpm -q docker 14 | then 15 | retry yum -y install docker || notify_failure "could not install docker" 16 | fi 17 | systemctl enable docker 18 | } 19 | 20 | 21 | function docker_version() { 22 | # MAJ_MIN=$1 - 'major' or 'minor' 23 | local version=$(rpm -q docker --qf '%{VERSION}') 24 | [ $1 = "major" ] && echo $version | cut -d. -f1 && return 25 | [ $1 = "minor" ] && echo $version | cut -d. 
-f2 && return 26 | echo $version 27 | } 28 | 29 | function docker_set_trusted_registry() { 30 | # TRUSTED_REGISTRY=$1 31 | echo "INSECURE_REGISTRY='--insecure-registry $1'" >> /etc/sysconfig/docker 32 | } 33 | 34 | # 35 | # - systemd --------------------------------------------------------- 36 | # 37 | 38 | function systemd_add_docker_socket() { 39 | # Workaround for https://bugzilla.redhat.com/show_bug.cgi?id=1289851 40 | # can be removed once docker 1.10 is released 41 | 42 | # make a "local" copy of the docker service unit file 43 | cp /lib/systemd/system/docker.service /etc/systemd/system/docker.service 44 | # Add a new unit file requirement 45 | sed -i '/Wants=docker-storage-setup.service/aRequires=docker.socket' \ 46 | /etc/systemd/system/docker.service 47 | 48 | # create the docker socket unit file 49 | cat << EOF > /etc/systemd/system/docker.socket 50 | [Unit] 51 | Description=Docker Socket for the API 52 | PartOf=docker.service 53 | 54 | [Socket] 55 | ListenStream=/var/run/docker.sock 56 | SocketMode=0660 57 | SocketUser=root 58 | SocketGroup=root 59 | 60 | [Install] 61 | WantedBy=sockets.target 62 | EOF 63 | 64 | # Force re-read of systemd configuration and apply 65 | systemctl daemon-reload 66 | } 67 | 68 | 69 | 70 | -------------------------------------------------------------------------------- /fragments/host-update.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Ensure that the host OS packages are current 4 | # 5 | # On an Atomic host, upgrade the host tree 6 | # On traditional host, update RPMs 7 | # 8 | # ENVVARS: 9 | # WC_NOTIFY: a curl CLI fragment to notify OpenStack Heat of the completion 10 | # status of the script. 
11 | # Provided by an OpenStack WaitCondition object 12 | 13 | # Exit on fail, bad VAR expansion 14 | set -eux 15 | # return the last (right most) non-zero status from pipes (or 0 on success) 16 | set -o pipefail 17 | 18 | source /usr/local/share/openshift-on-openstack/common_functions.sh 19 | 20 | # ============================================================================ 21 | # MAIN 22 | # ============================================================================ 23 | 24 | [ "$SYSTEM_UPDATE" = "True" ] || exit 0 25 | 26 | # Check for Atomic Host 27 | if [ -e /run/ostree-booted ] 28 | then 29 | # Update the OS tree 30 | atomic host upgrade || notify_failure "failed to run 'atomic host upgrade'" 31 | else 32 | # Update using traditional RPMs 33 | retry yum install -y deltarpm || notify_failure "could not install deltarpm" 34 | retry yum -y update || notify_failure "could not update RPMs" 35 | fi 36 | -------------------------------------------------------------------------------- /fragments/ifcfg-eth: -------------------------------------------------------------------------------- 1 | DEVICE="$IFNAME" 2 | BOOTPROTO="dhcp" 3 | BOOTPROTOv6="dhcp" 4 | ONBOOT="yes" 5 | TYPE="Ethernet" 6 | USERCTL="yes" 7 | PEERDNS="no" 8 | IPV6INIT="yes" 9 | PERSISTENT_DHCLIENT="1" 10 | -------------------------------------------------------------------------------- /fragments/infra-boot.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Prepare an OpenShift node VM for configuration by Ansible 4 | # 5 | # CONSTANTS 6 | # 7 | # The device to mount to store Docker images and containers 8 | VOLUME_ID=$DOCKER_VOLUME_ID 9 | 10 | # Exit on first fail or on reference to an undefined variable 11 | set -eu 12 | set -x 13 | 14 | # Return the exit code of the last non-zero command in a pipe (or 0 on success) 15 | set -o pipefail 16 | 17 | source /usr/local/share/openshift-on-openstack/common_functions.sh 18 | source 
/usr/local/share/openshift-on-openstack/common_openshift_functions.sh 19 | 20 | 21 | ifup eth1 22 | 23 | sudo_set_secure_path "/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/bin" 24 | sudo_enable_from_ssh 25 | 26 | docker_install_and_enable 27 | docker_set_trusted_registry 0.0.0.0/0 28 | 29 | if [ $(docker_version major) -lt 2 -a $(docker_version minor) -lt 10 ] 30 | then 31 | systemd_add_docker_socket 32 | fi 33 | 34 | # lvmetad allows new volumes to be configured and made available as they appear 35 | # This is good for dynamically created volumes in a cloud provider service 36 | systemctl enable lvm2-lvmetad 37 | systemctl start lvm2-lvmetad 38 | 39 | if [ -n "$VOLUME_ID" ] 40 | then 41 | docker_set_storage_device $VOLUME_ID 42 | fi 43 | 44 | notify_success "OpenShift node has been prepared for running docker." 45 | -------------------------------------------------------------------------------- /fragments/lb-boot.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Prepare the Load Balancer host to run ansible for host configuration 4 | # 5 | # ENVVARS 6 | # WC_NOTIFY: A curl query prefix to provide status to OpenStack WaitCondition 7 | 8 | # Exit immediately on error or on reference to an undefined variable 9 | set -eu 10 | set -x 11 | 12 | # Exit with return code of the last non-zero part of a pipe (or 0 for success) 13 | set -o pipefail 14 | 15 | source /usr/local/share/openshift-on-openstack/common_functions.sh 16 | 17 | # ============================================================================== 18 | # MAIN 19 | # ============================================================================== 20 | 21 | # workaround for openshift-ansible - Add /usr/local/bin to sudo PATH 22 | # symlinks are created in /usr/local/bin but this path is not by 23 | # default in sudo secure_path so ansible fails 24 | sed -i '/secure_path =/s|=.*|= /sbin:/bin:/usr/sbin:/usr/bin:/usr/local/bin|' \ 25 | /etc/sudoers 26 | 27 | 
# Disable requiretty: allow sudo via SSH 28 | sed -i "/^Defaults.*requiretty/s/^/#/" /etc/sudoers 29 | 30 | notify_success "OpenShift node has been prepared for running ansible." 31 | -------------------------------------------------------------------------------- /fragments/master-boot.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Prepare an OpenShift node VM for configuration by Ansible 4 | # 5 | # ENVVARS 6 | # WC_NOTIFY - a curl URL from an OpenStack WaitCondition 7 | # send status to OpenStack 8 | # 9 | # CONSTANTS 10 | # 11 | # The device to mount to store Docker images and containers 12 | VOLUME_ID=$DOCKER_VOLUME_ID 13 | 14 | # Exit on first fail or on reference to an undefined variable 15 | set -eu 16 | set -x 17 | 18 | # Return the exit code of the last non-zero command in a pipe (or 0 on success) 19 | set -o pipefail 20 | 21 | source /usr/local/share/openshift-on-openstack/common_functions.sh 22 | source /usr/local/share/openshift-on-openstack/common_openshift_functions.sh 23 | 24 | 25 | ifup eth1 26 | 27 | sudo_set_secure_path "/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/bin" 28 | sudo_enable_from_ssh 29 | 30 | docker_install_and_enable 31 | docker_set_trusted_registry 0.0.0.0/0 32 | 33 | if [ $(docker_version major) -lt 2 -a $(docker_version minor) -lt 10 ] 34 | then 35 | systemd_add_docker_socket 36 | fi 37 | 38 | docker_set_storage_device $VOLUME_ID 39 | 40 | # lvmetad allows new volumes to be configured and made available as they appear 41 | # This is good for dynamically created volumes in a cloud provider service 42 | systemctl enable lvm2-lvmetad 43 | systemctl start lvm2-lvmetad 44 | 45 | /usr/bin/docker-storage-setup || notify_failure "Docker Storage setup failed" 46 | 47 | notify_success "OpenShift node has been prepared for running docker." 
try:
    # Mapping moved to collections.abc in Python 3.3 and the old alias was
    # removed in 3.10; fall back for Python 2.
    from collections.abc import Mapping
except ImportError:
    from collections import Mapping


def dict_merge(dct, merge_dct):
    """ Recursive dict merge. Inspired by :meth:``dict.update()``, instead of
    updating only top-level keys, dict_merge recurses down into dicts nested
    to an arbitrary depth, updating keys. The ``merge_dct`` is merged into
    ``dct``.
    :param dct: dict onto which the merge is executed
    :param merge_dct: dct merged into dct
    :return: None
    """
    # .items() works on both Python 2 and 3; the previous .iteritems()
    # crashes under Python 3.
    for k, v in merge_dct.items():
        if (k in dct and isinstance(dct[k], dict)
                and isinstance(v, Mapping)):
            dict_merge(dct[k], v)
        else:
            dct[k] = v


def load_file(filename):
    """Parse a .json/.yml/.yaml file and return the resulting object.

    :param filename: path whose extension selects the parser
    :raises Exception: for any other extension
    """
    name, extension = os.path.splitext(filename.lower())
    extension = extension.lstrip('.')
    if extension == "json":
        with open(filename) as f:
            return json.load(f)
    elif extension in ['yml', 'yaml']:
        # safe_load: plain yaml.load can construct arbitrary Python objects
        # from untrusted input.
        with open(filename) as f:
            return yaml.safe_load(f)
    raise Exception("Invalid format " + extension)


def save_file(dct, filename):
    """Serialize ``dct`` to ``filename`` as JSON or YAML (by extension).

    :raises Exception: for any other extension
    """
    name, extension = os.path.splitext(filename.lower())
    extension = extension.lstrip('.')
    if extension == "json":
        with open(filename, 'w') as f:
            return json.dump(dct, f)
    elif extension in ['yml', 'yaml']:
        with open(filename, 'w') as f:
            return yaml.safe_dump(dct, f)
    raise Exception("Invalid format " + extension)
outfile" % (sys.argv[0],)) 49 | sys.exit(1) 50 | 51 | merged = load_file(sys.argv[1]) 52 | for filename in sys.argv[2:-1]: 53 | dict_merge(merged, load_file(filename)) 54 | 55 | if os.path.exists(sys.argv[-1]): 56 | dict_merge(merged, load_file(sys.argv[-1])) 57 | 58 | save_file(merged, sys.argv[-1]) 59 | -------------------------------------------------------------------------------- /fragments/node-boot.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # Prepare an OpenShift node VM for configuration by Ansible 4 | # 5 | # CONSTANTS 6 | # 7 | # The device to mount to store Docker images and containers 8 | VOLUME_ID=$DOCKER_VOLUME_ID 9 | 10 | # Exit on first fail or on reference to an undefined variable 11 | set -eu 12 | set -x 13 | 14 | # Return the exit code of the last non-zero command in a pipe (or 0 on success) 15 | set -o pipefail 16 | 17 | source /usr/local/share/openshift-on-openstack/common_functions.sh 18 | source /usr/local/share/openshift-on-openstack/common_openshift_functions.sh 19 | 20 | 21 | ifup eth1 22 | 23 | sudo_set_secure_path "/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/bin" 24 | sudo_enable_from_ssh 25 | 26 | docker_install_and_enable 27 | docker_set_trusted_registry 0.0.0.0/0 28 | 29 | if [ $(docker_version major) -lt 2 -a $(docker_version minor) -lt 10 ] 30 | then 31 | systemd_add_docker_socket 32 | fi 33 | 34 | # lvmetad allows new volumes to be configured and made available as they appear 35 | # This is good for dynamically created volumes in a cloud provider service 36 | systemctl enable lvm2-lvmetad 37 | systemctl start lvm2-lvmetad 38 | 39 | if [ -n "$VOLUME_ID" ] 40 | then 41 | docker_set_storage_device $VOLUME_ID 42 | fi 43 | 44 | if [ -n "$CONTAINER_QUOTA" ] && [ "$CONTAINER_QUOTA" != 0 ] 45 | then 46 | docker_set_storage_quota $CONTAINER_QUOTA 47 | fi 48 | 49 | notify_success "OpenShift node has been prepared for running docker." 
#!/bin/bash
#
# Make 5 attempts to execute a command at 2 sec intervals or until passed
#
# Usage: retry.sh COMMAND [ARG...]
# Exits 0 as soon as the command succeeds, 1 after 5 failed attempts.

for i in {1..5}; do
    # BUGFIX: quote "$@" - the previous unquoted $@ re-split arguments that
    # contain whitespace.  Exit status made explicit.
    "$@" && exit 0
    sleep 2
done
exit 1
function register_sat6() {
    # SAT6_ORGANIZATION=$1
    # SAT6_ACTIVATIONKEY=$2
    # register as a sat6 client
    retry subscription-manager register --org="$1" --activationkey="$2"
}

# =============================================================================
# MAIN
# =============================================================================

if use_satellite6 ; then
    install_sat6_ca_certs $SAT6_HOSTNAME
    register_sat6 $SAT6_ORGANIZATION $SAT6_ACTIVATIONKEY
elif use_rhn ; then
    register_rhn $RHN_USERNAME $RHN_PASSWORD
else
    # No credentials supplied - nothing to register, not an error.
    exit 0
fi

# Attach to an entitlement pool
if [ -n "$POOL_ID" ]; then
    retry subscription-manager attach --pool $POOL_ID
else
    retry subscription-manager attach --auto
fi

if [ -n "$EXTRA_POOL_IDS" ]; then
    retry subscription-manager attach --pool $EXTRA_POOL_IDS
fi

# Select the YUM repositories to use
retry subscription-manager repos --disable="*"
retry subscription-manager repos \
    --enable="rhel-7-server-rpms" \
    --enable="rhel-7-server-extras-rpms" \
    --enable="rhel-7-server-optional-rpms" \
    --enable="rhel-7-server-ose-$OCP_VERSION-rpms"

# version 3.5+ require fast-datapath
# BUGFIX: the previous test 'expr "$OCP_VERSION" \> 3.4' compared the
# versions as strings (expr only compares numerically when both operands
# are integers), so e.g. "3.10" sorted before "3.4" and the repo was not
# enabled.  Compare major/minor components numerically instead.
OCP_MAJOR=${OCP_VERSION%%.*}
OCP_MINOR=${OCP_VERSION#*.}
OCP_MINOR=${OCP_MINOR%%.*}
if [ "$OCP_MAJOR" -gt 3 ] || { [ "$OCP_MAJOR" -eq 3 ] && [ "$OCP_MINOR" -ge 5 ]; }; then
    retry subscription-manager repos --enable rhel-7-fast-datapath-rpms
fi

# Allow RPM integrity checking
rpm --import /etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release
#!/bin/bash
#
# Add extra docker registries (and optionally mark them insecure) to the
# docker sysconfig file.
#
# ENVVARS (substituted by Heat str_replace before execution):
#   REPOLIST - whitespace delimited registry list; a "#insecure" suffix on an
#              entry also marks that registry as insecure.

set -eux
set -o pipefail

CFGFILE=${CFGFILE:-"/etc/sysconfig/docker"}

# Bypass this function when no additional repos are provided.
[ -z "$REPOLIST" ] && exit 0

registry_list=''
insecure_list=''
# Build the --add-registry / --insecure-registry option strings
for repo in $REPOLIST; do
    insecure=false
    if [[ "$repo" == *"#insecure" ]]; then
        insecure=true
        repo=${repo%#insecure}
    fi
    registry_list="$registry_list --add-registry $repo"
    $insecure && insecure_list="$insecure_list --insecure-registry $repo"
done

# BUG FIX: the sed expressions below embed registry names; use '|' as the
# s-command delimiter because a registry entry containing '/' (e.g.
# "registry.example.com/prefix") would terminate a '/'-delimited expression
# early and make sed fail.
if [ -n "$registry_list" ]; then
    if grep -q "^ADD_REGISTRY='\(.*\)'" $CFGFILE; then
        # Append to an existing ADD_REGISTRY line
        sed -i "s|^ADD_REGISTRY='\(.*\)'|ADD_REGISTRY='\1 $registry_list'|" $CFGFILE
    else
        echo "ADD_REGISTRY='$registry_list'" >> $CFGFILE
    fi
fi

if [ -n "$insecure_list" ]; then
    if grep -q "^INSECURE_REGISTRY='\(.*\)'" $CFGFILE; then
        # Append to an existing INSECURE_REGISTRY line
        sed -i "s|^INSECURE_REGISTRY='\(.*\)'|INSECURE_REGISTRY='\1 $insecure_list'|" $CFGFILE
    else
        echo "INSECURE_REGISTRY='$insecure_list'" >> $CFGFILE
    fi
fi
#!/bin/bash
#
# Tune the local Ansible configuration to speed up SSH connections and
# fact gathering.
#

# Exit on single command fail, or undefined variable reference
set -eux

# Return the last non-zero exit code from a pipe (or zero for success)
set -o pipefail

ANSIBLE_CFG=/etc/ansible/ansible.cfg

# Apply one section/option/value triple to the Ansible configuration file
# by running ansible's own ini_file module against localhost.
function set_ansible_configuration() {
    local section=$1
    local option=$2
    local value=$3

    ansible all --connection=local -i "localhost," -m ini_file \
        -a "dest=${ANSIBLE_CFG} section='${section}' option='${option}' value='${value}' state=present"
}

# ============================================================================
# MAIN
# ============================================================================

# Enable re-use of SSH connections
# http://docs.ansible.com/ansible/intro_configuration.html#pipelining
set_ansible_configuration ssh_connection "pipelining" "True"

# Extend the connection idle timeout to 10 minutes
# http://docs.ansible.com/ansible/intro_configuration.html#ssh-args
set_ansible_configuration ssh_connection "ssh_args" "-o ControlMaster=auto -o ControlPersist=600s"

# Shorten the socket path
# http://docs.ansible.com/ansible/intro_configuration.html#control-path
set_ansible_configuration ssh_connection "control_path" '%(directory)s/%%h-%%r'

# Force re-gather facts for each new play (execution)
# http://docs.ansible.com/ansible/intro_configuration.html#gathering
set_ansible_configuration defaults "gathering" "implicit"

# Cache facts in JSON format in a tmp dir and save them 10 minutes
# http://docs.ansible.com/ansible/playbooks_variables.html#fact-caching
set_ansible_configuration defaults \
    "fact_caching_connection" \
    "/tmp/ansible/facts"
set_ansible_configuration defaults "fact_caching_timeout" "600"
set_ansible_configuration defaults "fact_caching" "jsonfile"
38 | # http://docs.ansible.com/ansible/intro_configuration.html#control-path 39 | set_ansible_configuration ssh_connection "control_path" '%(directory)s/%%h-%%r' 40 | 41 | # Force re-gather facts for each new play (execution) 42 | # http://docs.ansible.com/ansible/intro_configuration.html#gathering 43 | set_ansible_configuration defaults "gathering" "implicit" 44 | 45 | # Cache facts in JSON format in a tmp dir and save them 10 minutes 46 | # http://docs.ansible.com/ansible/playbooks_variables.html#fact-caching 47 | set_ansible_configuration defaults \ 48 | "fact_caching_connection" \ 49 | "/tmp/ansible/facts" 50 | set_ansible_configuration defaults "fact_caching_timeout" "600" 51 | set_ansible_configuration defaults "fact_caching" "jsonfile" 52 | -------------------------------------------------------------------------------- /fragments/update_dns.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # 3 | # Add an A record to a DNS server via RFC 2136 dynamic update 4 | # 5 | 6 | from __future__ import print_function 7 | 8 | import os,sys 9 | from argparse import ArgumentParser 10 | 11 | # python2-dns 12 | import dns.query 13 | import dns.tsigkeyring 14 | import dns.update 15 | import dns.rcode 16 | 17 | 18 | def add_a_record(server, zone, key, name, address, ttl=300): 19 | 20 | # make input zones absolute 21 | #zone = zone + '.' 
if not zone.endswith('.') 22 | keyring = dns.tsigkeyring.from_text({'update-key': key}) 23 | update = dns.update.Update(zone, keyring=keyring) 24 | update.replace(name, ttl, 'a', address) 25 | response = dns.query.tcp(update, server) 26 | return response 27 | 28 | 29 | if __name__ == "__main__": 30 | 31 | def process_arguments(): 32 | parser = ArgumentParser() 33 | parser.add_argument("-s", "--server", type=str, default="127.0.0.1") 34 | parser.add_argument("-z", "--zone", type=str, default="example.com") 35 | parser.add_argument("-k", "--key", type=str, default=os.getenv("DNS_KEY")) 36 | parser.add_argument("name", type=str) 37 | parser.add_argument("address", type=str) 38 | parser.add_argument("-t", "--ttl", type=str, default=300) 39 | return parser.parse_args() 40 | 41 | opts = process_arguments() 42 | r = add_a_record(opts.server, opts.zone, opts.key, opts.name, opts.address, opts.ttl) 43 | 44 | if r.rcode() != dns.rcode.NOERROR: 45 | print("ERROR: update failed: {}".format(dns.rcode.to_text(r.rcode()))) 46 | sys.exit(r.rcode()) 47 | 48 | -------------------------------------------------------------------------------- /graphics/architecture.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/redhat-openstack/openshift-on-openstack/29a1d3a5fba13f2189f9bd79956058980c7748f1/graphics/architecture.png -------------------------------------------------------------------------------- /heat-docker-agent/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM centos:centos7 2 | MAINTAINER jprovazn@redhat.com 3 | ENV container docker 4 | 5 | ADD configure_container_agent.sh /tmp/ 6 | RUN /tmp/configure_container_agent.sh 7 | 8 | #create volumes to share the host directories 9 | #VOLUME [ "/var/lib/cloud"] 10 | #VOLUME [ "/var/lib/heat-cfntools" ] 11 | 12 | #set DOCKER_HOST environment variable that docker-compose would use 13 | ENV DOCKER_HOST 
#!/bin/bash
#
# Build-time setup for the heat-docker-agent container image: install the
# os-collect-config/os-refresh-config/os-apply-config tool chain, ansible,
# openshift-ansible and docker, then lay down the heat-config hook scripts
# and the mustache template used to generate os-collect-config.conf.

set -eux

yum update -y && yum clean all
yum -y install centos-release-openstack-liberty
yum install -y net-tools bind-utils git python-pip \
    sysvinit-tools openstack-heat-templates \
    os-collect-config os-apply-config \
    os-refresh-config dib-utils python-pip \
    python-docker-py python-yaml python-zaqarclient

# required for openshift registry setup (using cinder backend)
yum -y install python-novaclient python-cinderclient

# openshift-ansible: try the pinned EPEL release RPM first, fall back to latest
yum -y install http://dl.fedoraproject.org/pub/epel/7/x86_64/e/epel-release-7-7.noarch.rpm || yum -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm
yum install -y --enablerepo epel ansible

# NOTE(review): upstream is openshift/openshift-ansible; this deliberately
# tracks a fork -- confirm before changing.
#git clone --single-branch --branch master https://github.com/openshift/openshift-ansible.git /usr/share/ansible/openshift-ansible/
git clone --single-branch --branch master https://github.com/jprovaznik/openshift-ansible.git /usr/share/ansible/openshift-ansible/

# docker pinned to 1.8.2 from the CentOS extras repo
yum -y install http://mirror.centos.org/centos/7/extras/x86_64/Packages/docker-1.8.2-10.el7.centos.x86_64.rpm
yum -y install pyOpenSSL

pip install dpath functools32

# os-collect-config: wire up the heat-config hooks and refresh scripts
orc_scripts=/usr/libexec/os-refresh-config
heat_templates=/usr/share/openstack-heat-templates
oac_templates=/usr/libexec/os-apply-config/templates
mkdir -p /var/lib/heat-config/hooks
ln -s $heat_templates/software-config/elements/heat-config/bin/heat-config-notify /usr/bin/
ln -s $heat_templates/software-config/heat-container-agent/scripts/hooks/script /var/lib/heat-config/hooks/
mkdir -p $orc_scripts/configure.d
ln -s $heat_templates/software-config/elements/heat-config/os-refresh-config/configure.d/55-heat-config $orc_scripts/configure.d/

mkdir -p $oac_templates/var/run/heat-config
echo "{{deployments}}" > $oac_templates/var/run/heat-config/heat-config

# template for building os-collect-config.conf for polling heat
mkdir -p $oac_templates/etc/
# BUG FIX: was "cat <$oac_templates/..." which READS from a not-yet-existing
# file and orphans the EOF terminator; this must be a heredoc WRITING the
# mustache template to the file.
cat <<EOF >$oac_templates/etc/os-collect-config.conf
[DEFAULT]
{{^os-collect-config.command}}
command = os-refresh-config
{{/os-collect-config.command}}
{{#os-collect-config}}
{{#command}}
command = {{command}}
{{/command}}
{{#polling_interval}}
polling_interval = {{polling_interval}}
{{/polling_interval}}
{{#cachedir}}
cachedir = {{cachedir}}
{{/cachedir}}
{{#collectors}}
collectors = {{collectors}}
{{/collectors}}

{{#cfn}}
[cfn]
{{#metadata_url}}
metadata_url = {{metadata_url}}
{{/metadata_url}}
stack_name = {{stack_name}}
secret_access_key = {{secret_access_key}}
access_key_id = {{access_key_id}}
path = {{path}}
{{/cfn}}

{{#heat}}
[heat]
auth_url = {{auth_url}}
user_id = {{user_id}}
password = {{password}}
project_id = {{project_id}}
stack_id = {{stack_id}}
resource_name = {{resource_name}}
{{/heat}}

{{#request}}
[request]
{{#metadata_url}}
metadata_url = {{metadata_url}}
{{/metadata_url}}
{{/request}}

{{/os-collect-config}}
EOF

# os-refresh-config script for running os-apply-config
# BUG FIX: same heredoc repair as above ("cat <" -> "cat <<EOF >").
cat <<EOF >$orc_scripts/configure.d/20-os-apply-config
#!/bin/bash
set -ue

exec os-apply-config
EOF
chmod 700 $orc_scripts/configure.d/20-os-apply-config
-------------------------------------------------------------------------------- /infra.yaml: -------------------------------------------------------------------------------- 1 | heat_template_version: 2016-10-14 2 | 3 | 4 | description: > 5 | OpenShift Master 6 | 7 | 8 | parameters: 9 | 10 | # What version of OpenShift Container Platform to install 11 | # This value is used to select the RPM repo for the OCP release to install 12 | ocp_version: 13 | type: string 14 | description: > 15 | The version of OpenShift Container Platform to deploy 16 | 17 | key_name: 18 | description: > 19 | A pre-submitted SSH key to access the VM hosts 20 | type: string 21 | constraints: 22 | - custom_constraint: nova.keypair 23 | 24 | image: 25 | description: > 26 | Select a base image to use for the infra servers 27 | type: string 28 | constraints: 29 | - custom_constraint: glance.image 30 | 31 | flavor: 32 | description: > 33 | Define the hardware characteristics for the VMs: CPU, Memory, base disk 34 | type: string 35 | constraints: 36 | - custom_constraint: nova.flavor 37 | 38 | docker_volume_size: 39 | description: > 40 | size of a cinder volume in GB to allocate to docker for container/image 41 | storage 42 | type: number 43 | default: 25 44 | 45 | rhn_username: 46 | description: > 47 | A valid user with entitlements to RHEL and OpenShift software repos 48 | type: string 49 | 50 | rhn_password: 51 | description: > 52 | The password for the RHN user 53 | type: string 54 | hidden: true 55 | 56 | # Red Hat satellite subscription parameters 57 | sat6_hostname: 58 | type: string 59 | description: > 60 | The hostname of the Satellite 6 server which will provide software updates 61 | default: '' 62 | 63 | sat6_organization: 64 | type: string 65 | description: > 66 | An organization string provided by Sat6 to group subscriptions 67 | default: '' 68 | 69 | sat6_activationkey: 70 | type: string 71 | description: > 72 | An activation key string provided by Sat6 to enable subscriptions 73 | 
default: '' 74 | 75 | rhn_pool: 76 | description: > 77 | A subscription pool containing the RHEL and OpenShift software repos 78 | OPTIONAL 79 | type: string 80 | hidden: true 81 | 82 | extra_rhn_pools: 83 | type: comma_delimited_list 84 | description: List of rhn pools which will be installed on each node. 85 | default: '' 86 | 87 | hostname: 88 | description: > 89 | A string to identify infra hostnames. 90 | type: string 91 | constraints: 92 | - allowed_pattern: '[a-z0-9\-]*' 93 | description: Hostname must contain only characters [a-z0-9\-]. 94 | 95 | domain_name: 96 | description: > 97 | The DNS domain suffix. All VMs will be placed in this domain 98 | type: string 99 | 100 | ansible_public_key: 101 | description: > 102 | The SSH public key that Ansible will use to access master and node hosts 103 | This will be placed on each VM host in /root/.ssh/authorized_keys 104 | type: string 105 | 106 | ssh_user: 107 | description: > 108 | The user for SSH access to the VM hosts 109 | type: string 110 | 111 | timeout: 112 | description: Time to wait until the infra setup is ready. 
113 | type: number 114 | default: 10800 115 | 116 | bastion_node: 117 | description: > 118 | The hostname of the bastion node in the deployment 119 | type: string 120 | default: '' 121 | 122 | external_network: 123 | description: > 124 | The name of the inbound access network 125 | type: string 126 | constraints: 127 | - custom_constraint: neutron.network 128 | 129 | fixed_network: 130 | description: > 131 | The name or ID of the admin and public network 132 | type: string 133 | constraints: 134 | - custom_constraint: neutron.network 135 | 136 | fixed_subnet: 137 | description: > 138 | The name or ID of the admin and public IPv4 space 139 | type: string 140 | constraints: 141 | - custom_constraint: neutron.subnet 142 | 143 | internal_network: 144 | description: > 145 | The name or ID of the internal network 146 | type: string 147 | constraints: 148 | - custom_constraint: neutron.network 149 | 150 | internal_subnet: 151 | description: > 152 | The name or ID of the internal IPv4 space 153 | type: string 154 | constraints: 155 | - custom_constraint: neutron.subnet 156 | 157 | allowed_address_pairs: 158 | description: > 159 | Allowed address pairs to apply on the ports of the nodes 160 | that host the Openshift router 161 | type: json 162 | 163 | 164 | ldap_url: 165 | description: > 166 | The LDAP url for access to the LDAP service (redundant? 
ML) 167 | type: string 168 | default: '' 169 | 170 | ldap_preferred_username: 171 | description: > 172 | What field to use to look up and identify users in the database 173 | type: string 174 | default: 'uid' 175 | 176 | ldap_bind_dn: 177 | description: > 178 | LDAP service access user identifier 179 | type: string 180 | default: '' 181 | 182 | ldap_bind_password: 183 | description: > 184 | LDAP service access password 185 | type: string 186 | default: '' 187 | 188 | ldap_ca: 189 | description: > 190 | The Certificate Authority file to confirm SSL encryption security 191 | type: string 192 | default: '' 193 | 194 | ldap_insecure: 195 | description: > 196 | Toggle SSL encryption for LDAP communications 197 | type: string 198 | default: false 199 | 200 | infra_server_group: 201 | description: > 202 | ID of a server group containing all of the infra hosts 203 | type: string 204 | 205 | infra_security_group: 206 | description: > 207 | ID of the network access policies for the OpenShift infra hosts 208 | type: string 209 | 210 | system_update: 211 | type: boolean 212 | 213 | extra_repository_urls: 214 | type: comma_delimited_list 215 | description: List of repository URLs which will be installed on each node. 216 | default: '' 217 | 218 | extra_docker_repository_urls: 219 | type: comma_delimited_list 220 | description: List of docker repository URLs which will be installed on each node, if a repo is insecure use '#insecure' suffix. 
221 | default: '' 222 | 223 | dns_servers: 224 | type: comma_delimited_list 225 | description: address of dns nameservers reachable in your environment 226 | 227 | dns_update_key: 228 | type: string 229 | hidden: true 230 | 231 | ca_cert: 232 | type: string 233 | description: Certificate Authority Certificate to be added to trust chain 234 | 235 | resources: 236 | 237 | # Create a network connection on the internal communications network 238 | port: 239 | type: OS::Neutron::Port 240 | properties: 241 | security_groups: 242 | - {get_param: infra_security_group} 243 | network: {get_param: fixed_network} 244 | allowed_address_pairs: {get_param: allowed_address_pairs} 245 | fixed_ips: 246 | - subnet: {get_param: fixed_subnet} 247 | replacement_policy: AUTO 248 | 249 | # Provide access to the Master on the public network 250 | floating_ip: 251 | type: OS::Neutron::FloatingIP 252 | properties: 253 | floating_network: {get_param: external_network} 254 | port_id: {get_resource: port} 255 | 256 | # Create a network connection on the internal communications network 257 | internal_port: 258 | type: OOShift::ContainerPort 259 | properties: 260 | security_group: {get_param: infra_security_group} 261 | network: {get_param: internal_network} 262 | subnet: {get_param: internal_subnet} 263 | 264 | # Create the VM instance to host the OpenShift infra service 265 | host: 266 | type: OS::Nova::Server 267 | properties: 268 | name: 269 | str_replace: 270 | template: "HOST.DOMAIN" 271 | params: 272 | HOST: {get_param: hostname} 273 | DOMAIN: {get_param: domain_name} 274 | admin_user: {get_param: ssh_user} 275 | image: {get_param: image} 276 | flavor: {get_param: flavor} 277 | key_name: {get_param: key_name} 278 | networks: 279 | - port: {get_resource: port} 280 | - port: {get_attr: [internal_port, port]} 281 | user_data_format: SOFTWARE_CONFIG 282 | user_data_update_policy: IGNORE 283 | user_data: {get_resource: init} 284 | scheduler_hints: 285 | group: {get_param: infra_server_group} 
286 | 287 | # Create space for Docker containers and images 288 | docker_volume: 289 | type: OS::Cinder::Volume 290 | properties: 291 | size: {get_param: docker_volume_size} 292 | 293 | # Bind the docker storage to the VM 294 | docker_volume_attach: 295 | type: OS::Cinder::VolumeAttachment 296 | properties: 297 | instance_uuid: {get_resource: host} 298 | volume_id: {get_resource: docker_volume} 299 | 300 | # Compose configuration data into a single structure 301 | init: 302 | type: OS::Heat::MultipartMime 303 | properties: 304 | parts: 305 | - config: {get_resource: set_hostname} 306 | - config: {get_resource: included_files} 307 | - config: {get_resource: update_ca_cert} 308 | - config: {get_resource: rhn_register} 309 | - config: {get_resource: set_extra_repos} 310 | - config: {get_resource: set_extra_docker_repos} 311 | - config: {get_resource: host_update} 312 | - config: {get_resource: add_dns_record} 313 | - config: {get_resource: infra_boot} 314 | 315 | # Compose the short hostname and fully qualified domain name for the new host 316 | set_hostname: 317 | type: OS::Heat::CloudConfig 318 | properties: 319 | cloud_config: 320 | hostname: {get_param: hostname} 321 | fqdn: 322 | str_replace: 323 | template: "HOST.DOMAIN" 324 | params: 325 | HOST: {get_param: hostname} 326 | DOMAIN: {get_param: domain_name} 327 | 328 | # Compile a set of standard configuration files to provide identity and access 329 | included_files: 330 | type: OS::Heat::CloudConfig 331 | properties: 332 | cloud_config: 333 | write_files: 334 | - path: /usr/local/share/openshift-on-openstack/common_functions.sh 335 | permissions: 0755 336 | content: 337 | str_replace: 338 | params: 339 | $WC_NOTIFY: { get_attr: ['wait_handle', 'curl_cli'] } 340 | template: {get_file: fragments/common_functions.sh} 341 | - path: /usr/local/share/openshift-on-openstack/common_openshift_functions.sh 342 | permissions: 0755 343 | content: {get_file: fragments/common_openshift_functions.sh} 344 | - path: 
/usr/local/bin/retry 345 | permissions: 0755 346 | content: {get_file: fragments/retry.sh} 347 | - path: /usr/local/bin/update_dns 348 | permissions: 0755 349 | content: {get_file: fragments/update_dns.py} 350 | - path: /etc/sysconfig/network-scripts/ifcfg-eth1 351 | content: 352 | str_replace: 353 | params: 354 | $IFNAME: eth1 355 | template: {get_file: fragments/ifcfg-eth} 356 | - path: /etc/pki/ca-trust/source/anchors/ca.crt 357 | permissions: 0600 358 | content: {get_param: ca_cert} 359 | ssh_authorized_keys: 360 | - {get_param: ansible_public_key} 361 | 362 | # Add CA Cert to trust chain 363 | update_ca_cert: 364 | type: OS::Heat::SoftwareConfig 365 | properties: 366 | config: {get_file: fragments/ca_cert.sh} 367 | 368 | # Attach to a source of software updates for RHEL 369 | rhn_register: 370 | type: OS::Heat::SoftwareConfig 371 | properties: 372 | config: 373 | str_replace: 374 | params: 375 | $OCP_VERSION: {get_param: ocp_version} 376 | $RHN_USERNAME: {get_param: rhn_username} 377 | $RHN_PASSWORD: {get_param: rhn_password} 378 | $SAT6_HOSTNAME: {get_param: sat6_hostname} 379 | $SAT6_ORGANIZATION: {get_param: sat6_organization} 380 | $SAT6_ACTIVATIONKEY: {get_param: sat6_activationkey} 381 | $POOL_ID: {get_param: rhn_pool} 382 | $EXTRA_POOL_IDS: 383 | list_join: 384 | - " --pool=" 385 | - {get_param: extra_rhn_pools} 386 | template: {get_file: fragments/rhn-register.sh} 387 | 388 | # Enable any extra repositories 389 | set_extra_repos: 390 | type: OS::Heat::SoftwareConfig 391 | properties: 392 | config: 393 | str_replace: 394 | params: 395 | $REPOLIST: 396 | list_join: 397 | - " " 398 | - {get_param: extra_repository_urls} 399 | template: {get_file: fragments/set-extra-repos.sh} 400 | 401 | set_extra_docker_repos: 402 | type: OS::Heat::SoftwareConfig 403 | properties: 404 | config: 405 | str_replace: 406 | params: 407 | $REPOLIST: 408 | list_join: 409 | - " " 410 | - {get_param: extra_docker_repository_urls} 411 | template: {get_file: 
fragments/set-extra-docker-repos.sh} 412 | 413 | # Insure that the host software is current 414 | host_update: 415 | type: OS::Heat::SoftwareConfig 416 | properties: 417 | config: 418 | str_replace: 419 | params: 420 | $SYSTEM_UPDATE: {get_param: system_update} 421 | template: {get_file: fragments/host-update.sh} 422 | 423 | add_dns_record: 424 | type: OS::Heat::SoftwareConfig 425 | properties: 426 | config: 427 | str_replace: 428 | params: 429 | '%ZONE%': {get_param: domain_name} 430 | '%DNS_SERVER%': {get_param: [dns_servers, 0]} 431 | '%DNS_UPDATE_KEY%': {get_param: dns_update_key} 432 | '%IP_ADDRESS%': {get_attr: [port, fixed_ips, 0, ip_address]} 433 | template: {get_file: fragments/add_dns_record.sh} 434 | 435 | # Prepare the host to run Docker and Ansible for OpenShift install and config 436 | infra_boot: 437 | type: OS::Heat::SoftwareConfig 438 | properties: 439 | group: script 440 | config: 441 | str_replace: 442 | params: 443 | $DOCKER_VOLUME_ID: {get_resource: docker_volume} 444 | template: {get_file: fragments/infra-boot.sh} 445 | 446 | # Add a node's IP/Name mapping to DNS 447 | node_add: 448 | type: OS::Heat::SoftwareConfig 449 | properties: 450 | group: script 451 | inputs: 452 | - name: node_hostname 453 | - name: node_type 454 | outputs: 455 | - name: result 456 | config: {get_file: fragments/bastion-node-add.sh} 457 | 458 | # Add the hostname and address of the bastion host to the infra host 459 | deployment_bastion_node_add: 460 | depends_on: wait_condition 461 | type: OS::Heat::SoftwareDeployment 462 | properties: 463 | config: 464 | get_resource: node_add 465 | server: 466 | get_param: bastion_node 467 | input_values: 468 | node_type: infra 469 | node_hostname: 470 | str_replace: 471 | template: "HOST.DOMAIN" 472 | params: 473 | HOST: {get_param: hostname} 474 | DOMAIN: {get_param: domain_name} 475 | 476 | node_cleanup: 477 | type: OS::Heat::SoftwareConfig 478 | properties: 479 | group: script 480 | inputs: 481 | - name: node_name 482 | - name: 
node_type 483 | - name: ssh_user 484 | default: {get_param: ssh_user} 485 | outputs: 486 | - name: result 487 | config: {get_file: fragments/bastion-node-cleanup.sh} 488 | 489 | # activation hook for removing the node from DNS and from the Kubernetes 490 | # cluster 491 | deployment_bastion_node_cleanup: 492 | depends_on: [host, wait_condition] 493 | type: OS::Heat::SoftwareDeployment 494 | properties: 495 | actions: ['DELETE'] 496 | input_values: 497 | node_type: infra 498 | node_name: 499 | str_replace: 500 | template: "HOST.DOMAIN" 501 | params: 502 | HOST: {get_param: hostname} 503 | DOMAIN: {get_param: domain_name} 504 | config: 505 | get_resource: node_cleanup 506 | server: 507 | get_param: bastion_node 508 | 509 | # Wait for infra_boot (cloud-init) to complete or time out 510 | wait_condition: 511 | type: OS::Heat::WaitCondition 512 | properties: 513 | handle: {get_resource: wait_handle} 514 | timeout: {get_param: timeout} 515 | 516 | # This provides a curl CLI string to the cloud-init script which is queried 517 | # with a status at the end of of the cloud-init process 518 | wait_handle: 519 | type: OS::Heat::WaitConditionHandle 520 | 521 | outputs: 522 | console_url: 523 | description: Compose the access URL for the OpenShift web UI 524 | value: 525 | str_replace: 526 | params: 527 | HOSTNAME: {get_param: hostname} 528 | DOMAINNAME: {get_param: domain_name} 529 | template: "https://HOSTNAME.DOMAINNAME:8443/console/" 530 | api_url: 531 | description: Compose the access URL for the OpenShift REST API 532 | value: 533 | str_replace: 534 | params: 535 | HOSTNAME: {get_param: hostname} 536 | DOMAINNAME: {get_param: domain_name} 537 | template: "https://HOSTNAME.DOMAINNAME:8443/" 538 | wc_data: 539 | description: Syncronization data 540 | value: { get_attr: ['wait_condition', 'data'] } 541 | hostname: 542 | description: The actual short name for the host 543 | value: {get_param: hostname} 544 | host: 545 | description: A reference to the infra host identifier 
546 | value: {get_resource: host} 547 | ip_address: 548 | description: IP address of the node 549 | value: {get_attr: [floating_ip, floating_ip_address]} 550 | -------------------------------------------------------------------------------- /ipfailover_keepalived.yaml: -------------------------------------------------------------------------------- 1 | heat_template_version: 2016-10-14 2 | 3 | description: > 4 | A template which deploys a IP failover service for the Openshift router 5 | 6 | parameters: 7 | fixed_network: 8 | description: > 9 | The name or ID of the admin and public network 10 | type: string 11 | constraints: 12 | - custom_constraint: neutron.network 13 | 14 | fixed_subnet: 15 | description: > 16 | The name or ID of the admin and public IPv4 space 17 | type: string 18 | constraints: 19 | - custom_constraint: neutron.subnet 20 | 21 | external_network: 22 | description: > 23 | The name of the inbound access network 24 | type: string 25 | constraints: 26 | - custom_constraint: neutron.network 27 | 28 | loadbalancer_ip: 29 | type: string 30 | 31 | resources: 32 | router_vip_port: 33 | type: OS::Neutron::Port 34 | properties: 35 | network_id: {get_param: fixed_network} 36 | fixed_ips: 37 | - subnet: {get_param: fixed_subnet} 38 | 39 | router_floating_ip: 40 | type: OS::Neutron::FloatingIP 41 | properties: 42 | floating_network: {get_param: external_network} 43 | port_id: {get_resource: router_vip_port} 44 | 45 | outputs: 46 | router_ip: 47 | description: > 48 | Openshift Router external IP 49 | value: {get_attr: [router_floating_ip, floating_ip_address]} 50 | 51 | router_vip: 52 | description: > 53 | Openshift Router virtual private IP 54 | value: {get_attr: [router_vip_port, fixed_ips, 0, ip_address]} 55 | 56 | allowed_address_pairs: 57 | description: > 58 | Allowed address pairs to apply on the ports of the nodes 59 | that host the Openshift router 60 | value: 61 | # Openshift IP failover uses Keepalived in multicast mode. 
62 | # Therefore, we need to allow the multicast address and the 63 | # forged MAC address for this IP 64 | - ip_address: "224.0.0.18" 65 | mac_address: "01:00:5e:00:00:12" 66 | - ip_address: {get_attr: [router_vip_port, fixed_ips, 0, ip_address]} 67 | -------------------------------------------------------------------------------- /ipfailover_none.yaml: -------------------------------------------------------------------------------- 1 | heat_template_version: 2016-10-14 2 | 3 | description: > 4 | A template which deploys a IP failover service for the Openshift router 5 | 6 | parameters: 7 | fixed_network: 8 | description: > 9 | The name or ID of the admin and public network 10 | type: string 11 | constraints: 12 | - custom_constraint: neutron.network 13 | 14 | fixed_subnet: 15 | description: > 16 | The name or ID of the admin and public IPv4 space 17 | type: string 18 | constraints: 19 | - custom_constraint: neutron.subnet 20 | 21 | external_network: 22 | description: > 23 | The name of the inbound access network 24 | type: string 25 | constraints: 26 | - custom_constraint: neutron.network 27 | 28 | loadbalancer_ip: 29 | type: string 30 | 31 | outputs: 32 | router_ip: 33 | description: > 34 | The Openshift Router external IP 35 | value: {get_param: loadbalancer_ip} 36 | 37 | router_vip: 38 | description: > 39 | The Openshift Router virtual private IP 40 | value: "" 41 | 42 | allowed_address_pairs: 43 | description: > 44 | Allowed address pairs to apply on the ports of the nodes 45 | that host the Openshift router 46 | value: [] 47 | -------------------------------------------------------------------------------- /loadbalancer_dedicated.yaml: -------------------------------------------------------------------------------- 1 | heat_template_version: 2016-10-14 2 | 3 | description: > 4 | A template which provides a creates a loadbalancer using neutron's LBaaS. 
5 | 6 | parameters: 7 | 8 | # What version of OpenShift Container Platform to install 9 | # This value is used to select the RPM repo for the OCP release to install 10 | ocp_version: 11 | type: string 12 | description: > 13 | The version of OpenShift Container Platform to deploy 14 | 15 | key_name: 16 | description: > 17 | A pre-submitted SSH key to access the VM hosts 18 | type: string 19 | constraints: 20 | - custom_constraint: nova.keypair 21 | 22 | image: 23 | description: > 24 | Select a base image to use for the bastion server 25 | type: string 26 | constraints: 27 | - custom_constraint: glance.image 28 | 29 | flavor: 30 | description: > 31 | Define the hardware characteristics for the VMs: CPU, Memory, base disk 32 | type: string 33 | constraints: 34 | - custom_constraint: nova.flavor 35 | 36 | hostname: 37 | description: > 38 | The load balancer hostname portion of the FQDN 39 | type: string 40 | constraints: 41 | - allowed_pattern: '[a-z0-9\-\.]*' 42 | description: Hostname must contain only characters [a-z0-9\-\.]. 
43 | 44 | domain_name: 45 | description: > 46 | All VMs will be placed in this domain 47 | type: string 48 | 49 | app_subdomain: 50 | type: string 51 | 52 | rhn_username: 53 | description: > 54 | A valid user with entitlements to RHEL and OpenShift software repos 55 | type: string 56 | 57 | rhn_password: 58 | description: > 59 | The password for the RHN user 60 | type: string 61 | hidden: true 62 | 63 | # Red Hat satellite subscription parameters 64 | sat6_hostname: 65 | type: string 66 | description: > 67 | The hostname of the Satellite 6 server which will provide software updates 68 | default: '' 69 | 70 | sat6_organization: 71 | type: string 72 | description: > 73 | An organization string provided by Sat6 to group subscriptions 74 | default: '' 75 | 76 | sat6_activationkey: 77 | type: string 78 | description: > 79 | An activation key string provided by Sat6 to enable subscriptions 80 | 81 | rhn_pool: 82 | description: > 83 | A subscription pool containing the RHEL and OpenShift software repos 84 | OPTIONAL 85 | type: string 86 | hidden: true 87 | 88 | extra_rhn_pools: 89 | type: comma_delimited_list 90 | description: List of rhn pools which will be installed on each node. 
91 | default: '' 92 | 93 | ssh_user: 94 | description: > 95 | The user for SSH access to the VM hosts 96 | type: string 97 | 98 | ansible_public_key: 99 | description: > 100 | The SSH public key that Ansible will use to access master and node hosts 101 | This will be placed on each VM host in /root/.ssh/authorized_keys 102 | type: string 103 | 104 | fixed_subnet: 105 | description: > 106 | The name or ID of the internal IPv4 space 107 | type: string 108 | constraints: 109 | - custom_constraint: neutron.subnet 110 | 111 | members: 112 | type: comma_delimited_list 113 | 114 | master_hostname: 115 | type: string 116 | 117 | floatingip_id: 118 | type: string 119 | 120 | floatingip: 121 | type: string 122 | 123 | fixed_network: 124 | description: > 125 | The name or ID of the internal network 126 | type: string 127 | constraints: 128 | - custom_constraint: neutron.network 129 | 130 | fixed_subnet: 131 | description: > 132 | The name or ID of the internal IPv4 space 133 | type: string 134 | constraints: 135 | - custom_constraint: neutron.subnet 136 | 137 | extra_repository_urls: 138 | type: comma_delimited_list 139 | description: List of repository URLs which will be installed on each node. 140 | default: '' 141 | 142 | extra_docker_repository_urls: 143 | type: comma_delimited_list 144 | description: List of docker repository URLs which will be installed on each node, if a repo is insecure use '#insecure' suffix. 145 | default: '' 146 | 147 | stack_name: 148 | type: string 149 | default: '' 150 | 151 | # Delay openshift installation until the master is ready to accept 152 | timeout: 153 | description: Time to wait until the master setup is ready. 154 | type: number 155 | default: 4000 156 | 157 | bastion_node: 158 | type: string 159 | description: > 160 | The name or ID of an bastion instance. 161 | default: '' 162 | 163 | stack_name: 164 | description: Top level stack name. 
165 | type: string 166 | 167 | dns_servers: 168 | type: comma_delimited_list 169 | description: address of dns nameservers reachable in your environment 170 | 171 | dns_update_key: 172 | type: string 173 | hidden: true 174 | 175 | ca_cert: 176 | type: string 177 | description: Certificate Authority Certificate to be added to trust chain 178 | 179 | resources: 180 | floating_ip_assoc: 181 | type: OS::Neutron::FloatingIPAssociation 182 | properties: 183 | port_id: {get_resource: port} 184 | floatingip_id: {get_param: floatingip_id} 185 | 186 | # bind the IP to the host with security port filters 187 | port: 188 | type: OS::Neutron::Port 189 | properties: 190 | security_groups: 191 | - {get_resource: security_group} 192 | network: {get_param: fixed_network} 193 | fixed_ips: 194 | - subnet: {get_param: fixed_subnet} 195 | replacement_policy: AUTO 196 | 197 | # Define network access policy for access to the laod balancer 198 | security_group: 199 | type: OS::Neutron::SecurityGroup 200 | properties: 201 | rules: 202 | - protocol: icmp 203 | - protocol: tcp 204 | port_range_min: 8443 205 | port_range_max: 8443 206 | - protocol: tcp 207 | port_range_min: 443 208 | port_range_max: 443 209 | - protocol: tcp 210 | port_range_min: 80 211 | port_range_max: 80 212 | - protocol: tcp 213 | port_range_min: 22 214 | port_range_max: 22 215 | 216 | # The VM which will host the load balancer service 217 | host: 218 | type: OS::Nova::Server 219 | properties: 220 | name: 221 | str_replace: 222 | template: "%stack_name%-%hostname%.%domainname%" 223 | params: 224 | '%stack_name%': {get_param: stack_name} 225 | '%hostname%': {get_param: hostname} 226 | '%domainname%': {get_param: domain_name} 227 | admin_user: {get_param: ssh_user} 228 | image: {get_param: image} 229 | flavor: {get_param: flavor} 230 | key_name: {get_param: key_name} 231 | networks: 232 | - port: {get_resource: port} 233 | user_data_format: SOFTWARE_CONFIG 234 | user_data_update_policy: IGNORE 235 | user_data: 
{get_resource: init} 236 | 237 | # Collect a set of host configuration information in a single structure 238 | init: 239 | type: OS::Heat::MultipartMime 240 | properties: 241 | parts: 242 | - config: {get_resource: set_hostname} 243 | - config: {get_resource: included_files} 244 | - config: {get_resource: update_ca_cert} 245 | - config: {get_resource: rhn_register} 246 | - config: {get_resource: set_extra_repos} 247 | - config: {get_resource: set_extra_docker_repos} 248 | - config: {get_resource: host_update} 249 | - config: {get_resource: add_dns_record} 250 | - config: {get_resource: add_wildcard_record} 251 | - config: {get_resource: lb_boot} 252 | 253 | # Compose the FQDN and set the hostname in the cloud-init data structure 254 | set_hostname: 255 | type: OS::Heat::CloudConfig 256 | properties: 257 | cloud_config: 258 | hostname: 259 | str_replace: 260 | template: "%stack_name%-%hostname%" 261 | params: 262 | '%stack_name%': {get_param: stack_name} 263 | '%hostname%': {get_param: hostname} 264 | fqdn: 265 | str_replace: 266 | template: "%stack_name%-%hostname%.%domainname%" 267 | params: 268 | '%stack_name%': {get_param: stack_name} 269 | '%hostname%': {get_param: hostname} 270 | '%domainname%': {get_param: domain_name} 271 | 272 | # Compile a set of standard configuration files to provide identity and access 273 | included_files: 274 | type: OS::Heat::CloudConfig 275 | properties: 276 | cloud_config: 277 | write_files: 278 | - path: /usr/bin/retry 279 | permissions: 0755 280 | content: {get_file: fragments/retry.sh} 281 | - path: /usr/local/bin/update_dns 282 | permissions: 0755 283 | content: {get_file: fragments/update_dns.py} 284 | - path: /usr/local/share/openshift-on-openstack/common_functions.sh 285 | permissions: 0755 286 | content: 287 | str_replace: 288 | params: 289 | $WC_NOTIFY: { get_attr: ['wait_handle', 'curl_cli'] } 290 | template: {get_file: fragments/common_functions.sh} 291 | - path: /etc/pki/ca-trust/source/anchors/ca.crt 292 | permissions: 
0600 293 | content: {get_param: ca_cert} 294 | ssh_authorized_keys: 295 | - {get_param: ansible_public_key} 296 | 297 | # Add CA Cert to trust chain 298 | update_ca_cert: 299 | type: OS::Heat::SoftwareConfig 300 | properties: 301 | config: 302 | str_replace: 303 | params: 304 | $CA_CERT: {get_param: ca_cert} 305 | template: {get_file: fragments/ca_cert.sh} 306 | 307 | # Connect to a software source for updates on RHEL 308 | rhn_register: 309 | type: OS::Heat::SoftwareConfig 310 | properties: 311 | config: 312 | str_replace: 313 | params: 314 | $OCP_VERSION: {get_param: ocp_version} 315 | $RHN_USERNAME: {get_param: rhn_username} 316 | $RHN_PASSWORD: {get_param: rhn_password} 317 | $SAT6_HOSTNAME: {get_param: sat6_hostname} 318 | $SAT6_ORGANIZATION: {get_param: sat6_organization} 319 | $SAT6_ACTIVATIONKEY: {get_param: sat6_activationkey} 320 | $POOL_ID: {get_param: rhn_pool} 321 | $EXTRA_POOL_IDS: 322 | list_join: 323 | - " --pool=" 324 | - {get_param: extra_rhn_pools} 325 | template: {get_file: fragments/rhn-register.sh} 326 | 327 | # Enable any extra repositories 328 | set_extra_repos: 329 | type: OS::Heat::SoftwareConfig 330 | properties: 331 | config: 332 | str_replace: 333 | params: 334 | $REPOLIST: 335 | list_join: 336 | - " " 337 | - {get_param: extra_repository_urls} 338 | template: {get_file: fragments/set-extra-repos.sh} 339 | 340 | set_extra_docker_repos: 341 | type: OS::Heat::SoftwareConfig 342 | properties: 343 | config: 344 | str_replace: 345 | params: 346 | $REPOLIST: 347 | list_join: 348 | - " " 349 | - {get_param: extra_docker_repository_urls} 350 | template: {get_file: fragments/set-extra-docker-repos.sh} 351 | 352 | # Insure that the host software is current 353 | host_update: 354 | type: OS::Heat::SoftwareConfig 355 | properties: 356 | config: 357 | get_file: fragments/host-update.sh 358 | 359 | add_dns_record: 360 | type: OS::Heat::SoftwareConfig 361 | properties: 362 | config: 363 | str_replace: 364 | params: 365 | '%ZONE%': {get_param: 
domain_name} 366 | '%DNS_SERVER%': {get_param: [dns_servers, 0]} 367 | '%DNS_UPDATE_KEY%': {get_param: dns_update_key} 368 | '%IP_ADDRESS%': {get_param: floatingip} 369 | template: {get_file: fragments/add_dns_record.sh} 370 | 371 | add_wildcard_record: 372 | type: OS::Heat::SoftwareConfig 373 | properties: 374 | config: 375 | str_replace: 376 | params: 377 | '%DNS_ENTRY%': {list_join: ["", ["*.", {get_param: app_subdomain}]]} 378 | '%ZONE%': {get_param: domain_name} 379 | '%DNS_SERVER%': {get_param: [dns_servers, 0]} 380 | '%DNS_UPDATE_KEY%': {get_param: dns_update_key} 381 | '%IP_ADDRESS%': {get_param: floatingip} 382 | template: {get_file: fragments/add_dns_record.sh} 383 | 384 | 385 | # Prepare the host for SSH access for ansible 386 | lb_boot: 387 | type: OS::Heat::SoftwareConfig 388 | properties: 389 | config: 390 | str_replace: 391 | params: {} 392 | template: {get_file: fragments/lb-boot.sh} 393 | 394 | node_cleanup: 395 | type: OS::Heat::SoftwareConfig 396 | properties: 397 | group: script 398 | inputs: 399 | - name: node_name 400 | - name: node_type 401 | - name: ssh_user 402 | default: {get_param: ssh_user} 403 | outputs: 404 | - name: result 405 | config: {get_file: fragments/bastion-node-cleanup.sh} 406 | 407 | # activation hook for removing the node from DNS and from the Kubernetes 408 | # cluster 409 | deployment_bastion_node_cleanup: 410 | depends_on: [host, wait_condition] 411 | type: OS::Heat::SoftwareDeployment 412 | properties: 413 | actions: ['DELETE'] 414 | input_values: 415 | node_type: loadbalancer 416 | node_name: 417 | str_replace: 418 | template: "%stack_name%-%hostname%.%domainname%" 419 | params: 420 | '%stack_name%': {get_param: stack_name} 421 | '%hostname%': {get_param: hostname} 422 | '%domainname%': {get_param: domain_name} 423 | config: 424 | get_resource: node_cleanup 425 | server: 426 | get_param: bastion_node 427 | 428 | # Wait for the load balancer host to complete cloud-init or time out 429 | wait_condition: 430 | type: 
OS::Heat::WaitCondition 431 | properties: 432 | handle: {get_resource: wait_handle} 433 | timeout: {get_param: timeout} 434 | 435 | # Provide a curl CLI to the cloud-init script. On completion, notify Heat 436 | wait_handle: 437 | type: OS::Heat::WaitConditionHandle 438 | 439 | outputs: 440 | console_url: 441 | description: URL of the OpenShift web console 442 | value: 443 | str_replace: 444 | template: "https://%stack_name%-%hostname%.%domainname%:8443/console/" 445 | params: 446 | '%stack_name%': {get_param: stack_name} 447 | '%hostname%': {get_param: hostname} 448 | '%domainname%': {get_param: domain_name} 449 | 450 | api_url: 451 | description: URL entrypoint to the OpenShift API 452 | value: 453 | str_replace: 454 | template: "https://%stack_name%-%hostname%.%domainname%:8443/" 455 | params: 456 | '%stack_name%': {get_param: stack_name} 457 | '%hostname%': {get_param: hostname} 458 | '%domainname%': {get_param: domain_name} 459 | 460 | hostname: 461 | description: Loadbalancer hostname 462 | value: 463 | str_replace: 464 | template: "%stack_name%-%hostname%.%domainname%" 465 | params: 466 | '%stack_name%': {get_param: stack_name} 467 | '%hostname%': {get_param: hostname} 468 | '%domainname%': {get_param: domain_name} 469 | -------------------------------------------------------------------------------- /loadbalancer_external.yaml: -------------------------------------------------------------------------------- 1 | heat_template_version: 2016-10-14 2 | 3 | description: > 4 | A template which doesn't create any resources supposing that an external 5 | loadbalancer is used. 
6 | 7 | parameters: 8 | 9 | # What version of OpenShift Container Platform to install 10 | # This value is used to select the RPM repo for the OCP release to install 11 | ocp_version: 12 | type: string 13 | description: > 14 | The version of OpenShift Container Platform to deploy 15 | 16 | key_name: 17 | description: > 18 | A pre-submitted SSH key to access the VM hosts 19 | type: string 20 | constraints: 21 | - custom_constraint: nova.keypair 22 | 23 | image: 24 | type: string 25 | default: '' 26 | 27 | flavor: 28 | description: > 29 | Define the hardware characteristics for the VMs: CPU, Memory, base disk 30 | type: string 31 | constraints: 32 | - custom_constraint: nova.flavor 33 | 34 | hostname: 35 | description: > 36 | The load balancer hostname portion of the FQDN 37 | type: string 38 | constraints: 39 | - allowed_pattern: '[a-z0-9\-\.]*' 40 | description: Hostname must contain only characters [a-z0-9\-\.]. 41 | 42 | stack_name: 43 | description: Top level stack name. 44 | type: string 45 | 46 | domain_name: 47 | description: > 48 | All VMs will be placed in this domain 49 | type: string 50 | 51 | app_subdomain: 52 | type: string 53 | 54 | rhn_username: 55 | description: > 56 | A valid user with entitlements to RHEL and OpenShift software repos 57 | type: string 58 | 59 | rhn_password: 60 | description: > 61 | The password for the RHN user 62 | type: string 63 | hidden: true 64 | 65 | # Red Hat satellite subscription parameters 66 | sat6_hostname: 67 | type: string 68 | description: > 69 | The hostname of the Satellite 6 server which will provide software updates 70 | default: '' 71 | 72 | sat6_organization: 73 | type: string 74 | description: > 75 | An organization string provided by Sat6 to group subscriptions 76 | default: '' 77 | 78 | sat6_activationkey: 79 | type: string 80 | description: > 81 | An activation key string provided by Sat6 to enable subscriptions 82 | 83 | rhn_pool: 84 | description: > 85 | A subscription pool containing the RHEL and 
OpenShift software repos 86 | OPTIONAL 87 | type: string 88 | hidden: true 89 | 90 | extra_rhn_pools: 91 | type: comma_delimited_list 92 | description: List of rhn pools which will be installed on each node. 93 | default: '' 94 | 95 | ssh_user: 96 | description: > 97 | The user for SSH access to the VM hosts 98 | type: string 99 | 100 | ansible_public_key: 101 | description: > 102 | The SSH public key that Ansible will use to access master and node hosts 103 | This will be placed on each VM host in /root/.ssh/authorized_keys 104 | type: string 105 | 106 | fixed_subnet: 107 | description: > 108 | The name or ID of the internal IPv4 space 109 | type: string 110 | constraints: 111 | - custom_constraint: neutron.subnet 112 | 113 | members: 114 | type: comma_delimited_list 115 | 116 | master_hostname: 117 | type: string 118 | 119 | floatingip_id: 120 | type: string 121 | 122 | floatingip: 123 | type: string 124 | 125 | fixed_network: 126 | description: > 127 | The name or ID of the internal network 128 | type: string 129 | constraints: 130 | - custom_constraint: neutron.network 131 | 132 | fixed_subnet: 133 | description: > 134 | The name or ID of the internal IPv4 space 135 | type: string 136 | constraints: 137 | - custom_constraint: neutron.subnet 138 | 139 | extra_repository_urls: 140 | type: comma_delimited_list 141 | description: List of repository URLs which will be installed on each node. 142 | default: '' 143 | 144 | extra_docker_repository_urls: 145 | type: comma_delimited_list 146 | description: List of docker repository URLs which will be installed on each node, if a repo is insecure use '#insecure' suffix. 147 | default: '' 148 | 149 | # Delay openshift installation until the master is ready to accept 150 | timeout: 151 | description: Time to wait until the master setup is ready. 
152 | type: number 153 | default: 4000 154 | 155 | stack_name: 156 | type: string 157 | default: '' 158 | 159 | bastion_node: 160 | type: string 161 | description: > 162 | The name or ID of the bastion instance. 163 | default: '' 164 | 165 | dns_servers: 166 | type: comma_delimited_list 167 | description: address of dns nameservers reachable in your environment 168 | 169 | dns_update_key: 170 | type: string 171 | hidden: true 172 | 173 | ca_cert: 174 | type: string 175 | description: Certificate Authority Certificate to be added to trust chain 176 | 177 | outputs: 178 | console_url: 179 | description: URL of the OpenShift web console 180 | value: 181 | str_replace: 182 | template: "https://%hostname%:8443/console/" 183 | params: 184 | '%hostname%': {get_param: hostname} 185 | 186 | api_url: 187 | description: URL entrypoint to the OpenShift API 188 | value: 189 | str_replace: 190 | template: "https://%hostname%:8443/" 191 | params: 192 | '%hostname%': {get_param: hostname} 193 | 194 | hostname: 195 | description: Loadbalancer hostname 196 | value: {get_param: hostname} 197 | -------------------------------------------------------------------------------- /loadbalancer_neutron.yaml: -------------------------------------------------------------------------------- 1 | heat_template_version: 2016-10-14 2 | 3 | description: > 4 | A template which provides a creates a loadbalancer using neutron's LBaaS. 
5 | 6 | parameters: 7 | 8 | # What version of OpenShift Container Platform to install 9 | # This value is used to select the RPM repo for the OCP release to install 10 | ocp_version: 11 | type: string 12 | description: > 13 | The version of OpenShift Container Platform to deploy 14 | 15 | key_name: 16 | description: > 17 | A pre-submitted SSH key to access the VM hosts 18 | type: string 19 | constraints: 20 | - custom_constraint: nova.keypair 21 | 22 | image: 23 | type: string 24 | default: '' 25 | 26 | flavor: 27 | description: > 28 | Define the hardware characteristics for the VMs: CPU, Memory, base disk 29 | type: string 30 | constraints: 31 | - custom_constraint: nova.flavor 32 | 33 | hostname: 34 | description: > 35 | The load balancer hostname portion of the FQDN 36 | type: string 37 | constraints: 38 | - allowed_pattern: '[a-z0-9\-\.]*' 39 | description: Hostname must contain only characters [a-z0-9\-\.]. 40 | 41 | stack_name: 42 | description: Top level stack name. 43 | type: string 44 | 45 | domain_name: 46 | description: > 47 | All VMs will be placed in this domain 48 | type: string 49 | 50 | app_subdomain: 51 | type: string 52 | 53 | rhn_username: 54 | description: > 55 | A valid user with entitlements to RHEL and OpenShift software repos 56 | type: string 57 | 58 | rhn_password: 59 | description: > 60 | The password for the RHN user 61 | type: string 62 | hidden: true 63 | 64 | # Red Hat satellite subscription parameters 65 | sat6_hostname: 66 | type: string 67 | description: > 68 | The hostname of the Satellite 6 server which will provide software updates 69 | default: '' 70 | 71 | sat6_organization: 72 | type: string 73 | description: > 74 | An organization string provided by Sat6 to group subscriptions 75 | default: '' 76 | 77 | sat6_activationkey: 78 | type: string 79 | description: > 80 | An activation key string provided by Sat6 to enable subscriptions 81 | 82 | rhn_pool: 83 | description: > 84 | A subscription pool containing the RHEL and 
OpenShift software repos 85 | OPTIONAL 86 | type: string 87 | hidden: true 88 | 89 | extra_rhn_pools: 90 | type: comma_delimited_list 91 | description: List of rhn pools which will be installed on each node. 92 | default: '' 93 | 94 | ssh_user: 95 | description: > 96 | The user for SSH access to the VM hosts 97 | type: string 98 | 99 | ansible_public_key: 100 | description: > 101 | The SSH public key that Ansible will use to access master and node hosts 102 | This will be placed on each VM host in /root/.ssh/authorized_keys 103 | type: string 104 | 105 | fixed_subnet: 106 | description: > 107 | The name or ID of the internal IPv4 space 108 | type: string 109 | constraints: 110 | - custom_constraint: neutron.subnet 111 | 112 | members: 113 | type: comma_delimited_list 114 | 115 | master_hostname: 116 | type: string 117 | 118 | floatingip_id: 119 | type: string 120 | 121 | floatingip: 122 | type: string 123 | 124 | fixed_network: 125 | description: > 126 | The name or ID of the internal network 127 | type: string 128 | constraints: 129 | - custom_constraint: neutron.network 130 | 131 | fixed_subnet: 132 | description: > 133 | The name or ID of the internal IPv4 space 134 | type: string 135 | constraints: 136 | - custom_constraint: neutron.subnet 137 | 138 | extra_repository_urls: 139 | type: comma_delimited_list 140 | description: List of repository URLs which will be installed on each node. 141 | default: '' 142 | 143 | extra_docker_repository_urls: 144 | type: comma_delimited_list 145 | description: List of docker repository URLs which will be installed on each node, if a repo is insecure use '#insecure' suffix. 146 | default: '' 147 | 148 | stack_name: 149 | type: string 150 | default: '' 151 | 152 | bastion_node: 153 | type: string 154 | description: > 155 | The name or ID of the bastion instance. 
156 | default: '' 157 | 158 | dns_servers: 159 | type: comma_delimited_list 160 | description: address of dns nameservers reachable in your environment 161 | 162 | dns_update_key: 163 | type: string 164 | hidden: true 165 | 166 | ca_cert: 167 | type: string 168 | description: Certificate Authority Certificate to be added to trust chain 169 | 170 | resources: 171 | lb: 172 | type: OS::Neutron::LoadBalancer 173 | properties: 174 | protocol_port: 8443 175 | pool_id: {get_resource: lb_pool} 176 | members: {get_param: members} 177 | 178 | lb_pool: 179 | type: OS::Neutron::Pool 180 | properties: 181 | name: lb_pool 182 | description: Load balancer for OpenShift hosts. 183 | protocol: HTTPS 184 | subnet_id: {get_param: fixed_subnet} 185 | lb_method: ROUND_ROBIN 186 | monitors: [{get_resource: lb_monitor}] 187 | vip: 188 | protocol_port: 8443 189 | session_persistence: 190 | type: SOURCE_IP 191 | 192 | lb_monitor: 193 | type: OS::Neutron::HealthMonitor 194 | properties: 195 | type: TCP 196 | delay: 15 197 | max_retries: 5 198 | timeout: 10 199 | 200 | floating_ip_assoc: 201 | type: OS::Neutron::FloatingIPAssociation 202 | properties: 203 | port_id: {get_attr: [lb_pool, vip, port_id]} 204 | floatingip_id: {get_param: floatingip_id} 205 | 206 | outputs: 207 | console_url: 208 | description: URL of the OpenShift web console 209 | value: 210 | str_replace: 211 | template: "https://%stack_name%-%hostname%.%domainname%:8443/console/" 212 | params: 213 | '%stack_name%': {get_param: stack_name} 214 | '%hostname%': {get_param: hostname} 215 | '%domainname%': {get_param: domain_name} 216 | 217 | api_url: 218 | description: URL entrypoint to the OpenShift API 219 | value: 220 | str_replace: 221 | template: "https://%stack_name%-%hostname%.%domainname%:8443/" 222 | params: 223 | '%stack_name%': {get_param: stack_name} 224 | '%hostname%': {get_param: hostname} 225 | '%domainname%': {get_param: domain_name} 226 | 227 | hostname: 228 | description: Loadbalancer hostname 229 | value: 230 
| str_replace: 231 | template: "%stack_name%-%hostname%.%domainname%" 232 | params: 233 | '%stack_name%': {get_param: stack_name} 234 | '%hostname%': {get_param: hostname} 235 | '%domainname%': {get_param: domain_name} 236 | -------------------------------------------------------------------------------- /loadbalancer_none.yaml: -------------------------------------------------------------------------------- 1 | heat_template_version: 2016-10-14 2 | 3 | description: > 4 | A template which provides a creates a loadbalancer using neutron's LBaaS. 5 | 6 | parameters: 7 | 8 | # What version of OpenShift Container Platform to install 9 | # This value is used to select the RPM repo for the OCP release to install 10 | ocp_version: 11 | type: string 12 | description: > 13 | The version of OpenShift Container Platform to deploy 14 | 15 | key_name: 16 | description: > 17 | A pre-submitted SSH key to access the VM hosts 18 | type: string 19 | constraints: 20 | - custom_constraint: nova.keypair 21 | 22 | image: 23 | type: string 24 | default: '' 25 | 26 | flavor: 27 | description: > 28 | Define the hardware characteristics for the VMs: CPU, Memory, base disk 29 | type: string 30 | constraints: 31 | - custom_constraint: nova.flavor 32 | 33 | hostname: 34 | description: > 35 | The load balancer hostname portion of the FQDN 36 | type: string 37 | 38 | stack_name: 39 | description: Top level stack name. 
40 | type: string 41 | 42 | domain_name: 43 | description: > 44 | All VMs will be placed in this domain 45 | type: string 46 | 47 | app_subdomain: 48 | type: string 49 | 50 | rhn_username: 51 | description: > 52 | A valid user with entitlements to RHEL and OpenShift software repos 53 | type: string 54 | 55 | rhn_password: 56 | description: > 57 | The password for the RHN user 58 | type: string 59 | hidden: true 60 | 61 | # Red Hat satellite subscription parameters 62 | sat6_hostname: 63 | type: string 64 | description: > 65 | The hostname of the Satellite 6 server which will provide software updates 66 | default: '' 67 | 68 | sat6_organization: 69 | type: string 70 | description: > 71 | An organization string provided by Sat6 to group subscriptions 72 | default: '' 73 | 74 | sat6_activationkey: 75 | type: string 76 | description: > 77 | An activation key string provided by Sat6 to enable subscriptions 78 | 79 | rhn_pool: 80 | description: > 81 | A subscription pool containing the RHEL and OpenShift software repos 82 | OPTIONAL 83 | type: string 84 | hidden: true 85 | 86 | extra_rhn_pools: 87 | type: comma_delimited_list 88 | description: List of rhn pools which will be installed on each node. 
89 | default: '' 90 | 91 | ssh_user: 92 | description: > 93 | The user for SSH access to the VM hosts 94 | type: string 95 | 96 | ansible_public_key: 97 | description: > 98 | The SSH public key that Ansible will use to access master and node hosts 99 | This will be placed on each VM host in /root/.ssh/authorized_keys 100 | type: string 101 | 102 | fixed_subnet: 103 | description: > 104 | The name or ID of the internal IPv4 space 105 | type: string 106 | constraints: 107 | - custom_constraint: neutron.subnet 108 | 109 | members: 110 | type: comma_delimited_list 111 | 112 | master_hostname: 113 | type: string 114 | 115 | floatingip_id: 116 | type: string 117 | 118 | floatingip: 119 | type: string 120 | 121 | fixed_network: 122 | description: > 123 | The name or ID of the internal network 124 | type: string 125 | constraints: 126 | - custom_constraint: neutron.network 127 | 128 | fixed_subnet: 129 | description: > 130 | The name or ID of the internal IPv4 space 131 | type: string 132 | constraints: 133 | - custom_constraint: neutron.subnet 134 | 135 | extra_repository_urls: 136 | type: comma_delimited_list 137 | description: List of repository URLs which will be installed on each node. 138 | default: '' 139 | 140 | extra_docker_repository_urls: 141 | type: comma_delimited_list 142 | description: List of docker repository URLs which will be installed on each node, if a repo is insecure use '#insecure' suffix. 143 | default: '' 144 | 145 | # Delay openshift installation until the master is ready to accept 146 | timeout: 147 | description: Time to wait until the master setup is ready. 148 | type: number 149 | default: 4000 150 | 151 | hostname: 152 | description: > 153 | The load balancer hostname portion of the FQDN 154 | type: string 155 | constraints: 156 | - allowed_pattern: '[a-z0-9\-\.]*' 157 | description: Hostname must contain only characters [a-z0-9\-\.]. 
158 | 159 | stack_name: 160 | type: string 161 | default: '' 162 | 163 | bastion_node: 164 | type: string 165 | description: > 166 | The name or ID of the bastion instance. 167 | default: '' 168 | 169 | dns_servers: 170 | type: comma_delimited_list 171 | description: address of dns nameservers reachable in your environment 172 | 173 | dns_update_key: 174 | type: string 175 | hidden: true 176 | 177 | ca_cert: 178 | type: string 179 | description: Certificate Authority Certificate to be added to trust chain 180 | 181 | outputs: 182 | console_url: 183 | description: URL of the OpenShift web console 184 | value: 185 | str_replace: 186 | template: "https://%hostname%.%domainname%:8443/console/" 187 | params: 188 | '%hostname%': {get_param: master_hostname} 189 | '%domainname%': {get_param: domain_name} 190 | 191 | api_url: 192 | description: URL entrypoint to the OpenShift API 193 | value: 194 | str_replace: 195 | template: "https://%hostname%.%domainname%:8443/" 196 | params: 197 | '%hostname%': {get_param: master_hostname} 198 | '%domainname%': {get_param: domain_name} 199 | 200 | hostname: 201 | description: Loadbalancer hostname 202 | value: 203 | str_replace: 204 | template: "%stack_name%-%hostname%.%domainname%" 205 | params: 206 | '%stack_name%': {get_param: stack_name} 207 | '%hostname%': {get_param: hostname} 208 | '%domainname%': {get_param: domain_name} 209 | -------------------------------------------------------------------------------- /master.yaml: -------------------------------------------------------------------------------- 1 | heat_template_version: 2016-10-14 2 | 3 | 4 | description: > 5 | OpenShift Master 6 | 7 | 8 | parameters: 9 | 10 | # What version of OpenShift Container Platform to install 11 | # This value is used to select the RPM repo for the OCP release to install 12 | ocp_version: 13 | type: string 14 | description: > 15 | The version of OpenShift Container Platform to deploy 16 | 17 | key_name: 18 | description: > 19 | A pre-submitted 
SSH key to access the VM hosts 20 | type: string 21 | constraints: 22 | - custom_constraint: nova.keypair 23 | 24 | image: 25 | description: > 26 | Select a base image to use for the master servers 27 | type: string 28 | constraints: 29 | - custom_constraint: glance.image 30 | 31 | flavor: 32 | description: > 33 | Define the hardware characteristics for the VMs: CPU, Memory, base disk 34 | type: string 35 | constraints: 36 | - custom_constraint: nova.flavor 37 | 38 | docker_volume_size: 39 | description: > 40 | size of a cinder volume in GB to allocate to docker for container/image 41 | storage 42 | type: number 43 | default: 25 44 | 45 | rhn_username: 46 | description: > 47 | A valid user with entitlements to RHEL and OpenShift software repos 48 | type: string 49 | 50 | rhn_password: 51 | description: > 52 | The password for the RHN user 53 | type: string 54 | hidden: true 55 | 56 | # Red Hat satellite subscription parameters 57 | sat6_hostname: 58 | type: string 59 | description: > 60 | The hostname of the Satellite 6 server which will provide software updates 61 | default: '' 62 | 63 | sat6_organization: 64 | type: string 65 | description: > 66 | An organization string provided by Sat6 to group subscriptions 67 | default: '' 68 | 69 | sat6_activationkey: 70 | type: string 71 | description: > 72 | An activation key string provided by Sat6 to enable subscriptions 73 | default: '' 74 | 75 | rhn_pool: 76 | description: > 77 | A subscription pool containing the RHEL and OpenShift software repos 78 | OPTIONAL 79 | type: string 80 | hidden: true 81 | 82 | extra_rhn_pools: 83 | type: comma_delimited_list 84 | description: List of rhn pools which will be installed on each node. 85 | default: '' 86 | 87 | hostname: 88 | description: > 89 | A string to identify master hostnames. 90 | type: string 91 | constraints: 92 | - allowed_pattern: '[a-z0-9\-]*' 93 | description: Hostname must contain only characters [a-z0-9\-]. 
94 | 95 | domain_name: 96 | description: > 97 | The DNS domain suffix. All VMs will be placed in this domain 98 | type: string 99 | 100 | ansible_public_key: 101 | description: > 102 | The SSH public key that Ansible will use to access master and node hosts 103 | This will be placed on each VM host in /root/.ssh/authorized_keys 104 | type: string 105 | 106 | ssh_user: 107 | description: > 108 | The user for SSH access to the VM hosts 109 | type: string 110 | 111 | timeout: 112 | description: Time to wait until the master setup is ready. 113 | type: number 114 | default: 10800 115 | 116 | external_network: 117 | description: > 118 | The name of the inbound access network 119 | type: string 120 | constraints: 121 | - custom_constraint: neutron.network 122 | 123 | bastion_node: 124 | description: > 125 | The hostname of the bastion node in the deployment 126 | type: string 127 | default: '' 128 | 129 | fixed_network: 130 | description: > 131 | The name or ID of the admin and public network 132 | type: string 133 | constraints: 134 | - custom_constraint: neutron.network 135 | 136 | fixed_subnet: 137 | description: > 138 | The name or ID of the admin and public IPv4 space 139 | type: string 140 | constraints: 141 | - custom_constraint: neutron.subnet 142 | 143 | internal_network: 144 | description: > 145 | The name or ID of the internal network 146 | type: string 147 | constraints: 148 | - custom_constraint: neutron.network 149 | 150 | internal_subnet: 151 | description: > 152 | The name or ID of the internal IPv4 space 153 | type: string 154 | constraints: 155 | - custom_constraint: neutron.subnet 156 | 157 | ldap_url: 158 | description: > 159 | The LDAP url for access to the LDAP service (redundant? 
ML) 160 | type: string 161 | default: '' 162 | 163 | ldap_preferred_username: 164 | description: > 165 | What field to use to look up and identify users in the database 166 | type: string 167 | default: 'uid' 168 | 169 | ldap_bind_dn: 170 | description: > 171 | LDAP service access user identifier 172 | type: string 173 | default: '' 174 | 175 | ldap_bind_password: 176 | description: > 177 | LDAP service access password 178 | type: string 179 | default: '' 180 | 181 | ldap_ca: 182 | description: > 183 | The Certificate Authority file to confirm SSL encryption security 184 | type: string 185 | default: '' 186 | 187 | ldap_insecure: 188 | description: > 189 | Toggle SSL encryption for LDAP communications 190 | type: string 191 | default: false 192 | 193 | master_server_group: 194 | description: > 195 | ID of a server group containing all of the master hosts 196 | type: string 197 | 198 | master_security_group: 199 | description: > 200 | ID of the network access policies for the OpenShift master hosts 201 | type: string 202 | 203 | system_update: 204 | type: boolean 205 | 206 | extra_repository_urls: 207 | type: comma_delimited_list 208 | description: List of repository URLs which will be installed on each node. 209 | default: '' 210 | 211 | extra_docker_repository_urls: 212 | type: comma_delimited_list 213 | description: List of docker repository URLs which will be installed on each node, if a repo is insecure use '#insecure' suffix. 
214 | default: '' 215 | 216 | dns_servers: 217 | type: comma_delimited_list 218 | description: address of dns nameservers reachable in your environment 219 | 220 | dns_update_key: 221 | type: string 222 | hidden: true 223 | 224 | ca_cert: 225 | type: string 226 | description: Certificate Authority Certificate to be added to trust chain 227 | 228 | resources: 229 | 230 | # Create a network connection on the internal communications network 231 | port: 232 | type: OS::Neutron::Port 233 | properties: 234 | security_groups: 235 | - {get_param: master_security_group} 236 | network: {get_param: fixed_network} 237 | fixed_ips: 238 | - subnet: {get_param: fixed_subnet} 239 | replacement_policy: AUTO 240 | 241 | # Create a network connection on the internal communications network 242 | internal_port: 243 | type: OOShift::ContainerPort 244 | properties: 245 | security_group: {get_param: master_security_group} 246 | network: {get_param: internal_network} 247 | subnet: {get_param: internal_subnet} 248 | 249 | # Create the VM instance to host the OpenShift master service 250 | host: 251 | type: OS::Nova::Server 252 | properties: 253 | name: 254 | str_replace: 255 | template: "HOST.DOMAIN" 256 | params: 257 | HOST: {get_param: hostname} 258 | DOMAIN: {get_param: domain_name} 259 | admin_user: {get_param: ssh_user} 260 | image: {get_param: image} 261 | flavor: {get_param: flavor} 262 | key_name: {get_param: key_name} 263 | networks: 264 | - port: {get_resource: port} 265 | - port: {get_attr: [internal_port, port]} 266 | user_data_format: SOFTWARE_CONFIG 267 | user_data_update_policy: IGNORE 268 | user_data: {get_resource: init} 269 | scheduler_hints: 270 | group: {get_param: master_server_group} 271 | 272 | # Create space for Docker containers and images 273 | docker_volume: 274 | type: OS::Cinder::Volume 275 | properties: 276 | size: {get_param: docker_volume_size} 277 | 278 | # Bind the docker storage to the VM 279 | docker_volume_attach: 280 | type: OS::Cinder::VolumeAttachment 
281 | properties: 282 | instance_uuid: {get_resource: host} 283 | volume_id: {get_resource: docker_volume} 284 | 285 | # Provide access to the Master on the public network 286 | floating_ip: 287 | type: OS::Neutron::FloatingIP 288 | properties: 289 | floating_network: {get_param: external_network} 290 | port_id: {get_resource: port} 291 | 292 | # Compose configuration data into a single structure 293 | init: 294 | type: OS::Heat::MultipartMime 295 | properties: 296 | parts: 297 | - config: {get_resource: set_hostname} 298 | - config: {get_resource: included_files} 299 | - config: {get_resource: update_ca_cert} 300 | - config: {get_resource: rhn_register} 301 | - config: {get_resource: set_extra_repos} 302 | - config: {get_resource: set_extra_docker_repos} 303 | - config: {get_resource: host_update} 304 | - config: {get_resource: add_dns_record} 305 | - config: {get_resource: master_boot} 306 | 307 | # Compose the short hostname and fully qualified domain name for the new host 308 | set_hostname: 309 | type: OS::Heat::CloudConfig 310 | properties: 311 | cloud_config: 312 | hostname: {get_param: hostname} 313 | fqdn: 314 | str_replace: 315 | template: "HOST.DOMAIN" 316 | params: 317 | HOST: {get_param: hostname} 318 | DOMAIN: {get_param: domain_name} 319 | 320 | # Compile a set of standard configuration files to provide identity and access 321 | included_files: 322 | type: OS::Heat::CloudConfig 323 | properties: 324 | cloud_config: 325 | write_files: 326 | - path: /usr/local/share/openshift-on-openstack/common_functions.sh 327 | permissions: 0755 328 | content: 329 | str_replace: 330 | params: 331 | $WC_NOTIFY: { get_attr: ['wait_handle', 'curl_cli'] } 332 | template: {get_file: fragments/common_functions.sh} 333 | - path: /usr/local/share/openshift-on-openstack/common_openshift_functions.sh 334 | permissions: 0755 335 | content: {get_file: fragments/common_openshift_functions.sh} 336 | - path: /usr/local/bin/retry 337 | permissions: 0755 338 | content: {get_file: 
fragments/retry.sh} 339 | - path: /usr/local/bin/update_dns 340 | permissions: 0755 341 | content: {get_file: fragments/update_dns.py} 342 | - path: /etc/sysconfig/network-scripts/ifcfg-eth1 343 | content: 344 | str_replace: 345 | params: 346 | $IFNAME: eth1 347 | template: {get_file: fragments/ifcfg-eth} 348 | - path: /etc/pki/ca-trust/source/anchors/ca.crt 349 | permissions: 0600 350 | content: {get_param: ca_cert} 351 | ssh_authorized_keys: 352 | - {get_param: ansible_public_key} 353 | 354 | # Add CA Cert to trust chain 355 | update_ca_cert: 356 | type: OS::Heat::SoftwareConfig 357 | properties: 358 | config: {get_file: fragments/ca_cert.sh} 359 | 360 | # Attach to a source of software updates for RHEL 361 | rhn_register: 362 | type: OS::Heat::SoftwareConfig 363 | properties: 364 | config: 365 | str_replace: 366 | params: 367 | $OCP_VERSION: {get_param: ocp_version} 368 | $RHN_USERNAME: {get_param: rhn_username} 369 | $RHN_PASSWORD: {get_param: rhn_password} 370 | $SAT6_HOSTNAME: {get_param: sat6_hostname} 371 | $SAT6_ORGANIZATION: {get_param: sat6_organization} 372 | $SAT6_ACTIVATIONKEY: {get_param: sat6_activationkey} 373 | $POOL_ID: {get_param: rhn_pool} 374 | $EXTRA_POOL_IDS: 375 | list_join: 376 | - " --pool=" 377 | - {get_param: extra_rhn_pools} 378 | template: {get_file: fragments/rhn-register.sh} 379 | 380 | # Enable any extra repositories 381 | set_extra_repos: 382 | type: OS::Heat::SoftwareConfig 383 | properties: 384 | config: 385 | str_replace: 386 | params: 387 | $REPOLIST: 388 | list_join: 389 | - " " 390 | - {get_param: extra_repository_urls} 391 | template: {get_file: fragments/set-extra-repos.sh} 392 | 393 | set_extra_docker_repos: 394 | type: OS::Heat::SoftwareConfig 395 | properties: 396 | config: 397 | str_replace: 398 | params: 399 | $REPOLIST: 400 | list_join: 401 | - " " 402 | - {get_param: extra_docker_repository_urls} 403 | template: {get_file: fragments/set-extra-docker-repos.sh} 404 | 405 | # Insure that the host software is current 
406 | host_update: 407 | type: OS::Heat::SoftwareConfig 408 | properties: 409 | config: 410 | str_replace: 411 | params: 412 | $SYSTEM_UPDATE: {get_param: system_update} 413 | template: {get_file: fragments/host-update.sh} 414 | 415 | add_dns_record: 416 | type: OS::Heat::SoftwareConfig 417 | properties: 418 | config: 419 | str_replace: 420 | params: 421 | '%ZONE%': {get_param: domain_name} 422 | '%DNS_SERVER%': {get_param: [dns_servers, 0]} 423 | '%DNS_UPDATE_KEY%': {get_param: dns_update_key} 424 | '%IP_ADDRESS%': {get_attr: [port, fixed_ips, 0, ip_address]} 425 | template: {get_file: fragments/add_dns_record.sh} 426 | 427 | # Prepare the host to run Docker and Ansible for OpenShift install and config 428 | master_boot: 429 | type: OS::Heat::SoftwareConfig 430 | properties: 431 | group: script 432 | config: 433 | str_replace: 434 | params: 435 | $DOCKER_VOLUME_ID: {get_resource: docker_volume} 436 | template: {get_file: fragments/master-boot.sh} 437 | 438 | # Add a node's IP/Name mapping to DNS 439 | node_add: 440 | type: OS::Heat::SoftwareConfig 441 | properties: 442 | group: script 443 | inputs: 444 | - name: node_hostname 445 | - name: node_type 446 | outputs: 447 | - name: result 448 | config: {get_file: fragments/bastion-node-add.sh} 449 | 450 | # Add the hostname and address of the bastion host to the master host 451 | deployment_bastion_node_add: 452 | depends_on: wait_condition 453 | type: OS::Heat::SoftwareDeployment 454 | properties: 455 | config: 456 | get_resource: node_add 457 | server: 458 | get_param: bastion_node 459 | input_values: 460 | node_type: master 461 | node_hostname: 462 | str_replace: 463 | template: "HOST.DOMAIN" 464 | params: 465 | HOST: {get_param: hostname} 466 | DOMAIN: {get_param: domain_name} 467 | 468 | node_cleanup: 469 | type: OS::Heat::SoftwareConfig 470 | properties: 471 | group: script 472 | inputs: 473 | - name: node_name 474 | - name: node_type 475 | - name: ssh_user 476 | default: {get_param: ssh_user} 477 | outputs: 478 
| - name: result 479 | config: {get_file: fragments/bastion-node-cleanup.sh} 480 | 481 | # activation hook for removing the node from DNS and from the Kubernetes 482 | # cluster 483 | deployment_bastion_node_cleanup: 484 | depends_on: [host, wait_condition] 485 | type: OS::Heat::SoftwareDeployment 486 | properties: 487 | actions: ['DELETE'] 488 | input_values: 489 | node_type: master 490 | node_name: 491 | str_replace: 492 | template: "HOST.DOMAIN" 493 | params: 494 | HOST: {get_param: hostname} 495 | DOMAIN: {get_param: domain_name} 496 | config: 497 | get_resource: node_cleanup 498 | server: 499 | get_param: bastion_node 500 | 501 | # Wait for master_boot (cloud-init) to complete or time out 502 | wait_condition: 503 | type: OS::Heat::WaitCondition 504 | properties: 505 | handle: {get_resource: wait_handle} 506 | timeout: {get_param: timeout} 507 | 508 | # This provides a curl CLI string to the cloud-init script which is queried 509 | # with a status at the end of the cloud-init process 510 | wait_handle: 511 | type: OS::Heat::WaitConditionHandle 512 | 513 | outputs: 514 | console_url: 515 | description: Compose the access URL for the OpenShift web UI 516 | value: 517 | str_replace: 518 | params: 519 | HOSTNAME: {get_param: hostname} 520 | DOMAINNAME: {get_param: domain_name} 521 | template: "https://HOSTNAME.DOMAINNAME:8443/console/" 522 | api_url: 523 | description: Compose the access URL for the OpenShift REST API 524 | value: 525 | str_replace: 526 | params: 527 | HOSTNAME: {get_param: hostname} 528 | DOMAINNAME: {get_param: domain_name} 529 | template: "https://HOSTNAME.DOMAINNAME:8443/" 530 | wc_data: 531 | description: Synchronization data 532 | value: { get_attr: ['wait_condition', 'data'] } 533 | hostname: 534 | description: The actual short name for the host 535 | value: {get_param: hostname} 536 | host: 537 | description: A reference to the master host identifier 538 | value: {get_resource: host} 539 | ip_address: 540 | description: IP address of the
node 541 | value: {get_attr: [floating_ip, floating_ip_address]} 542 | -------------------------------------------------------------------------------- /openshift-on-openstack.spec: -------------------------------------------------------------------------------- 1 | Name: openshift-on-openstack 2 | Version: 0.9.5 3 | Release: 1%{?dist} 4 | Summary: Set of Openstack Heat templates to deploy OpenShift 5 | Group: System Environment/Base 6 | License: ASL 2.0 7 | URL: https://github.com/redhat-openstack/openshift-on-openstack 8 | Source0: https://github.com/redhat-openstack/openshift-on-openstack/archive/v%{version}/openshift-on-openstack-%{version}.tar.gz 9 | BuildArch: noarch 10 | 11 | %description 12 | A collection of documentation, Heat templates, configuration and 13 | everything else that's necessary to deploy OpenShift on OpenStack. 14 | 15 | %prep 16 | %setup -qn openshift-on-openstack-%{version} 17 | 18 | %build 19 | 20 | %install 21 | install -d -m 755 %{buildroot}/%{_datadir}/%{name} 22 | install -D -m 755 customize-disk-image %{buildroot}%{_bindir}/customize-disk-image 23 | cp -aR *.yaml %{buildroot}%{_datadir}/%{name}/ 24 | cp -aR collect-config-setup/ %{buildroot}%{_datadir}/%{name} 25 | cp -aR fragments/ %{buildroot}%{_datadir}/%{name} 26 | cp -aR templates/ %{buildroot}%{_datadir}/%{name} 27 | cp -aR heat-docker-agent/ %{buildroot}%{_datadir}/%{name} 28 | cp -aR tests/ %{buildroot}%{_datadir}/%{name} 29 | 30 | %files 31 | %doc LICENSE.txt README.adoc README_debugging.adoc 32 | %{_datadir}/%{name} 33 | %{_bindir}/customize-disk-image 34 | 35 | %changelog 36 | * Fri Oct 28 2016 Sylvain Baubeau - 0.9.5-1 37 | - Make app_subdomain customizable 38 | - External loadbalancer hostname now can (and should) be fully qualified domain name 39 | - Bug fixes: 40 | - Adding retry when installing packages for mitigating random networking issues 41 | - Add dependent package python-psutil for os-collect-config 42 | 43 | * Wed Oct 19 2016 Sylvain Baubeau - 0.9.4-1 44 | - 
Use openshift-ansible recommended way for scaleup 45 | - Add constraints on hostnames 46 | - Documentation improvements 47 | - Fixed master_count evaluation 48 | 49 | * Fri Oct 14 2016 Sylvain Baubeau - 0.9.3-1 50 | - Bug fixes: 51 | - Improve checking of os-collect-config setup 52 | - Add missing domain to dedicated loadbalancer instance 53 | - Unify loadbalancer stackname prefix 54 | - Make sure prepare_registry is a bool value 55 | - Make use of parameter registry_volume_size 56 | - Fix scaleup when using volume_quota parameter 57 | 58 | * Wed Oct 12 2016 Sylvain Baubeau - 0.9.2-1 59 | - Bug fixes: 60 | - Set default value for rhn_pool 61 | - Add default loadbalancer neutron env file 62 | - Ignore "oadm ipfailover" error state 63 | - Install missing package python-oslo-log for OSP 9 64 | - Set load balancer image to rhel when using AOP 65 | - Fixed scheduling on master nodes 66 | - Return non empty router_ip when using dedicated loadbalancer 67 | - Lots of documentation improvements 68 | 69 | * Thu Oct 6 2016 Sylvain Baubeau - 0.9.1-1 70 | - Allow specifying different flavors for every node type 71 | - Bug fixes: 72 | - Write template files in post cloud-init phase to avoid 64k 73 | limit of cloud-init 74 | - Explicitly enable port 53 for DNS server 75 | - Refactor skip_dns parameter 76 | 77 | * Mon Oct 3 2016 Sylvain Baubeau - 0.9.0-1 78 | - Rename 'infra' node to 'bastion' 79 | - Dedicated infra nodes 80 | - Setup masquerading when using flannel 81 | - Allow passing parameters to openshift-ansible as JSON 82 | - Satellite fixes 83 | 84 | * Wed Sep 14 2016 Sylvain Baubeau - 0.8.1-1 85 | - Bump to version 0.8.1 86 | - Bug fixes: 87 | - Make sure registry volume is not left in /etc/fstab 88 | - Fix EPEL repository enablement 89 | - Explicitly set replica=1 for registry 90 | 91 | * Wed Sep 14 2016 Sylvain Baubeau - 0.8.0-1 92 | - Bump to version 0.8.0 93 | - Switch to Ansible 2.1 94 | - Improve scalability (up to 100 nodes) 95 | - Use IP failover for OpenShift
router 96 | - Add subscription manager register to satellite 97 | - Evacuate pods before removing a node 98 | - Automatic subscription removal 99 | - Allow setting quotas on container and emptyDir volumes 100 | - Allow use of external volume for registry storage 101 | 102 | * Thu Jun 16 2016 Sylvain Baubeau - 0.7.0-1 103 | - Bump to version 0.7.0 104 | 105 | * Thu Jun 02 2016 Sylvain Baubeau - 0.6.0-1 106 | - Bump to version 0.6.0 107 | 108 | * Fri May 13 2016 Sylvain Baubeau - 0.5.0-1 109 | - Bump to version 0.5.0 110 | 111 | * Tue Mar 22 2016 Sylvain Baubeau - 0.4.0-1 112 | - Bump to version 0.4.0 113 | - Enable dedicated loadbalancer node (again) 114 | 115 | * Tue Mar 22 2016 Sylvain Baubeau - 0.3.0-1 116 | - Bump to version 0.3.0 117 | 118 | * Fri Feb 19 2016 Sylvain Baubeau - 0.2.0-1 119 | - Bump to version 0.2.0 120 | 121 | * Wed Jan 27 2016 Sylvain Baubeau - 0.1.0-1 122 | - Initial openshift-on-openstack rpm 123 | -------------------------------------------------------------------------------- /registry_ephemeral.yaml: -------------------------------------------------------------------------------- 1 | heat_template_version: 2016-10-14 2 | 3 | description: > 4 | A template which creates a cinder volume used by openshift registry, 5 | this volume is deleted with stack. 
6 | 7 | parameters: 8 | volume_id: 9 | description: > 10 | An existing cinder volume ID 11 | type: string 12 | default: '' 13 | 14 | volume_size: 15 | description: > 16 | Size of the Openshift registry persistent volume 17 | type: number 18 | 19 | resources: 20 | volume: 21 | type: OS::Cinder::Volume 22 | properties: 23 | size: {get_param: volume_size} 24 | 25 | outputs: 26 | volume_id: 27 | description: cinder volume id 28 | value: {get_resource: volume} 29 | -------------------------------------------------------------------------------- /registry_persistent.yaml: -------------------------------------------------------------------------------- 1 | heat_template_version: 2016-10-14 2 | 3 | description: > 4 | A template which allows usage of an existing cinder volume as a persistent 5 | openshift registry. 6 | 7 | parameters: 8 | volume_id: 9 | description: > 10 | An existing cinder volume ID 11 | type: string 12 | constraints: 13 | - custom_constraint: cinder.volume 14 | 15 | volume_size: 16 | description: > 17 | Size of the Openshift registry persistent volume 18 | type: number 19 | default: 10 20 | 21 | outputs: 22 | volume_id: 23 | description: cinder volume id 24 | value: {get_param: volume_id} 25 | -------------------------------------------------------------------------------- /sdn_flannel.yaml: -------------------------------------------------------------------------------- 1 | heat_template_version: 2016-10-14 2 | 3 | description: > 4 | A template which creates a dedicated network for container communication 5 | 6 | parameters: 7 | network: 8 | description: > 9 | The name or ID of the internal network 10 | type: string 11 | constraints: 12 | - custom_constraint: neutron.network 13 | 14 | subnet: 15 | description: > 16 | The name or ID of the internal IPv4 space 17 | type: string 18 | constraints: 19 | - custom_constraint: neutron.subnet 20 | 21 | security_group: 22 | description: > 23 | ID of the network access policies 24 | type: string 25 | 26 | 
resources: 27 | port_security_disabled_port: 28 | type: OS::Neutron::Port 29 | properties: 30 | network: {get_param: network} 31 | fixed_ips: 32 | - subnet: {get_param: subnet} 33 | replacement_policy: AUTO 34 | port_security_enabled: false 35 | 36 | outputs: 37 | port: 38 | description: A reference to the created port 39 | value: {get_resource: port_security_disabled_port} 40 | -------------------------------------------------------------------------------- /sdn_openshift_sdn.yaml: -------------------------------------------------------------------------------- 1 | heat_template_version: 2016-10-14 2 | 3 | description: > 4 | A template which creates a dedicated network for container communication 5 | 6 | parameters: 7 | network: 8 | description: > 9 | The name or ID of the internal network 10 | type: string 11 | constraints: 12 | - custom_constraint: neutron.network 13 | 14 | subnet: 15 | description: > 16 | The name or ID of the internal IPv4 space 17 | type: string 18 | constraints: 19 | - custom_constraint: neutron.subnet 20 | 21 | security_group: 22 | description: > 23 | ID of the network access policies 24 | type: string 25 | 26 | resources: 27 | security_group_port: 28 | type: OS::Neutron::Port 29 | properties: 30 | security_groups: 31 | - {get_param: security_group} 32 | network: {get_param: network} 33 | fixed_ips: 34 | - subnet: {get_param: subnet} 35 | replacement_policy: AUTO 36 | 37 | outputs: 38 | port: 39 | description: A reference to the created port 40 | value: {get_resource: security_group_port} 41 | -------------------------------------------------------------------------------- /templates/var/lib/ansible/group_vars/OSv3.yml: -------------------------------------------------------------------------------- 1 | mkdir -p /var/lib/os-apply-config/templates/var/lib/ansible/group_vars 2 | cat << 'EOF' > /var/lib/os-apply-config/templates/var/lib/ansible/group_vars/OSv3.yml 3 | ansible_first_run: {{ansible_first_run}} 4 | bastion_instance_id:
{{bastion_instance_id}} 5 | ansible_ssh_user: {{ssh_user}} 6 | ansible_sudo: true 7 | ansible_become: true 8 | deployment_type: {{deployment_type}} # deployment type valid values are origin, online and openshift-enterprise 9 | openshift_master_default_subdomain: {{app_subdomain}} # default subdomain to use for exposed routes 10 | openshift_override_hostname_check: true 11 | openshift_use_openshift_sdn: {{openshift_use_openshift_sdn}} 12 | openshift_use_flannel: {{openshift_use_flannel}} 13 | flannel_interface: eth1 14 | openshift_use_dnsmasq: false 15 | {{#master_ha}} 16 | openshift_master_cluster_password: openshift_cluster 17 | openshift_master_cluster_method: native 18 | {{/master_ha}} 19 | {{^no_lb}} 20 | openshift_master_cluster_hostname: {{lb_hostname}} 21 | openshift_master_cluster_public_hostname: {{lb_hostname}} 22 | {{/no_lb}} 23 | {{#ldap_url}} 24 | openshift_master_identity_providers: 25 | - name: ldap_auth 26 | kind: LDAPPasswordIdentityProvider 27 | challenge: true 28 | login: true 29 | bindDN: {{ldap_bind_dn}} 30 | bindPassword: {{ldap_bind_password}} 31 | ca: '{{ldap_ca}}' 32 | insecure: {{ldap_insecure}} 33 | url: {{ldap_url}} 34 | attributes: 35 | id: ['dn'] 36 | email: ['mail'] 37 | name: ['cn'] 38 | preferredUsername: ['{{ldap_preferred_username}}'] 39 | {{/ldap_url}} 40 | {{^ldap_url}} 41 | openshift_master_identity_providers: 42 | - name: htpasswd_auth 43 | login: true 44 | challenge: true 45 | kind: HTPasswdPasswordIdentityProvider 46 | filename: /etc/origin/openshift-passwd 47 | {{/ldap_url}} 48 | {{#openstack_cloud_provider}} 49 | openshift_cloudprovider_kind: openstack 50 | openshift_cloudprovider_openstack_auth_url: {{os_auth_url}} 51 | openshift_cloudprovider_openstack_username: {{os_username}} 52 | openshift_cloudprovider_openstack_password: {{os_password}} 53 | openshift_cloudprovider_openstack_tenant_name: {{os_tenant_name}} 54 | openshift_cloudprovider_openstack_region: {{os_region_name}} 55 | {{#os_domain_name}} 56 |
openshift_cloudprovider_openstack_domain_name: {{os_domain_name}} 57 | {{/os_domain_name}} 58 | {{#deploy_registry}} 59 | openshift_hosted_registry_replicas: 1 60 | openshift_registry_selector: region=infra 61 | openshift_hosted_registry_storage_create_pv: true 62 | openshift_hosted_registry_storage_kind: openstack 63 | openshift_hosted_registry_storage_volume_name: registry 64 | openshift_hosted_registry_storage_volume_size: {{registry_volume_size}}Gi 65 | openshift_hosted_registry_storage_access_modes: 66 | - ReadWriteOnce 67 | openshift_hosted_registry_storage_openstack_volumeID: {{registry_volume_id}} 68 | openshift_hosted_registry_storage_openstack_filesystem: {{registry_volume_fs}} 69 | {{/deploy_registry}} 70 | {{/openstack_cloud_provider}} 71 | openshift_hosted_manage_registry: {{deploy_registry}} 72 | openshift_hosted_manage_router: {{deploy_router}} 73 | EOF 74 | -------------------------------------------------------------------------------- /templates/var/lib/ansible/group_vars/masters.yml: -------------------------------------------------------------------------------- 1 | mkdir -p /var/lib/os-apply-config/templates/var/lib/ansible/group_vars 2 | cat << 'EOF' > /var/lib/os-apply-config/templates/var/lib/ansible/group_vars/masters.yml 3 | num_infra: {{infra_count}} 4 | router_vip: {{router_vip}} 5 | openshift_schedulable: false 6 | openshift_master_api_port: 8443 7 | {{#deploy_router}} 8 | openshift_hosted_router_selector: region=infra 9 | {{/deploy_router}} 10 | {{^deploy_router}} 11 | openshift_hosted_router_replicas: 0 12 | {{/deploy_router}} 13 | EOF 14 | -------------------------------------------------------------------------------- /templates/var/lib/ansible/group_vars/nodes.yml: -------------------------------------------------------------------------------- 1 | mkdir -p /var/lib/os-apply-config/templates/var/lib/ansible/group_vars 2 | cat << 'EOF' > /var/lib/os-apply-config/templates/var/lib/ansible/group_vars/nodes.yml 3 | {{#volume_quota}} 4 
| openshift_node_local_quota_per_fsgroup: {{ volume_quota }}Gi 5 | {{/volume_quota}} 6 | EOF 7 | -------------------------------------------------------------------------------- /templates/var/lib/ansible/host_vars/loadbalancer.yml: -------------------------------------------------------------------------------- 1 | mkdir -p /var/lib/os-apply-config/templates/var/lib/ansible/host_vars 2 | cat << 'EOF' > /var/lib/os-apply-config/templates/var/lib/ansible/host_vars/loadbalancer.yml 3 | ansible_ssh_host: {{lb_ip}} 4 | ansible_hostname: {{short_lb_hostname}} 5 | ansible_default_ipv4: 6 | address: {{lb_ip}} 7 | EOF 8 | -------------------------------------------------------------------------------- /templates/var/lib/ansible/inventory: -------------------------------------------------------------------------------- 1 | mkdir -p /var/lib/os-apply-config/templates/var/lib/ansible 2 | cat << 'EOF' > /var/lib/os-apply-config/templates/var/lib/ansible/inventory 3 | # Create an OSEv3 group that contains the masters and nodes groups 4 | [OSv3:children] 5 | bastion 6 | masters 7 | nodes 8 | etcd 9 | new_nodes 10 | {{#dedicated_lb}} 11 | lb 12 | 13 | [lb] 14 | loadbalancer 15 | {{/dedicated_lb}} 16 | 17 | [bastion] 18 | localhost 19 | 20 | [masters] 21 | {{#masters}} 22 | {{.}}.{{domainname}} 23 | {{/masters}} 24 | 25 | [etcd] 26 | {{#masters}} 27 | {{.}}.{{domainname}} 28 | {{/masters}} 29 | 30 | # FIXME: this is used only in playbooks/haproxy.yml for 31 | # setting up dedicated loadbalancer 32 | [infra] 33 | {{#infra_nodes}} 34 | {{.}}.{{domainname}} 35 | {{/infra_nodes}} 36 | 37 | [nodes] 38 | {{#masters}} 39 | {{.}}.{{domainname}} openshift_node_labels="{'zone': 'default'}" 40 | {{/masters}} 41 | {{#infra_nodes}} 42 | {{.}}.{{domainname}} openshift_node_labels="{'region': 'infra', 'zone': 'default'}" 43 | {{/infra_nodes}} 44 | {{#nodes}} 45 | {{.}} openshift_node_labels="{'region': 'primary', 'zone': 'default'}" 46 | {{/nodes}} 47 | 48 | [new_nodes] 49 | 
{{^ansible_first_run}} 50 | {{#new_nodes}} 51 | {{.}} openshift_node_labels="{'region': 'primary', 'zone': 'default'}" 52 | {{/new_nodes}} 53 | {{/ansible_first_run}} 54 | 55 | [dns] 56 | localhost 57 | {{^no_lb}} 58 | {{^external_lb}} 59 | 60 | [extradnsitems] 61 | loadbalancer 62 | {{/external_lb}} 63 | {{/no_lb}} 64 | EOF 65 | -------------------------------------------------------------------------------- /templates/var/lib/ansible/playbooks/flannel.yml: -------------------------------------------------------------------------------- 1 | mkdir -p /var/lib/os-apply-config/templates/var/lib/ansible/playbooks 2 | cat << 'EOF' > /var/lib/os-apply-config/templates/var/lib/ansible/playbooks/flannel.yml 3 | {{=<% %>=}} 4 | - hosts: nodes 5 | sudo: yes 6 | tasks: 7 | - name: Allow docker traffic 8 | shell: iptables -A DOCKER -p tcp -j ACCEPT 9 | 10 | - name: Set up masquerading on flannel interface 11 | shell: iptables -t nat -A POSTROUTING -o {{ flannel_interface }} -j MASQUERADE 12 | 13 | - name: Make iptables rules permanent 14 | shell: /usr/libexec/iptables/iptables.init save 15 | <%={{ }}=%> 16 | EOF 17 | -------------------------------------------------------------------------------- /templates/var/lib/ansible/playbooks/haproxy.yml: -------------------------------------------------------------------------------- 1 | mkdir -p /var/lib/os-apply-config/templates/var/lib/ansible/playbooks 2 | cat << 'EOF' > /var/lib/os-apply-config/templates/var/lib/ansible/playbooks/haproxy.yml 3 | {{=<% %>=}} 4 | - name: Gather facts 5 | hosts: infra 6 | gather_facts: False 7 | tasks: 8 | - setup: 9 | filter: ansible_* 10 | 11 | - name: Configure load balancers 12 | hosts: oo_lb_to_config 13 | vars: 14 | os_firewall_use_firewalld: false 15 | os_firewall_allow: 16 | - service: router http 17 | port: "80/tcp" 18 | - service: router https 19 | port: "443/tcp" 20 | roles: 21 | - role: os_firewall 22 | - role: openshift_facts 23 | tasks: 24 | - set_fact: 25 | servers: "{{ groups.infra 
}}" 26 | - name: Configure haproxy 27 | template: 28 | src: /var/lib/ansible/templates/etc/haproxy/router.cfg.j2 29 | dest: /etc/haproxy/router.cfg 30 | owner: root 31 | group: root 32 | mode: 0644 33 | - name: Include config file 34 | replace: dest=/etc/sysconfig/haproxy regexp='OPTIONS=.*' replace='OPTIONS="-f /etc/haproxy/router.cfg"' 35 | - name: Enable and start haproxy 36 | service: 37 | name: haproxy 38 | state: restarted 39 | enabled: yes 40 | register: start_result 41 | <%={{ }}=%> 42 | EOF 43 | -------------------------------------------------------------------------------- /templates/var/lib/ansible/playbooks/ipfailover.yml: -------------------------------------------------------------------------------- 1 | mkdir -p /var/lib/os-apply-config/templates/var/lib/ansible/playbooks 2 | cat << 'EOF' > /var/lib/os-apply-config/templates/var/lib/ansible/playbooks/ipfailover.yml 3 | --- 4 | {{=<% %>=}} 5 | - hosts: masters[0] 6 | sudo: yes 7 | roles: 8 | - role: openshift_serviceaccounts 9 | openshift_serviceaccounts_names: 10 | - ipfailover 11 | openshift_serviceaccounts_namespace: default 12 | openshift_serviceaccounts_sccs: 13 | - privileged 14 | 15 | tasks: 16 | - name: Deploy Openshift IP failover for router 17 | command: oadm ipfailover --create --service-account=ipfailover --interface=eth0 --selector='region=infra' --replicas={{ num_infra }} --virtual-ips="{{ router_vip }}" --credentials=/etc/origin/master/openshift-router.kubeconfig 18 | when: ansible_first_run | default(false) | bool 19 | # oadm ipfailover returns error code if service account already exists even 20 | # if ipfailover pod is created successfully 21 | # remove when https://bugzilla.redhat.com/show_bug.cgi?id=1332432 is fixed 22 | ignore_errors: yes 23 | 24 | - hosts: masters 25 | sudo: yes 26 | tasks: 27 | - name: Allow multicast for keepalived 28 | command: /sbin/iptables -I INPUT -i eth0 -d 224.0.0.18/32 -j ACCEPT 29 | <%={{ }}=%> 30 | EOF 31 | 
# Stage the top-level orchestration playbook (main.yml) for os-apply-config.
# Rendered sections are toggled by Heat-provided Mustache flags:
#   ansible_first_run / external_lb / deploy_registry / prepare_registry /
#   volume_quota / openshift_use_flannel / deploy_router / router_vip /
#   dedicated_lb.
# Flow: optional /etc/hosts LB override -> registry volume prep -> quota ->
# openshift-ansible byo/config.yml -> flannel -> fetch CA cert/key to Heat
# outputs -> restart node services -> wait for registry/router/ipfailover
# pods -> clean DeadlineExceeded pods -> haproxy for a dedicated LB ->
# remove the temporary LB override.
mkdir -p /var/lib/os-apply-config/templates/var/lib/ansible/playbooks
# "{{=<% %>=}}" swaps Mustache delimiters so Jinja2 "{{ ... }}" below reaches
# Ansible verbatim; <% ... %> tokens are filled by os-apply-config.
cat << 'EOF' > /var/lib/os-apply-config/templates/var/lib/ansible/playbooks/main.yml
{{=<% %>=}}

<%#ansible_first_run%>
<%#external_lb%>
# an external LB can not be pre-set with master IPs because both creation
# of master nodes and openshift-ansible are part of the same stack-create
# process. Temporarily override external LB's IP to point to the first
# master node. This IP is removed at the end of the first successful
# setup.
- hosts: all
  tasks:
  - name: Override external LB hostname's IP
    lineinfile: dest=/etc/hosts regexp='.* <%lb_hostname%>' line="<%master_ip%> <%lb_hostname%>" state=present
<%/external_lb%>

<%#deploy_registry%>
<%#prepare_registry%>
- include: /var/lib/ansible/playbooks/registry.yml
<%/prepare_registry%>
<%/deploy_registry%>
<%#volume_quota%>
- include: /var/lib/ansible/playbooks/quota.yml
<%/volume_quota%>
<%/ansible_first_run%>

- include: /usr/share/ansible/openshift-ansible/playbooks/byo/config.yml
  vars:
    openshift_infra_nodes: "{{ groups.infra | default([]) }}"

<%#openshift_use_flannel%>
- include: /var/lib/ansible/playbooks/flannel.yml
<%/openshift_use_flannel%>

- hosts: masters[0]
  sudo: yes
  tasks:
  - name: Fetch cert file
    fetch:
      src=/etc/origin/master/ca.crt
      dest=<%heat_outputs_path%>.ca_cert
      flat=yes

  - name: Fetch ca key
    fetch:
      src=/etc/origin/master/ca.key
      dest=<%heat_outputs_path%>.ca_key
      flat=yes

- hosts: nodes
  sudo: yes
  tasks:
  - name: Restart node service
    service: name={{openshift.common.service_type}}-node state=restarted

<%#deploy_registry%>
- name: Waiting for registry setup
  hosts: masters[0]
  tasks:
  - action: shell oc get pod | grep -v deploy | grep registry | grep Running
    register: result
    until: result.rc == 0
    retries: 15
    delay: 60
<%/deploy_registry%>

<%#deploy_router%>
- name: Waiting for router setup
  hosts: masters[0]
  tasks:
  - action: shell oc get pod | grep -v deploy | grep router | grep Running
    register: result
    until: result.rc == 0
    retries: 15
    delay: 60

<%#router_vip%>
- include: /var/lib/ansible/playbooks/ipfailover.yml

- name: Waiting for IP failover setup
  hosts: masters[0]
  tasks:
  - action: shell oc get pod | grep -v deploy | grep ipfailover | grep Running
    register: result
    until: result.rc == 0
    retries: 15
    delay: 60
<%/router_vip%>
<%/deploy_router%>

- hosts: masters[0]
  sudo: yes
  tasks:
  - name: Clean pods in DeadlineExceeded status
    shell: oc get pod | grep DeadlineExceeded | cut -f 1 -d " " | xargs -r oc delete pod

<%#deploy_router%>
<%#dedicated_lb%>
- include: /var/lib/ansible/playbooks/haproxy.yml
<%/dedicated_lb%>
<%/deploy_router%>

<%#ansible_first_run%>
<%#external_lb%>
- hosts: all
  tasks:
  - name: Remove external LB hostname's IP
    lineinfile: dest=/etc/hosts regexp='.* <%lb_hostname%>' state=absent
<%/external_lb%>
<%/ansible_first_run%>

<%={{ }}=%>
EOF
# Stage the registry-volume preparation playbook for os-apply-config.
# Runs on the bastion (localhost): installs the OpenStack CLI clients,
# temporarily attaches the pre-created registry Cinder volume to this VM,
# formats it, mounts it to chmod 0777 the filesystem root (so the registry
# pod can write to it later), then unmounts and detaches again.
#
# Fix: the final task name read "Detach the volume to the VM"; it detaches
# the volume FROM the VM.
mkdir -p /var/lib/os-apply-config/templates/var/lib/ansible/playbooks
# "{{=<% %>=}}" swaps Mustache delimiters so Jinja2 "{{ ... }}" reaches
# Ansible verbatim after os-apply-config rendering.
cat << 'EOF' > /var/lib/os-apply-config/templates/var/lib/ansible/playbooks/registry.yml
---
{{=<% %>=}}
- hosts: localhost
  sudo: yes
  tasks:
  - name: Create temp directory for volume definitions
    command: mktemp -d /tmp/registry-volume-XXXXXXX
    register: mktemp
    changed_when: False

  - name: Check if Openstack clients are installed
    command: rpm -q python-novaclient python-cinderclient python-keystoneclient
    register: rpm_check
    ignore_errors: true

  - name: Setup RDO repositories
    yum: name=https://rdoproject.org/repos/rdo-release.rpm state=present
    when: (ansible_first_run | default(false) | bool) and deployment_type == 'origin' and rpm_check.rc != 0 and ansible_distribution != 'RedHat'

  - name: Install Nova and Cinder clients
    yum: name=python-novaclient,python-cinderclient,python-keystoneclient state=latest

  - name: Attach the volume to the VM
    shell: nova --os-auth-url {{ openshift_cloudprovider_openstack_auth_url }} --os-username {{ openshift_cloudprovider_openstack_username }} --os-password {{ openshift_cloudprovider_openstack_password }} --os-tenant-name {{ openshift_cloudprovider_openstack_tenant_name }} volume-attach {{ bastion_instance_id }} {{ openshift_hosted_registry_storage_openstack_volumeID }}

  # NOTE(review): assumes the attached volume always shows up as /dev/vdc on
  # the bastion -- TODO confirm against the bastion's device layout.
  - name: Wait for the device to appear
    wait_for: path=/dev/vdc

  - name: Format the device
    filesystem: fstype={{ openshift_hosted_registry_storage_openstack_filesystem }} dev=/dev/vdc

  - name: Mount the device
    mount: name={{ mktemp.stdout }} src=/dev/vdc state=mounted fstype={{ openshift_hosted_registry_storage_openstack_filesystem }}

  - name: Change mode on the filesystem
    file: path={{ mktemp.stdout }} state=directory recurse=true mode=0777

  - name: Unmount the device
    mount: name={{ mktemp.stdout }} src=/dev/vdc state=absent fstype={{ openshift_hosted_registry_storage_openstack_filesystem }}

  - name: Delete temp directory
    file:
      name: "{{ mktemp.stdout }}"
      state: absent
    changed_when: False

  - name: Detach the volume from the VM
    shell: nova --os-auth-url {{ openshift_cloudprovider_openstack_auth_url }} --os-username {{ openshift_cloudprovider_openstack_username }} --os-password {{ openshift_cloudprovider_openstack_password }} --os-tenant-name {{ openshift_cloudprovider_openstack_tenant_name }} volume-detach {{ bastion_instance_id }} {{ openshift_hosted_registry_storage_openstack_volumeID }}
<%={{ }}=%>
EOF
action: shell oadm --config ~/.kube/config manage-node --list-pods --show-all=false {{ node }} | grep -v Error | tail -n +5 16 | register: result 17 | until: result.stdout_lines | length == 0 18 | retries: 3 19 | delay: 60 20 | 21 | - name: Remove node from list node list 22 | action: shell oc --config ~/.kube/config delete node {{ node }} 23 | <%={{ }}=%> 24 | EOF 25 | -------------------------------------------------------------------------------- /templates/var/lib/ansible/playbooks/scaleup.yml: -------------------------------------------------------------------------------- 1 | mkdir -p /var/lib/os-apply-config/templates/var/lib/ansible/playbooks 2 | cat << 'EOF' > /var/lib/os-apply-config/templates/var/lib/ansible/playbooks/scaleup.yml 3 | {{=<% %>=}} 4 | <%#volume_quota%> 5 | - include: /var/lib/ansible/playbooks/quota.yml 6 | <%/volume_quota%> 7 | 8 | - include: /usr/share/ansible/openshift-ansible/playbooks/byo/openshift-node/scaleup.yml 9 | vars: 10 | openshift_infra_nodes: "{{ groups.infra | default([]) }}" 11 | 12 | - hosts: new_nodes 13 | sudo: yes 14 | tasks: 15 | - name: Allow docker traffic 16 | shell: iptables -A DOCKER -p tcp -j ACCEPT 17 | when: openshift_use_flannel 18 | 19 | - name: Set up masquerading on flannel interface 20 | shell: iptables -t nat -A POSTROUTING -o {{ flannel_interface }} -j MASQUERADE 21 | when: openshift_use_flannel 22 | 23 | - name: Make iptables rule permanent 24 | shell: /usr/libexec/iptables/iptables.init save 25 | when: openshift_use_flannel 26 | 27 | - hosts: new_nodes 28 | sudo: yes 29 | tasks: 30 | - name: Restart node service 31 | service: name={{openshift.common.service_type}}-node state=restarted 32 | <%={{ }}=%> 33 | EOF 34 | -------------------------------------------------------------------------------- /templates/var/lib/ansible/roles/fstab_mount_options/tasks/main.yml: -------------------------------------------------------------------------------- 1 | mkdir -p 
# Stage the "reboot" role tasks for os-apply-config.  The role reboots a
# host only when the root filesystem is NOT yet mounted with group quotas
# (findmnt output lacks "grpquota"), then waits from the control host for
# SSH to drop and come back.
# NOTE(review): the quota roles elsewhere in this tree configure "gquota";
# presumably the kernel reports that XFS option as "grpquota" in findmnt
# output -- verify on the target OS before changing either string.
mkdir -p /var/lib/os-apply-config/templates/var/lib/ansible/roles/reboot/tasks/
# "{{=<% %>=}}" swaps Mustache delimiters so Jinja2 "{{ ... }}" reaches
# Ansible verbatim after os-apply-config rendering.
cat << 'EOF' > /var/lib/os-apply-config/templates/var/lib/ansible/roles/reboot/tasks/main.yml
{{=<% %>=}}
---
- name: check if grpquota mount options are already set
  shell: /usr/bin/findmnt / | grep grpquota
  register: proc_line
  changed_when: False
  ignore_errors: yes

# systemd-run --on-active=5 schedules the reboot 5s out so Ansible's SSH
# session can return cleanly; async/poll 0 means fire-and-forget.
- name: Reboot server
  command: /usr/bin/systemd-run --on-active=5 /usr/bin/systemctl reboot
  async: 0
  poll: 0
  when: proc_line.rc == 1

- name: Wait for the server to reboot
  sudo: no
  local_action: wait_for host="{{ inventory_hostname }}" search_regex=OpenSSH port=22 timeout=300 state=stopped
  when: proc_line.rc == 1

- name: Wait for the server to finish rebooting
  sudo: no
  local_action: wait_for host="{{ inventory_hostname }}" search_regex=OpenSSH port=22 timeout=300 state=started
  when: proc_line.rc == 1
<%={{ }}=%>
EOF
class FilterModule(object):
    """Custom Ansible filter plugin for parsing `heat stack-show` output."""

    def filters(self):
        """ returns a mapping of filters to methods """
        return {
            "oo_parse_heat_stack_outputs": self.oo_parse_heat_stack_outputs,
        }

    @staticmethod
    def oo_parse_heat_stack_outputs(data):
        """ Formats the HEAT stack output into a usable form

        `heat stack-show` prints a pretty-printed ASCII table whose
        "outputs" property spans multiple rows, each row holding one line
        of a JSON array of {output_key, output_value, description} dicts.
        This filter extracts that JSON and returns it as a plain
        ``{output_key: output_value}`` dict.

        :param data: registered result of a `heat stack-show` command;
                     only ``data['stdout_lines']`` is read.
        :returns: dict mapping each output_key to its output_value.
                  Returns {} when no "outputs" section is present
                  (fix: previously json.loads('') raised ValueError).
        """
        # Collect the second table column of every row between the
        # "outputs" property row and the next named property row.
        in_outputs = False
        outputs = ''

        line_regex = re.compile(r'\|\s*(.*?)\s*\|\s*(.*?)\s*\|')
        for line in data['stdout_lines']:
            match = line_regex.match(line)
            if not match:
                continue  # table border rows ("+---+") carry no data
            if match.group(1) == 'outputs':
                in_outputs = True
            elif match.group(1) != '':
                # A new named property ends the "outputs" section.
                in_outputs = False
            if in_outputs:
                outputs += match.group(2)

        # No "outputs" property found (e.g. stack without outputs):
        # return an empty map instead of failing on json.loads('').
        if not outputs.strip():
            return {}

        outputs = json.loads(outputs)

        # Revamp the "outputs" to put it in the form of a "Key: value" map
        return {output['output_key']: output['output_value']
                for output in outputs}
match" 9 | when: result.stdout != "{{ count }}" 10 | 11 | - name: Check if router is running 12 | shell: "oc get pods|grep 'router-1.*Running'" 13 | 14 | - name: Run diagnostics 15 | command: "oadm diagnostics" 16 | -------------------------------------------------------------------------------- /tests/roles/deploy/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: Check OpenStack stack presence 2 | command: 'heat stack-show oshift' 3 | register: stack_show_result 4 | changed_when: false 5 | failed_when: stack_show_result.rc != 0 and 'Stack not found' not in stack_show_result.stderr 6 | 7 | - name: Create or Update OpenStack Stack 8 | command: "heat {{heat_cmd}} oshift 9 | -e ../env_origin.yaml 10 | -f ../openshift.yaml 11 | -P external_network=public 12 | -P flavor=m1.shift 13 | -P node_count={{node_count | default(2)}} 14 | -P master_count={{master_count | default(1)}} 15 | -P bastion_image=centos72 16 | -P master_image=centos72 17 | -P node_image=centos72 18 | -P master_server_group_policies=affinity 19 | -P master_docker_volume_size_gb=6 20 | -P node_docker_volume_size_gb=5 21 | -P deploy_router=true 22 | -P deploy_registry=false 23 | {{stack_params | default('')}}" 24 | when: stack_show_result.rc == 1 or heat_cmd == 'stack-update' 25 | 26 | - set_fact: 27 | progress_states: ['CREATE_IN_PROGRESS', 'UPDATE_IN_PROGRESS'] 28 | success_states: ['CREATE_COMPLETE', 'UPDATE_COMPLETE'] 29 | 30 | - name: Wait for OpenStack Stack readiness 31 | shell: 'heat stack-show oshift | awk ''$2 == "stack_status" {print $4}''' 32 | register: stack_show_status_result 33 | until: stack_show_status_result.stdout not in progress_states 34 | retries: 60 35 | delay: 60 36 | 37 | - fail: 38 | msg: "Stack creation failed" 39 | when: stack_show_status_result.stdout not in success_states 40 | 41 | - name: Read OpenStack Stack outputs 42 | command: 'heat stack-show oshift' 43 | register: stack_show_result 44 | 45 | - set_fact: 46 | 
parsed_outputs: "{{ stack_show_result | oo_parse_heat_stack_outputs }}" 47 | 48 | - name: Add new master instances groups and variables 49 | add_host: 50 | hostname: '{{ item[0] }}' 51 | ansible_ssh_host: '{{ item[0] }}' 52 | ansible_ssh_user: centos 53 | ansible_become: yes 54 | groups: masters 55 | openshift_node_labels: 56 | type: "master" 57 | with_together: 58 | - parsed_outputs.master_ips 59 | 60 | - name: Add new node instances groups and variables 61 | add_host: 62 | hostname: '{{ item[0] }}' 63 | ansible_ssh_host: '{{ item[0] }}' 64 | ansible_ssh_user: centos 65 | ansible_become: yes 66 | groups: masters 67 | openshift_node_labels: 68 | type: "nodes" 69 | with_together: 70 | - parsed_outputs.host_ips 71 | -------------------------------------------------------------------------------- /tests/scaling.yml: -------------------------------------------------------------------------------- 1 | - name: Scale up stack 2 | hosts: localhost 3 | become: no 4 | connection: local 5 | gather_facts: no 6 | roles: 7 | - { role: deploy, 8 | heat_cmd: stack-update, 9 | node_count: 3, 10 | master_count: 1, 11 | stack_params: "{{heat_params | default('')}}"} 12 | 13 | - name: Check status of scaled up stack 14 | hosts: masters[0] 15 | become: yes 16 | roles: 17 | - { role: checkstatus, node_count: 3, master_count: 1} 18 | 19 | - name: Scale down stack 20 | hosts: localhost 21 | become: no 22 | connection: local 23 | gather_facts: no 24 | roles: 25 | - { role: deploy, heat_cmd: stack-update, node_count: 2, master_count: 1} 26 | 27 | - name: Check status of scaled down stack 28 | hosts: masters[0] 29 | become: yes 30 | roles: 31 | - { role: checkstatus, node_count: 2, master_count: 1} 32 | -------------------------------------------------------------------------------- /volume_attachment_docker.yaml: -------------------------------------------------------------------------------- 1 | heat_template_version: 2016-10-14 2 | 3 | description: > 4 | A template which provides a volume 
heat_template_version: 2016-10-14

# No-op stand-in for volume_docker.yaml, selected when no dedicated Cinder
# volume should be created for Docker storage.  It accepts the same "size"
# parameter and exposes the same "volume_id" output so either template can
# be plugged in without changing the caller.
#
# Fix: the description was copied from volume_docker.yaml and falsely
# claimed a volume is provided.
description: >
  A no-op template which provides no volume for Docker storage

parameters:
  size:
    description: >
      size of a cinder volume in GB to allocate to docker for container/image storage
    type: number

outputs:
  volume_id:
    description: ID of the created volume
    # Empty string signals "no volume" to the consuming template.
    value: ""