├── .gitignore ├── LICENSE ├── README.md ├── activate-internal-lb.yml ├── ansible.cfg ├── azure-cli.yml ├── deploy.yml ├── destroy.yml ├── roles ├── azure_infra │ ├── README.md │ ├── defaults │ │ └── main.yml │ ├── files │ │ └── ansible.cfg │ ├── handlers │ │ └── main.yml │ ├── meta │ │ └── main.yml │ ├── tasks │ │ ├── azure_deploy.yml │ │ ├── bastion.yml │ │ ├── cloudprovider.yml │ │ └── main.yml │ ├── templates │ │ ├── hosts.j2 │ │ └── private-dnsmasq.conf.j2 │ ├── tests │ │ ├── inventory │ │ └── test.yml │ └── vars │ │ └── main.yml ├── ocp_post │ ├── handlers │ │ └── main.yml │ ├── tasks │ │ └── main.yml │ └── templates │ │ └── private-dnsmasq.conf.j2 └── ocp_pre │ ├── README.md │ ├── defaults │ └── main.yml │ ├── files │ └── storageClass.yml │ ├── handlers │ └── main.yml │ ├── meta │ └── main.yml │ ├── tasks │ ├── main.yml │ ├── storage_container.yml │ ├── storage_etcd.yml │ └── subscribe.yml │ ├── templates │ └── azure.j2 │ ├── tests │ ├── inventory │ └── test.yml │ └── vars │ └── main.yml └── vars.yml.example /.gitignore: -------------------------------------------------------------------------------- 1 | **hosts 2 | *.retry 3 | vars.yml 4 | **.log 5 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. 
For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 
47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 
122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. 
In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. 
We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | # OpenShift on Azure 4 | This project automates the installation of OpenShift on Azure using ansible. It follows the [OpenShift + Azure Reference Architecture](https://access.redhat.com/documentation/en-us/reference_architectures/2018/html-single/deploying_and_managing_openshift_3.9_on_azure/) closely. By default the following is deployed, 3 masters, 3 Infra nodes, 3 app nodes, Logging (EFK), Metrics, Prometheus & Grafana. If deploying OpenShift Container Storage (Formerly CNS), this automation will follow best practices and depending on how many app nodes being deployed will create 1 OCS cluster for all storage if less than 3 app nodes and 2 OCS clusters if greater than or equal to 3 app nodes. SSH access is restricted into the cluster by allowing only the bastion to reach each Node, ssh is then proxied from the ansible control host via the bastion accessing nodes by hostname. 
`ssh ocp-master-1` To quickly standup an ansible deploy host have a look at [vagrant-rhel](https://github.com/hornjason/vagrant-rhel), as of now it only supports [virtualbox and libvirt providers](https://app.vagrantup.com/jasonhorn/boxes/rhel7). 5 | 6 | 7 | ## Topology 8 | 9 | ![enter image description here](https://access.redhat.com/webassets/avalon/d/Reference_Architectures-2018-Deploying_and_Managing_OpenShift_3.9_on_Azure-en-US/images/582f7bc50a94c64d5fbc330296a2697a/topology.png) 10 | 11 | ## Virtual Machine Sizing 12 | The following table outlines the sizes used to better understand the vCpu and Memory quotas needed to successfully deploy OpenShift on Azure. Verify your current subscription quotas meet the below requirements. 13 | 14 | Instance | Hostname | # |VM Size | vCpu's | Memory 15 | -------- | -------- | - | ------ | ------ | ----- 16 | Master Nodes | ocp-master-# | 3 | Standard_D4s_v3 | 4 | 16 17 | Infra Nodes | ocp-infra-# | 3 | Standard_D4s_v3 | 4 | 16 18 | App Nodes | ocp-app-# | 3 | Standard_D2S_v3 | 2 | 8 19 | Bastion | bastion | 1 | Standard_D1 | 1 | 3.5 20 | Total | | 10 | | 31 | 123.5Gb 21 | 22 | 23 | VM sizes can be configured from defaults by changing the following variables, if the sizes chosen are below minimum OpenShift requirements deployment checks will fail. 24 | 25 | 26 | | Variable | VM Size 27 | | -- | ---- | 28 | | vm_size_master: | Standard_D4s_v3 29 | | vm_size_infra: | Standard_D4s_v3 30 | | vm_size_node: | Standard_D2s_v3 31 | | vm_size_bastion: | Standard_D1 32 | 33 | 34 | >After installing and setting up Azure CLI the following command can be used to show available VM Resources in a location. 35 | ``` 36 | az vm list-usage --location westus --output table 37 | ``` 38 | 39 | ## Pre-Reqs 40 | 41 | Reqs 42 | A few Pre-Reqs need to be met and are documented in the Reference Architecture already. 
**Ansible 2.5 is required**, the ansible control host running the deployment needs to be registered and subscribed to `rhel-7-server-ansible-2.5-rpms`. Creating a [Service Principal](https://access.redhat.com/documentation/en-us/reference_architectures/2018/html-single/deploying_and_managing_openshift_3.9_on_azure/#service_principal) is documented as well as setting up the Azure CLI. Currently the Azure CLI is setup on the ansible control host running the deployment using the playbook `azure-cli.yml` or by following instructions here, [Azure CLI Setup](https://docs.microsoft.com/en-us/cli/azure/create-an-azure-service-principal-azure-cli?toc=%2Fazure%2Fazure-resource-manager%2Ftoc.json&view=azure-cli-latest). 43 | 44 | 1. Ansible control host setup: 45 | Register the ansible control host used for this deployment with valid RedHat subscription that's able to pull down ansible 2.5 or manually install ansible 2.5 along with atomic-openshift-utils. To quickly create a VM using Vagrant try out [vagrant-rhel](https://github.com/hornjason/vagrant-rhel). 46 | ``` 47 | sudo subscription-manager register --username < username > --password < password > 48 | sudo subscription-manager attach --pool < pool_id > 49 | sudo subscription-manager repos --disable=* 50 | sudo subscription-manager repos \ 51 | --enable="rhel-7-server-rpms" \ 52 | --enable="rhel-7-server-extras-rpms" \ 53 | --enable="rhel-7-server-ose-3.10-rpms" \ 54 | --enable="rhel-7-fast-datapath-rpms" \ 55 | --enable="rhel-7-server-ansible-2.5-rpms" 56 | 57 | sudo yum -y install ansible openshift-ansible git wget net-tools bind-utils yum-utils bash-completion kexec-tools sos psacct 58 | 59 | As of now a fix for deploying multiple OCS clusters is only available by cloning and using the latest release-3.10 branch from https://github.com/openshift/openshift-ansible.git 60 | ``` 61 | 62 | 2. 
Clone this repository 63 | 64 | ``` 65 | git clone https://github.com/hornjason/ansible-ocp-azure.git; cd ansible-ocp-azure 66 | ``` 67 | 3. Install Azure CLI, using playbook included or manually following above directions. 68 | ``` 69 | ansible-playbook azure-cli.yml 70 | ``` 71 | 4. Authenticate with Azure, `az login` described here, [Azure Authentication](https://docs.microsoft.com/en-us/cli/azure/authenticate-azure-cli?view=azure-cli-latest). 72 | 5. Create a Service Principal outlined here, [Creating SP](https://docs.microsoft.com/en-us/cli/azure/create-an-azure-service-principal-azure-cli?toc=%2Fazure%2Fazure-resource-manager%2Ftoc.json&view=azure-cli-latest) and [3.9 Reference Architecture](https://access.redhat.com/documentation/en-us/reference_architectures/2018/html-single/deploying_and_managing_openshift_3.9_on_azure/#service_principal) 73 | ``` 74 | az ad sp create-for-rbac --name ServicePrincipalName --password PASSWORD 75 | ``` 76 | 6. Copy vars.yml.example to vars.yml 77 | ``` 78 | cp vars.yml.example vars.yml 79 | ``` 80 | 7. Fill out required variables below. 81 | 8. Due to bug https://github.com/ansible/ansible/issues/40332 if the ansible control host used to deploy from has LANG set to something other than `en` then you must `unset LANG` 82 | 83 | ## Required Variables 84 | Most defaults are specified in `roles/azure_infra/defaults/main.yml`, Sensitive information is left out and should be entered in `vars.yml`. Below are required variables that should be filled in before deploying. 85 | 86 | - **location**: - Azure location for deployment ex. `eastus` 87 | - **rg**: - Azure Resource Group ex. `test-rg` 88 | - **admin_user**: - SSH user that will be created on each VM ex. `cloud-user` 89 | - **admin_pubkey**: - Copy paste the Public SSH key that will be added to authorized_keys on each VM ex. 90 | `ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAB` 91 | - **admin_privkey**: - Path to the private ssh key associated with the public key above. ex. 
`'~/.ssh/id_rsa'` 92 | - **sp_name**: - Service Principal name created in step 5. 93 | - **sp_secret**: - Service Principal secret 94 | - **sp_app_id**: - Service Principal APPID 95 | - **rhsm_user**: - If subscribing to RHSM using username / password, fill in username 96 | - **rhsm_pass**: - If subscribing to RHSM using username / password, fill in password for RHSM 97 | - **rhsm_key**: - If subscribing to RHSM using activation key and orgId fill in activation key here. 98 | - **rhsm_org**: - If subscribing to RHSM using activation key and orgId fill in orgId here. 99 | - **rhsm_broker_pool**: - If you have a broker pool id for masters / infra nodes fill it in here. This will be used for all masters/infra nodes. If you only have one pool id to use make this the same as `rhsm_node_pool`. 100 | - **rhsm_node_pool**: - If you have an application pool id for app nodes fill it in here. This will be used for all application nodes. If you only have one pool id to use make this the same as `rhsm_broker_pool` 101 | - **ocs_infra_cluster_usable_storage**: How much usable storage on the INFRA OCS Cluster, This will create bricks of this size on each Infra Node. 102 | - **ocs_app_cluster_usable_storage**: How much usable storage on the Application OCS Cluster, This will create bricks of this size on each APP Node. 103 | Number of Nodes 104 | - **master_nodes**: Defaults to 3 -> [1,2,3] 105 | - **infra_nodes**: Defaults to 3 -> [1,2,3] 106 | - **app_nodes**: Defaults to 3 -> [1,2,3] add additional nodes here. 107 | 108 | Optional Variables: 109 | 110 | - **vnet_cidr**: - Can customize as needed, ex `"10.0.0.0/16"` 111 | By Default the HTPasswdPasswordIdentityProvider is used but can be customized, this will be templated out to the ansible hosts file. By default htpasswd user is added. 
112 | - **openshift_master_htpasswd_users**: - Contains the user: < passwd hash generated from htpasswd -n user > 113 | - **deploy_cns**: true 114 | - **deploy_cns_to_infra**: true - This should always be 'True' if deploy_cns is 'True', no longer create separate CNS nodes 115 | - **deploy_metrics**: true 116 | - **deploy_logging**: true 117 | - **deploy_prometheus**: true 118 | - **metrics_volume_size**: '20Gi' 119 | - **logging_volume_size**: '100Gi' 120 | - **prometheus_volume_size**: '20Gi' 121 | 122 | ## Deployment 123 | After all pre-reqs are met and required variables have been filled out the deployment consists of running the following: 124 | `ansible-playbook deploy.yml -e @vars.yml` 125 | 126 | The ansible control host running the deployment will be setup to use ssh proxy through the bastion in order to reach all nodes. The openshift inventory `hosts` file will be templated into the project root directory and used for the Installation. 127 | 128 | ## Destroy 129 | `ansible-playbook destroy.yml -e@vars.yml` 130 | -------------------------------------------------------------------------------- /activate-internal-lb.yml: -------------------------------------------------------------------------------- 1 | - hosts: masters 2 | tasks: 3 | - name: Configure master client for local API 4 | lineinfile: 5 | path: "{{ item }}" 6 | regexp: " server: https://.+" 7 | line: " server: https://{{ ansible_hostname }}" 8 | state: present 9 | backup: yes 10 | with_items: 11 | - /root/.kube/config 12 | - /home/{{ admin_user }}/.kube/config 13 | - /etc/origin/master/admin.kubeconfig 14 | 15 | - name: Configure master node for local API 16 | lineinfile: 17 | path: /etc/origin/node/system:node:{{ ansible_hostname }}.kubeconfig 18 | regexp: " server: https://[^:]+:{{ openshift_master_api_port }}" 19 | line: " server: https://{{ ansible_hostname }}:{{ openshift_master_api_port }}" 20 | state: present 21 | backup: yes 22 | notify: Restart atomic-openshift-node 23 | handlers: 24 | - 
name: Restart atomic-openshift-node 25 | service: 26 | name: atomic-openshift-node 27 | state: restarted 28 | 29 | - hosts: nodes:!masters 30 | tasks: 31 | - name: Configure non-master node for API load balancer 32 | lineinfile: 33 | path: /etc/origin/node/system:node:{{ ansible_hostname }}.kubeconfig 34 | regexp: " server: https://[^:]+:{{ openshift_master_api_port }}" 35 | line: " server: https://{{ master_lb_dns }}:{{ openshift_master_api_port }}" 36 | state: present 37 | backup: yes 38 | notify: Restart atomic-openshift-node 39 | handlers: 40 | - name: Restart atomic-openshift-node 41 | service: 42 | name: atomic-openshift-node 43 | state: restarted 44 | 45 | - hosts: localhost 46 | become: yes 47 | tasks: 48 | - name: Configure local client for API load balancer 49 | lineinfile: 50 | path: /home/{{ admin_user }}/.kube/config 51 | regexp: " server: https://[^:]+:{{ hostvars[groups['masters'][0]]['openshift_master_api_port'] }}" 52 | line: " server: https://{{ hostvars[groups['masters'][0]]['master_lb_dns'] }}:{{ hostvars[groups['masters'][0]]['openshift_master_api_port'] }}" 53 | state: present 54 | backup: yes 55 | 56 | # The bastion is not part of the Ansible inventory file. 
57 | - name: Add bastion host 58 | add_host: 59 | name: bastion 60 | groups: bastions 61 | ansible_user: "{{ admin_user }}" 62 | ansible_become: True 63 | 64 | - name: Configure bastion client for API load balancer 65 | lineinfile: 66 | path: /home/{{ admin_user }}/.kube/config 67 | regexp: " server: https://[^:]+:{{ hostvars[groups['masters'][0]]['openshift_master_api_port'] }}" 68 | line: " server: https://{{ hostvars[groups['masters'][0]]['master_lb_dns'] }}:{{ hostvars[groups['masters'][0]]['openshift_master_api_port'] }}" 69 | state: present 70 | backup: yes 71 | delegate_to: bastion 72 | -------------------------------------------------------------------------------- /ansible.cfg: -------------------------------------------------------------------------------- 1 | [defaults] 2 | inventory = hosts 3 | callback_whitelist = profile_tasks, timer 4 | host_key_checking = False 5 | forks = 15 6 | log_path = ansible.log 7 | timeout = 60 8 | 9 | [ssh_connection] 10 | ssh_args = -C -o ControlMaster=auto -o StrictHostKeyChecking=no -o ControlPersist=120s -o GSSAPIAuthentication=no -o PreferredAuthentications=publickey 11 | pipelining = True 12 | -------------------------------------------------------------------------------- /azure-cli.yml: -------------------------------------------------------------------------------- 1 | - name: Azure CLI | Pre-Reqs 2 | hosts: localhost 3 | become: true 4 | vars: 5 | rhsm_repos: 6 | - rhel-7-server-rpms 7 | - rhel-7-server-extras-rpms 8 | - rhel-7-server-ose-3.10-rpms 9 | - rhel-7-fast-datapath-rpms 10 | - rhel-7-server-ansible-2.5-rpms 11 | rhsm_repos_disable: 12 | - rhel-7-server-htb-rpms 13 | - rhel-7-server-ose-3.9-rpms 14 | message: | 15 | This host is now setup, next steps are 16 | Login 17 | - az login 18 | Create Service Principal if one isn't available 19 | - az ad sp create-for-rbac --name ServicePrincipalName --password PASSWORD 20 | Copy vars.yml.example to vars.yml 21 | - cp vars.yml.example vars.yml 22 | Edit vars.yml 
23 | - vi vars.yml 24 | tasks: 25 | - name: Setup | Disable RHSM repositories 26 | rhsm_repository: 27 | name: "{{ rhsm_repos_disable }}" 28 | state: disabled 29 | 30 | - name: Setup | Enable RHSM repositories 31 | rhsm_repository: 32 | name: "{{ rhsm_repos }}" 33 | state: enabled 34 | 35 | - name: Setup | Import Microsft RPM key 36 | rpm_key: 37 | state: present 38 | key: https://packages.microsoft.com/keys/microsoft.asc 39 | 40 | - name: Setup | Check for exisiting Microsoft yum repo 41 | stat: 42 | path: /etc/yum.repos.d/azure-cli.repo 43 | register: repo_check 44 | 45 | - name: Setup | Install CLI yum repo 46 | shell: sh -c 'echo -e "[azure-cli]\nname=Azure CLI\nbaseurl=https://packages.microsoft.com/yumrepos/azure-cli\nenabled=1\ngpgcheck=1\ngpgkey=https://packages.microsoft.com/keys/microsoft.asc" > /etc/yum.repos.d/azure-cli.repo' 47 | when: not repo_check.stat.exists 48 | 49 | - name: Setup | Install required packages 50 | yum: 51 | name: "{{ item }}" 52 | state: latest 53 | loop: 54 | - https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm 55 | - python2-pip 56 | - azure-cli 57 | - atomic-openshift-clients 58 | - openshift-ansible 59 | 60 | - name: Setup | Remove epel 61 | yum: 62 | name: epel-release 63 | state: absent 64 | 65 | - name: Setup | Install Azure ansible pip modules 66 | pip: 67 | name: ansible[azure] 68 | 69 | - name: Setup | Prompt 70 | debug: 71 | msg: "{{ message.split('\n') }}" 72 | 73 | -------------------------------------------------------------------------------- /deploy.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | roles: 4 | - { role: azure_infra, tags: infra } 5 | 6 | # The Azure VMs are provisioned asynchronously. It can take some time 7 | # until they are reachable via SSH, even after Azure reports that the 8 | # provisioning state is "Succeeded". 
9 | - hosts: nodes 10 | gather_facts: no 11 | tasks: 12 | - name: Wait for nodes to become reachable via SSH 13 | wait_for_connection: 14 | sleep: 30 15 | timeout: 1200 16 | connect_timeout: 10 17 | 18 | - hosts: nodes 19 | roles: 20 | - { role: ocp_pre, tags: ocp-pre } 21 | 22 | - name: call openshift prerequisites play 23 | import_playbook: /usr/share/ansible/openshift-ansible/playbooks/prerequisites.yml 24 | tags: 25 | - ocp-pre 26 | - ocp-deploy 27 | 28 | - name: call openshift deploy_cluster play 29 | import_playbook: /usr/share/ansible/openshift-ansible/playbooks/deploy_cluster.yml 30 | tags: ocp-deploy 31 | 32 | - name: install Grafana 33 | import_playbook: /usr/share/ansible/openshift-ansible/playbooks/openshift-grafana/config.yml 34 | tags: 35 | - logging-metrics 36 | - ocp-deploy 37 | 38 | - hosts: nodes 39 | roles: 40 | - { role: ocp_post, tags: ocp-post } 41 | 42 | - hosts: localhost 43 | tasks: 44 | - name: Copy kube config to installation host 45 | fetch: 46 | src: /home/{{ admin_user }}/.kube/config 47 | dest: ~/.kube/ 48 | flat: yes 49 | delegate_to: "{{ groups.masters[0] }}" 50 | tags: ocp-post 51 | 52 | # The bastion is not part of the Ansible inventory file. 53 | - name: Add bastion host 54 | add_host: 55 | name: bastion 56 | groups: bastions 57 | ansible_user: "{{ admin_user }}" 58 | ansible_become: True 59 | tags: ocp-post 60 | 61 | - name: Copy kube config to bastion host 62 | copy: 63 | src: ~/.kube/config 64 | dest: /home/{{ admin_user }}/.kube/ 65 | delegate_to: bastion 66 | tags: ocp-post 67 | 68 | - name: Configure internal master LB 69 | import_playbook: activate-internal-lb.yml 70 | when: master_lb_private_ip is defined 71 | 72 | - hosts: localhost 73 | vars: 74 | admin: "{{ openshift_master_htpasswd_users.keys() | to_nice_yaml }}" 75 | tasks: 76 | # This only works after the activate-internal-lb.yml playbook has 77 | # been run, if an internal Azure load balancer is being used. 
78 | - name: add cluster-admin 79 | shell: oc adm policy add-cluster-role-to-user cluster-admin {{ admin }} 80 | tags: ocp-post 81 | -------------------------------------------------------------------------------- /destroy.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | vars: 4 | skip: False 5 | tasks: 6 | - name: Destroy Azure | Check for ResourceGroup 7 | shell: az group exists --name "{{ rg }}" 8 | register: rg_exist 9 | tags: always 10 | 11 | - block: 12 | - debug: var=rg_exist 13 | 14 | - name: Destroy Azure | Gather list of VMs 15 | shell: az vm list -g "{{ rg }}" --query "[].name" -o tsv 16 | register: vm_list 17 | 18 | # The bastion is not part of the Ansible inventory, but we 19 | # need to unregister it with RHSM. 20 | - name: Destroy Azure | Add bastion host 21 | add_host: 22 | name: bastion 23 | groups: bastions 24 | ansible_user: "{{ admin_user }}" 25 | ansible_become: True 26 | 27 | - name: Destroy Azure | RHSM unregister 28 | redhat_subscription: 29 | state: absent 30 | delegate_to: "{{ item }}" 31 | loop: "{{ vm_list.stdout_lines }}" 32 | 33 | # We use the CLI option "--no-wait" to delete all VMs in 34 | # parallel, and then wait in an Ansible loop until all of them 35 | # are gone. 36 | - name: Destroy Azure | Schedule VM deletion 37 | command: az vm delete --resource-group {{ rg }} --name {{ item }} --no-wait --yes 38 | loop: "{{ vm_list.stdout_lines }}" 39 | 40 | # Wait up to 20 minutes until all VMs have been deleted. 
41 | - name: Destroy Azure | Wait for VM deletion 42 | command: az vm list -g "{{ rg }}" --query "[].name" -o tsv 43 | register: vm_list2 44 | retries: 20 45 | delay: 60 46 | until: vm_list2.stdout_lines | length == 0 47 | 48 | - name: Destroy Azure | Gather list of StorageAccounts 49 | shell: az storage account list -g "{{ rg }}" --query "[].name" -o tsv 50 | register: sa_list 51 | 52 | - name: Destroy Azure | Delete Storage Accounts 53 | azure_rm_storageaccount: 54 | resource_group: "{{ rg }}" 55 | name: "{{ item }}" 56 | state: absent 57 | loop: "{{ sa_list.stdout_lines }}" 58 | 59 | - name: Destroy Azure | Gather list of disks 60 | shell: az disk list -g "{{ rg }}" --query "[].name" -o tsv 61 | register: disk_list 62 | 63 | # We use the CLI option "--no-wait" to delete all disks in 64 | # parallel. 65 | - name: Destroy Azure | Schedule managed disk deletion 66 | command: az disk delete --resource-group {{ rg }} --name {{ item }} --no-wait --yes 67 | loop: "{{ disk_list.stdout_lines }}" 68 | 69 | # Wait up to 20 minutes until all disks have been deleted. 
70 | - name: Destroy Azure | Wait for disk deletion 71 | command: az disk list -g "{{ rg }}" --query "[].name" -o tsv 72 | register: disk_list2 73 | retries: 20 74 | delay: 60 75 | until: disk_list2.stdout_lines | length == 0 76 | 77 | - name: Destroy Azure | Gather list of AvailabilitySets 78 | shell: az vm availability-set list -g "{{ rg }}" --query "[].name" -o tsv 79 | register: as_list 80 | 81 | - name: Destroy Azure | Delete Availability Sets 82 | azure_rm_availabilityset: 83 | name: "{{ item }}" 84 | location: "{{ location }}" 85 | resource_group: "{{ rg }}" 86 | state: absent 87 | loop: "{{ as_list.stdout_lines }}" 88 | 89 | - name: Destroy Azure | Gather list of LoadBalancers 90 | shell: az network lb list -g "{{ rg }}" --query "[].name" -o tsv 91 | register: lb_list 92 | 93 | - name: Destroy Azure | Delete LoadBalancers 94 | azure_rm_loadbalancer: 95 | name: "{{ item }}" 96 | location: "{{ location }}" 97 | resource_group: "{{ rg }}" 98 | state: absent 99 | loop: "{{ lb_list.stdout_lines }}" 100 | 101 | - name: Destroy Azure | Gather list of NICs 102 | shell: az network nic list -g "{{ rg }}" --query "[].name" -o tsv 103 | register: nic_list 104 | 105 | - name: Destroy Azure | Delete NICs 106 | azure_rm_networkinterface: 107 | resource_group: "{{ rg }}" 108 | name: "{{ item }}" 109 | state: absent 110 | loop: "{{ nic_list.stdout_lines }}" 111 | 112 | - name: Destroy Azure | Gather list of PublicIPs 113 | shell: az network public-ip list -g "{{ rg }}" --query "[].name" -o tsv 114 | register: pip_list 115 | 116 | - name: Destroy Azure | Delete PublicIPs 117 | azure_rm_publicipaddress: 118 | resource_group: "{{ rg }}" 119 | name: "{{ item }}" 120 | state: absent 121 | loop: "{{ pip_list.stdout_lines }}" 122 | 123 | - name: Destroy Azure | Gather list of Network Security Groups 124 | shell: az network nsg list -g "{{ rg }}" --query "[].name" -o tsv 125 | register: nsg_list 126 | 127 | - name: Destroy Azure | Delete Network Security Groups 128 | 
azure_rm_securitygroup: 129 | resource_group: "{{ rg }}" 130 | name: "{{ item }}" 131 | state: absent 132 | loop: "{{ nsg_list.stdout_lines }}" 133 | 134 | - name: Destroy Azure | Gather list of vNets 135 | shell: az network vnet list -g "{{ rg }}" --query '[].name' -o tsv 136 | register: vnet_list 137 | tags: vnet 138 | when: not vnet_preserve 139 | 140 | - name: Destroy Azure | Delete vNet 141 | azure_rm_virtualnetwork: 142 | resource_group: "{{ rg }}" 143 | name: "{{ item }}" 144 | state: absent 145 | tags: vnet 146 | loop: "{{ vnet_list.stdout_lines | default([]) }}" 147 | when: not vnet_preserve 148 | when: rg_exist.stdout == 'true' 149 | -------------------------------------------------------------------------------- /roles/azure_infra/README.md: -------------------------------------------------------------------------------- 1 | Role Name 2 | ========= 3 | 4 | A brief description of the role goes here. 5 | 6 | Requirements 7 | ------------ 8 | 9 | Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. 10 | 11 | Role Variables 12 | -------------- 13 | 14 | A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. 15 | 16 | Dependencies 17 | ------------ 18 | 19 | A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles.
20 | 21 | Example Playbook 22 | ---------------- 23 | 24 | Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: 25 | 26 | - hosts: servers 27 | roles: 28 | - { role: username.rolename, x: 42 } 29 | 30 | License 31 | ------- 32 | 33 | BSD 34 | 35 | Author Information 36 | ------------------ 37 | 38 | An optional section for the role authors to include contact information, or a website (HTML is not allowed). 39 | -------------------------------------------------------------------------------- /roles/azure_infra/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # defaults file for azure_infra 3 | state: present 4 | location: eastus 5 | rg: "test" 6 | 7 | 8 | admin_user: cloud-user 9 | # This will be added to each instance's authorized_keys file 10 | admin_pubkey: '' 11 | # Path for the private key which will be added to localhost ssh_config for proxying through the bastion 12 | admin_privkey: '~/.ssh/id_rsa' 13 | 14 | # Dictionary of usernames (keys) and public SSH keys (values). These 15 | # will be created on the bastion and added to the ocpadmin group. If 16 | # this dictionary is not empty, the ocpadmin group will be configured 17 | # for password-less sudo. 
18 | bastion_users: {} 19 | 20 | # Service Principal name 21 | sp_name: '' 22 | # Place this in vars file that can be encrypted in root of play and not checked into SCM 23 | sp_secret: '' 24 | 25 | 26 | ####################### 27 | # RHSM 28 | # Username / Password or 29 | # Activation key / OrgId 30 | ####################### 31 | rhsm_user: '' 32 | rhsm_pass: '' 33 | rhsm_key: "" 34 | rhsm_org: "" 35 | # Can specify separate pools so only Application nodes use paid subs 36 | # or keep the same if only using one pool 37 | rhsm_broker_pool: "" 38 | rhsm_node_pool: "" 39 | rhsm_repos: 40 | - rhel-7-server-rpms 41 | - rhel-7-server-extras-rpms 42 | - rhel-7-server-ose-3.10-rpms 43 | - rhel-7-fast-datapath-rpms 44 | - rhel-7-server-ansible-2.4-rpms 45 | bastion_pkgs: 46 | - ansible 47 | - atomic-openshift-utils 48 | - atomic-openshift-clients 49 | - git 50 | - tmux 51 | - screen 52 | 53 | ####################### 54 | # Custom Named Certs 55 | ####################### 56 | # Router 57 | #router_cert: '{"cafile": "/path/to/ca_cert", "certfile": "/path/to/fullchain.cer", "keyfile": "/vagrant/keys/domain.key"}' 58 | # Master 59 | #master_cert: '[{"cafile": "/path/to/ca_cert", "certfile": "/path/to/fullchain.cer", "keyfile": "/path/to/key/domain.key", "names": ["openshift.console.domain.name"]}]' 60 | 61 | ####################### 62 | # Network 63 | ####################### 64 | vnet_name: "{{ rg }}vnet" 65 | vnet_cidr: "192.168.0.0/24" 66 | master_subnet_cidr: "192.168.0.0/27" 67 | infra_subnet_cidr: "192.168.0.32/27" 68 | cns_subnet_cidr: "192.168.0.64/27" 69 | app_subnet_cidr: "192.168.0.128/25" 70 | 71 | api_port: 443 72 | 73 | vnet_preserve: false 74 | 75 | ####################### 76 | # LB 77 | ####################### 78 | # Name of Master LB 79 | master_lb_public_ip: "masterExternalLB" 80 | 81 | # Name of Router LB 82 | router_lb_public_ip: "routerExternalLB" 83 | 84 | ####################### 85 | # nodes 86 | ####################### 87 | master_nodes: [1,2,3] 88 | 
infra_nodes: [1,2,3] 89 | app_nodes: [1,2,3] 90 | 91 | # registry Storage Account Name 92 | registry_storage_account: "{{ rg | regex_replace('-') }}" 93 | 94 | ####################### 95 | # OCP Identity 96 | # Use 'htpasswd -n ' to generate password hash. (htpasswd from httpd-tools RPM) 97 | ####################### 98 | # Example with admin:redhat 99 | openshift_master_htpasswd_users: {'admin': '$apr1$zAhyA9Ko$rBxBOwAwwtRuuaw8OtCwH0'} 100 | openshift_master_identity_providers: [{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider' }] 101 | 102 | # 103 | #ocp_admin_passwd: 'redhat' 104 | 105 | ####################### 106 | # deploy CNS 107 | ####################### 108 | deploy_cns: true 109 | deploy_cns_on_infra: true 110 | deploy_metrics: true 111 | deploy_logging: true 112 | deploy_prometheus: true 113 | 114 | # volume sizing defaults 115 | metrics_volume_size: '20Gi' 116 | logging_volume_size: '100Gi' 117 | prometheus_volume_size: '20Gi' 118 | 119 | 120 | ####################### 121 | ####################### 122 | # Don't change 123 | ####################### 124 | ####################### 125 | bastion: "{{ rg }}b.{{ location }}.cloudapp.azure.com" 126 | -------------------------------------------------------------------------------- /roles/azure_infra/files/ansible.cfg: -------------------------------------------------------------------------------- 1 | [defaults] 2 | inventory = hosts 3 | callback_whitelist = profile_tasks, timer 4 | host_key_checking = False 5 | forks = 15 6 | log_path = ansible.log 7 | timeout = 60 8 | 9 | [ssh_connection] 10 | ssh_args = -C -o ControlMaster=auto -o StrictHostKeyChecking=no -o ControlPersist=120s -o GSSAPIAuthentication=no -o PreferredAuthentications=publickey 11 | pipelining = True 12 | -------------------------------------------------------------------------------- /roles/azure_infra/handlers/main.yml:
-------------------------------------------------------------------------------- 1 | --- 2 | # handlers file for azure_infra 3 | 4 | - name: Restart dnsmasq 5 | systemd: 6 | name: dnsmasq 7 | state: restarted 8 | delegate_to: "{{ groups['bastions'][0] }}" 9 | -------------------------------------------------------------------------------- /roles/azure_infra/meta/main.yml: -------------------------------------------------------------------------------- 1 | galaxy_info: 2 | author: your name 3 | description: your description 4 | company: your company (optional) 5 | 6 | # If the issue tracker for your role is not on github, uncomment the 7 | # next line and provide a value 8 | # issue_tracker_url: http://example.com/issue/tracker 9 | 10 | # Some suggested licenses: 11 | # - BSD (default) 12 | # - MIT 13 | # - GPLv2 14 | # - GPLv3 15 | # - Apache 16 | # - CC-BY 17 | license: license (GPLv2, CC-BY, etc) 18 | 19 | min_ansible_version: 1.2 20 | 21 | # If this a Container Enabled role, provide the minimum Ansible Container version. 22 | # min_ansible_container_version: 23 | 24 | # Optionally specify the branch Galaxy will use when accessing the GitHub 25 | # repo for this role. During role install, if no tags are available, 26 | # Galaxy will use this branch. During import Galaxy will access files on 27 | # this branch. If Travis integration is configured, only notifications for this 28 | # branch will be accepted. Otherwise, in all cases, the repo's default branch 29 | # (usually master) will be used. 30 | #github_branch: 31 | 32 | # 33 | # platforms is a list of platforms, and each platform has a name and a list of versions. 34 | # 35 | # platforms: 36 | # - name: Fedora 37 | # versions: 38 | # - all 39 | # - 25 40 | # - name: SomePlatform 41 | # versions: 42 | # - all 43 | # - 1.0 44 | # - 7 45 | # - 99.99 46 | 47 | galaxy_tags: [] 48 | # List tags for your role here, one per line. A tag is a keyword that describes 49 | # and categorizes the role. 
Users find roles by searching for tags. Be sure to 50 | # remove the '[]' above, if you add tags to this list. 51 | # 52 | # NOTE: A tag is limited to a single word comprised of alphanumeric characters. 53 | # Maximum 20 tags per role. 54 | 55 | dependencies: [] 56 | # List your role dependencies here, one per line. Be sure to remove the '[]' above, 57 | # if you add dependencies to this list. -------------------------------------------------------------------------------- /roles/azure_infra/tasks/azure_deploy.yml: -------------------------------------------------------------------------------- 1 | - name: Azure | Manage Resource Group 2 | azure_rm_resourcegroup: 3 | state: "{{ state }}" 4 | name: "{{ rg }}" 5 | location: "{{ location }}" 6 | 7 | - name: Azure | Manage vNet 8 | azure_rm_virtualnetwork: 9 | name: "{{ vnet_name }}" 10 | resource_group: "{{ rg }}" 11 | address_prefixes_cidr: 12 | - "{{ vnet_cidr }}" 13 | 14 | - name: Azure | Manage Master subnet 15 | azure_rm_subnet: 16 | name: master_subnet 17 | state: "{{ state }}" 18 | virtual_network_name: "{{ vnet_name }}" 19 | resource_group: "{{ rg }}" 20 | address_prefix_cidr: "{{ master_subnet_cidr }}" 21 | 22 | - name: Azure | Manage Infra subnet 23 | azure_rm_subnet: 24 | name: infra_subnet 25 | state: "{{ state }}" 26 | virtual_network_name: "{{ vnet_name }}" 27 | resource_group: "{{ rg }}" 28 | address_prefix_cidr: "{{ infra_subnet_cidr }}" 29 | 30 | # only create if deploying CNS on separate nodes 31 | - name: Azure | Manage CNS subnet 32 | azure_rm_subnet: 33 | name: cns_subnet 34 | state: "{{ state }}" 35 | virtual_network_name: "{{ vnet_name }}" 36 | resource_group: "{{ rg }}" 37 | address_prefix_cidr: "{{ cns_subnet_cidr }}" 38 | when: 39 | - deploy_cns | default(true) | bool 40 | - not deploy_cns_on_infra | default(false) | bool 41 | 42 | - name: Azure | Manage App subnet 43 | azure_rm_subnet: 44 | name: app_subnet 45 | state: "{{ state }}" 46 | virtual_network_name: "{{ vnet_name }}" 47 | 
resource_group: "{{ rg }}" 48 | address_prefix_cidr: "{{ app_subnet_cidr }}" 49 | 50 | - name: Azure | Manage Bastion Network Security Group 51 | azure_rm_securitygroup: 52 | resource_group: "{{ rg }}" 53 | name: bastion-nsg 54 | purge_rules: yes 55 | rules: 56 | - name: bastion-ssh 57 | description: "SSH access from Internet" 58 | protocol: Tcp 59 | destination_port_range: 22 60 | access: Allow 61 | priority: 500 62 | direction: Inbound 63 | 64 | - name: Azure | Manage Master Network Security Group 65 | azure_rm_securitygroup: 66 | resource_group: "{{ rg }}" 67 | name: master-nsg 68 | purge_rules: yes 69 | rules: 70 | - name: master-ssh 71 | description: "SSH from bastion" 72 | protocol: Tcp 73 | destination_port_range: 22 74 | source_address_prefix: VirtualNetwork 75 | access: Allow 76 | priority: 500 77 | direction: Inbound 78 | - name: master-etcd 79 | description: "ETCD service ports" 80 | protocol: Tcp 81 | destination_port_range: 2379-2380 82 | source_address_prefix: VirtualNetwork 83 | access: Allow 84 | priority: 525 85 | direction: Inbound 86 | - name: master-api 87 | description: "API port" 88 | protocol: Tcp 89 | destination_port_range: "{{ api_port }}" 90 | access: Allow 91 | priority: 550 92 | direction: Inbound 93 | - name: master-api-lb 94 | description: "API LB port" 95 | protocol: Tcp 96 | destination_port_range: "{{ api_port }}" 97 | access: Allow 98 | priority: 575 99 | direction: Inbound 100 | - name: master-ocp-tcp-dns 101 | description: "TCP DNS " 102 | protocol: Tcp 103 | destination_port_range: 8053 104 | source_address_prefix: VirtualNetwork 105 | access: Allow 106 | priority: 600 107 | direction: Inbound 108 | - name: master-ocp-tcp-fluentd 109 | description: "TCP fluentd" 110 | protocol: Tcp 111 | destination_port_range: 24224 112 | source_address_prefix: VirtualNetwork 113 | access: Allow 114 | priority: 625 115 | direction: Inbound 116 | - name: master-node-kubelet 117 | description: "Kubelet" 118 | protocol: Tcp 119 | 
destination_port_range: 10250 120 | source_address_prefix: VirtualNetwork 121 | access: Allow 122 | priority: 650 123 | direction: Inbound 124 | - name: master-node-sdn 125 | description: "OpenShift SDN" 126 | protocol: Udp 127 | destination_port_range: 4789 128 | source_address_prefix: VirtualNetwork 129 | access: Allow 130 | priority: 675 131 | direction: Inbound 132 | 133 | - name: Azure | Manage Infra Network Security Group 134 | azure_rm_securitygroup: 135 | resource_group: "{{ rg }}" 136 | name: infra-node-nsg 137 | purge_rules: yes 138 | rules: 139 | - name: infra-ssh 140 | description: "SSH from bastion" 141 | protocol: Tcp 142 | destination_port_range: 22 143 | source_address_prefix: VirtualNetwork 144 | access: Allow 145 | priority: 500 146 | direction: Inbound 147 | - name: infra-router-http-ports 148 | description: "OpenShift Router HTTP" 149 | protocol: Tcp 150 | destination_port_range: 80 151 | source_address_prefix: AzureLoadBalancer 152 | access: Allow 153 | priority: 525 154 | direction: Inbound 155 | - name: infra-router-ports-https 156 | description: "OpenShift Router HTTPS" 157 | protocol: Tcp 158 | destination_port_range: 443 159 | source_address_prefix: AzureLoadBalancer 160 | access: Allow 161 | priority: 550 162 | direction: Inbound 163 | - name: infra-es-ports-rest 164 | description: "ElasticSearch Rest Port" 165 | protocol: Tcp 166 | destination_port_range: 9200 167 | source_address_prefix: VirtualNetwork 168 | access: Allow 169 | priority: 575 170 | direction: Inbound 171 | - name: infra-es-port-node 172 | description: "ElasticSearch Node Port" 173 | protocol: Tcp 174 | destination_port_range: 9300 175 | source_address_prefix: VirtualNetwork 176 | access: Allow 177 | priority: 600 178 | direction: Inbound 179 | - name: infra-node-kubelet 180 | description: "Kubelet" 181 | protocol: Tcp 182 | destination_port_range: 10250 183 | source_address_prefix: VirtualNetwork 184 | access: Allow 185 | priority: 625 186 | direction: Inbound 187 | - 
name: infra-node-sdn 188 | description: "OpenShift SDN" 189 | protocol: Udp 190 | destination_port_range: 4789 191 | source_address_prefix: VirtualNetwork 192 | access: Allow 193 | priority: 650 194 | direction: Inbound 195 | - name: infra-router-port-http-vnet 196 | description: "OpenShift Router HTTP from vNet" 197 | protocol: Tcp 198 | destination_port_range: 80 199 | access: Allow 200 | priority: 675 201 | direction: Inbound 202 | - name: infra-router-port-https-vnet 203 | description: "OpenShift Router HTTPS from vNet" 204 | protocol: Tcp 205 | destination_port_range: 443 206 | access: Allow 207 | priority: 700 208 | direction: Inbound 209 | 210 | - name: Azure | Manage Node Network Security Group 211 | azure_rm_securitygroup: 212 | resource_group: "{{ rg }}" 213 | name: node-nsg 214 | purge_rules: yes 215 | rules: 216 | - name: node-ssh 217 | description: "SSH from bastion" 218 | protocol: Tcp 219 | destination_port_range: 22 220 | source_address_prefix: VirtualNetwork 221 | access: Allow 222 | priority: 500 223 | direction: Inbound 224 | - name: node-kubelet 225 | description: "Kubelet" 226 | protocol: Tcp 227 | destination_port_range: 10250 228 | source_address_prefix: VirtualNetwork 229 | access: Allow 230 | priority: 525 231 | direction: Inbound 232 | - name: node-sdn 233 | description: "ElasticSearch and OCP Apps" 234 | protocol: Tcp 235 | destination_port_range: 4789 236 | source_address_prefix: VirtualNetwork 237 | access: Allow 238 | priority: 550 239 | direction: Inbound 240 | 241 | # Only create this NSG if deploying CNS on separate Nodes 242 | - name: Azure | Manage CNS Network Security Group 243 | azure_rm_securitygroup: 244 | resource_group: "{{ rg }}" 245 | name: cns-nsg 246 | purge_rules: yes 247 | rules: 248 | - name: node-ssh 249 | description: "SSH from bastion" 250 | protocol: Tcp 251 | destination_port_range: 22 252 | source_address_prefix: VirtualNetwork 253 | access: Allow 254 | priority: 500 255 | direction: Inbound 256 | - name: 
node-kubelet 257 | description: "Kubelet" 258 | protocol: Tcp 259 | destination_port_range: 10250 260 | source_address_prefix: VirtualNetwork 261 | access: Allow 262 | priority: 525 263 | direction: Inbound 264 | - name: node-sdn 265 | description: "ElasticSearch and OCP Apps" 266 | protocol: Tcp 267 | destination_port_range: 4789 268 | source_address_prefix: VirtualNetwork 269 | access: Allow 270 | priority: 550 271 | direction: Inbound 272 | - name: gluster-ssh 273 | description: "Gluster SSH" 274 | protocol: Tcp 275 | destination_port_range: 2222 276 | source_address_prefix: VirtualNetwork 277 | access: Allow 278 | priority: 575 279 | direction: Inbound 280 | - name: gluster-daemon 281 | description: "Gluster Daemon" 282 | protocol: Tcp 283 | destination_port_range: 24008 284 | source_address_prefix: VirtualNetwork 285 | access: Allow 286 | priority: 600 287 | direction: Inbound 288 | - name: gluster-mgmt 289 | description: "Gluster Management" 290 | protocol: Tcp 291 | destination_port_range: 24009 292 | source_address_prefix: VirtualNetwork 293 | access: Allow 294 | priority: 625 295 | direction: Inbound 296 | - name: gluster-client 297 | description: "Gluster Clients" 298 | protocol: Tcp 299 | destination_port_range: 49152-49664 300 | source_address_prefix: VirtualNetwork 301 | access: Allow 302 | priority: 650 303 | direction: Inbound 304 | - name: portmap-tcp 305 | description: "Portmap TCP" 306 | protocol: Tcp 307 | destination_port_range: 111 308 | source_address_prefix: VirtualNetwork 309 | access: Allow 310 | priority: 675 311 | direction: Inbound 312 | - name: portmap-udp 313 | description: "Portmap UDP" 314 | protocol: Udp 315 | destination_port_range: 111 316 | source_address_prefix: VirtualNetwork 317 | access: Allow 318 | priority: 700 319 | direction: Inbound 320 | - name: gluster-iscsi 321 | description: "Gluster ISCSI" 322 | protocol: Tcp 323 | destination_port_range: 3260 324 | source_address_prefix: VirtualNetwork 325 | access: Allow 326 | 
priority: 725 327 | direction: Inbound 328 | - name: gluster-block 329 | description: "Gluster Blockd" 330 | protocol: Tcp 331 | destination_port_range: 24010 332 | source_address_prefix: VirtualNetwork 333 | access: Allow 334 | priority: 750 335 | direction: Inbound 336 | when: 337 | - deploy_cns | default(true) | bool 338 | - not deploy_cns_on_infra | default(false) | bool 339 | 340 | # Update Infra NSG if deploy_cns_on_infra = true 341 | - name: Azure | Update Infra NSG with CNS rules 342 | azure_rm_securitygroup: 343 | resource_group: "{{ rg }}" 344 | name: infra-node-nsg 345 | rules: 346 | - name: gluster-ssh 347 | description: "Gluster SSH" 348 | protocol: Tcp 349 | destination_port_range: 2222 350 | source_address_prefix: VirtualNetwork 351 | access: Allow 352 | priority: 725 353 | direction: Inbound 354 | - name: gluster-daemon 355 | description: "Gluster Daemon" 356 | protocol: Tcp 357 | destination_port_range: 24008 358 | source_address_prefix: VirtualNetwork 359 | access: Allow 360 | priority: 750 361 | direction: Inbound 362 | - name: gluster-mgmt 363 | description: "Gluster Management" 364 | protocol: Tcp 365 | destination_port_range: 24009 366 | source_address_prefix: VirtualNetwork 367 | access: Allow 368 | priority: 775 369 | direction: Inbound 370 | - name: gluster-client 371 | description: "Gluster Clients" 372 | protocol: Tcp 373 | destination_port_range: 49152-49664 374 | source_address_prefix: VirtualNetwork 375 | access: Allow 376 | priority: 800 377 | direction: Inbound 378 | - name: portmap-tcp 379 | description: "Portmap TCP" 380 | protocol: Tcp 381 | destination_port_range: 111 382 | source_address_prefix: VirtualNetwork 383 | access: Allow 384 | priority: 825 385 | direction: Inbound 386 | - name: portmap-udp 387 | description: "Portmap UDP" 388 | protocol: Udp 389 | destination_port_range: 111 390 | source_address_prefix: VirtualNetwork 391 | access: Allow 392 | priority: 850 393 | direction: Inbound 394 | - name: gluster-iscsi 395 | 
description: "Gluster ISCSI" 396 | protocol: Tcp 397 | destination_port_range: 3260 398 | source_address_prefix: VirtualNetwork 399 | access: Allow 400 | priority: 875 401 | direction: Inbound 402 | - name: gluster-block 403 | description: "Gluster Blockd" 404 | protocol: Tcp 405 | destination_port_range: 24010 406 | source_address_prefix: VirtualNetwork 407 | access: Allow 408 | priority: 900 409 | direction: Inbound 410 | when: 411 | - deploy_cns | default(true) | bool 412 | - deploy_cns_on_infra | default(false) | bool 413 | 414 | 415 | # Public IPS 416 | - name: Azure | Manage Master Public IP 417 | azure_rm_publicipaddress: 418 | resource_group: "{{ rg }}" 419 | state: "{{ state }}" 420 | name: masterPublicIP 421 | domain_name_label: "{{ rg }}" 422 | allocation_method: Static 423 | register: master_lb_ip 424 | when: master_lb_private_ip is not defined 425 | 426 | # The two set_fact tasks have to be inside the blocks, they cannot be 427 | # merged. The router_lb_ip register variable is set (and overwritten) 428 | # even when the "when" condition is false. 
429 | - block: 430 | - name: Azure | Manage Router Public IP without DNS label 431 | azure_rm_publicipaddress: 432 | resource_group: "{{ rg }}" 433 | state: "{{ state }}" 434 | name: routerPublicIP 435 | allocation_method: Static 436 | register: router_lb_ip1 437 | 438 | - set_fact: 439 | router_lb_ip: "{{ router_lb_ip1.state.ip_address }}" 440 | when: router_lb_dns_label is not defined 441 | 442 | - block: 443 | - name: Azure | Manage Router Public IP with DNS label 444 | azure_rm_publicipaddress: 445 | resource_group: "{{ rg }}" 446 | state: "{{ state }}" 447 | name: routerPublicIP 448 | allocation_method: Static 449 | domain_name_label: "{{ router_lb_dns_label }}" 450 | register: router_lb_ip2 451 | 452 | - set_fact: 453 | router_lb_ip: "{{ router_lb_ip2.state.ip_address }}" 454 | when: router_lb_dns_label is defined 455 | 456 | # Makes this idempotent, will fail normally since 457 | # domain_name_label is attached to bastion-VMNic 458 | - name: Azure | Check for existing BastionFQDN 459 | shell: az network public-ip show -n bastionExternalIP -g {{ rg }} --query dnsSettings.fqdn 460 | register: bastion_fqdn 461 | ignore_errors: true 462 | 463 | - name: Azure | Manage Bastion Public IP 464 | azure_rm_publicipaddress: 465 | resource_group: "{{ rg }}" 466 | name: bastionExternalIP 467 | domain_name_label: "{{ rg }}b" 468 | allocation_method: Static 469 | register: bastion_public_ip 470 | when: bastion_fqdn.stdout == '' 471 | 472 | - name: Azure | Manage Bastion Public IP 473 | azure_rm_publicipaddress: 474 | resource_group: "{{ rg }}" 475 | state: "{{ state }}" 476 | name: bastionExternalIP 477 | domain_name_label: "{{ rg }}b" 478 | allocation_method: Static 479 | register: bastion_public_ip 480 | 481 | # LBs 482 | - name: Azure | Manage public Master LB 483 | azure_rm_loadbalancer: 484 | name: ocpMasterLB 485 | location: "{{ location }}" 486 | resource_group: "{{ rg }}" 487 | frontend_ip_configurations: 488 | - name: masterApiFrontend 489 | public_ip_address:
masterPublicIP 490 | backend_address_pools: 491 | - name: masterAPIBackend 492 | probes: 493 | - name: masterHealthProbe 494 | port: "{{ api_port }}" 495 | protocol: Tcp 496 | load_balancing_rules: 497 | - name: ocpApiHealth 498 | frontend_ip_configuration: masterApiFrontend 499 | backend_address_pool: masterAPIBackend 500 | probe: masterHealthProbe 501 | protocol: Tcp 502 | frontend_port: "{{ api_port }}" 503 | backend_port: "{{ api_port }}" 504 | load_distribution: SourceIPProtocol 505 | when: master_lb_private_ip is not defined 506 | 507 | # As of Ansible 2.5, the azure_rm_loadbalancer has no support for 508 | # creating an internal Azure load balancer. Use the Azure CLI instead. 509 | - name: Azure | Manage private Master LB 510 | block: 511 | - name: Azure | Create private Master LB 512 | command: > 513 | az network lb create 514 | --resource-group {{ rg }} 515 | --name ocpMasterLB 516 | --location {{ location }} 517 | --frontend-ip-name masterApiFrontend 518 | --private-ip-address {{ master_lb_private_ip }} 519 | --vnet-name {{ vnet_name }} 520 | --subnet master_subnet 521 | --public-ip-address '' 522 | --backend-pool-name masterAPIBackend 523 | 524 | - name: Azure | Create private Master LB probe 525 | command: > 526 | az network lb probe create 527 | --resource-group {{ rg }} 528 | --lb-name ocpMasterLB 529 | --name masterHealthProbe 530 | --protocol Tcp 531 | --port {{ api_port }} 532 | 533 | - name: Azure | Create private Master LB rule 534 | command: > 535 | az network lb rule create 536 | --resource-group {{ rg }} 537 | --lb-name ocpMasterLB 538 | --name ocpApiHealth 539 | --protocol Tcp 540 | --frontend-port {{ api_port }} 541 | --backend-port {{ api_port }} 542 | --frontend-ip-name masterApiFrontend 543 | --backend-pool-name masterAPIBackend 544 | --probe-name masterHealthProbe 545 | --load-distribution SourceIPProtocol 546 | when: master_lb_private_ip is defined 547 | 548 | - name: Azure | Manage Router LB 549 | azure_rm_loadbalancer: 550 | name: 
ocpRouterLB 551 | location: "{{ location }}" 552 | resource_group: "{{ rg }}" 553 | frontend_ip_configurations: 554 | - name: routerFrontEnd 555 | public_ip_address: routerPublicIP 556 | backend_address_pools: 557 | - name: routerBackEnd 558 | probes: 559 | - name: routerHealthProbe 560 | port: 80 561 | protocol: Tcp 562 | load_balancing_rules: 563 | - name: routerRule 564 | frontend_ip_configuration: routerFrontEnd 565 | backend_address_pool: routerBackEnd 566 | probe: routerHealthProbe 567 | protocol: Tcp 568 | frontend_port: 80 569 | backend_port: 80 570 | load_distribution: SourceIPProtocol 571 | - name: httpsRouterRule 572 | frontend_ip_configuration: routerFrontEnd 573 | backend_address_pool: routerBackEnd 574 | probe: routerHealthProbe 575 | protocol: Tcp 576 | frontend_port: 443 577 | backend_port: 443 578 | load_distribution: SourceIPProtocol 579 | 580 | # availablity Sets 581 | - name: Azure | Manage Availability Sets 582 | azure_rm_availabilityset: 583 | name: "{{ item }}" 584 | state: "{{ state }}" 585 | location: "{{ location }}" 586 | resource_group: "{{ rg }}" 587 | platform_fault_domain_count: 2 588 | sku: Aligned 589 | loop: 590 | - "ocp-master-instances" 591 | - "ocp-infra-instances" 592 | - "ocp-app-instances" 593 | 594 | # availablity Set for CNS Only 595 | - name: Azure | Manage CNS Availability Set 596 | azure_rm_availabilityset: 597 | name: ocp-cns-instances 598 | state: "{{ state }}" 599 | location: "{{ location }}" 600 | resource_group: "{{ rg }}" 601 | platform_fault_domain_count: 2 602 | sku: Aligned 603 | when: 604 | - deploy_cns | default(true) | bool 605 | - not deploy_cns_on_infra | default(false) | bool 606 | 607 | # create VM Nics 608 | # Lacks ability for adding Nics to backend pool of LB 609 | # https://github.com/ansible/ansible/issues/37734 610 | #- name: Azure | Manage VM Nics 611 | # azure_rm_networkinterface: 612 | # name: "{{ item.name }}" 613 | # resource_group: "{{ rg }}" 614 | # virtual_network_name: "{{ vnet_name }}" 615 
| # subnet_name: "{{ item.subnet }}" 616 | # security_group_name: "{{ }}" 617 | # public_ip: false 618 | 619 | - name: Azure | Manage Master Nics 620 | shell: " az network nic create --resource-group {{ rg }} \ 621 | --name ocp-master-{{ item }}VMNic \ 622 | --vnet-name {{ vnet_name }} \ 623 | --subnet master_subnet \ 624 | --network-security-group master-nsg \ 625 | --lb-name ocpMasterLB \ 626 | --lb-address-pools masterAPIBackend \ 627 | --internal-dns-name ocp-master-{{ item }} \ 628 | --public-ip-address '' " 629 | loop: "{{ master_nodes }}" 630 | register: nic 631 | failed_when: 632 | - "'InternalDnsName' not in nic.stderr" 633 | - "nic.rc != 0" 634 | changed_when: false 635 | 636 | - name: Azure | Manage Infra Nics 637 | shell: " az network nic create --resource-group {{ rg }} \ 638 | --name ocp-infra-{{ item }}VMNic \ 639 | --vnet-name {{ vnet_name }} \ 640 | --subnet infra_subnet \ 641 | --network-security-group infra-node-nsg \ 642 | --lb-name ocpRouterLB \ 643 | --lb-address-pools routerBackEnd \ 644 | --internal-dns-name ocp-infra-{{ item }} \ 645 | --public-ip-address '' " 646 | loop: "{{ infra_nodes }}" 647 | register: nic 648 | failed_when: 649 | - "'InternalDnsName' not in nic.stderr" 650 | - "nic.rc != 0" 651 | changed_when: false 652 | 653 | - name: Azure | Manage Node Nics 654 | shell: " az network nic create --resource-group {{ rg }} \ 655 | --name ocp-app-{{ item }}VMNic \ 656 | --vnet-name {{ vnet_name }} \ 657 | --subnet app_subnet \ 658 | --network-security-group node-nsg \ 659 | --internal-dns-name ocp-app-{{ item }} \ 660 | --public-ip-address '' " 661 | loop: "{{ app_nodes }}" 662 | register: nic 663 | failed_when: 664 | - "'InternalDnsName' not in nic.stderr" 665 | - "nic.rc != 0" 666 | changed_when: false 667 | 668 | - name: Azure | Manage CNS Nics 669 | shell: " az network nic create --resource-group {{ rg }} \ 670 | --name ocp-cns-{{ item }}VMNic \ 671 | --vnet-name {{ vnet_name }} \ 672 | --subnet cns_subnet \ 673 |
--network-security-group cns-nsg \ 674 | --internal-dns-name ocp-cns-{{ item }} \ 675 | --public-ip-address '' " 676 | loop: "{{ cns_nodes }}" 677 | register: nic 678 | failed_when: 679 | - "'InternalDnsName' not in nic.stderr" 680 | - "nic.rc != 0" 681 | changed_when: false 682 | when: 683 | - deploy_cns | default(true) | bool 684 | - not deploy_cns_on_infra | default(false) | bool 685 | 686 | - name: Azure | Manage public Bastion Nic 687 | shell: " az network nic create --resource-group {{ rg }} \ 688 | --name bastion-VMNic \ 689 | --vnet-name {{ vnet_name }} \ 690 | --subnet master_subnet \ 691 | --network-security-group bastion-nsg \ 692 | --public-ip-address bastionExternalIP " 693 | register: nic 694 | failed_when: 695 | - "'InternalDnsName' not in nic.stderr" 696 | - "nic.rc != 0" 697 | changed_when: false 698 | when: bastion_private_ip is not defined 699 | 700 | # The "--public-ip-address" parameter can be removed if the host on 701 | # which the deploy.yml playbook runs can connect to the private 702 | # bastion IP address. 703 | - name: Azure | Manage public/private Bastion Nic 704 | shell: " az network nic create --resource-group {{ rg }} \ 705 | --name bastion-VMNic \ 706 | --vnet-name {{ vnet_name }} \ 707 | --subnet master_subnet \ 708 | --network-security-group bastion-nsg \ 709 | --public-ip-address bastionExternalIP \ 710 | --private-ip-address {{ bastion_private_ip }} " 711 | register: nic 712 | failed_when: 713 | - "'InternalDnsName' not in nic.stderr" 714 | - "nic.rc != 0" 715 | changed_when: false 716 | when: bastion_private_ip is defined 717 | 718 | # Create VMs 719 | # 720 | # We use the CLI option "--no-wait" to create all VMs in parallel, and 721 | # then wait in an Ansible loop until all of them exist. 
722 | 723 | # Data disks: 724 | # 32 GB Container Storage for emptydir, /var/lib/origin 725 | # 32 GB Docker VG 726 | # 32 GB ETCD 727 | - name: Azure | Schedule Master VM creation 728 | command: > 729 | az vm create 730 | --resource-group {{ rg }} 731 | --availability-set ocp-master-instances 732 | --name ocp-master-{{ item }} 733 | --size {{ vm_size_master }} 734 | --storage-sku Standard_LRS 735 | --data-disk-sizes 32 32 32 736 | --nics ocp-master-{{ item }}VMNic 737 | --image RedHat:RHEL:7-RAW:latest 738 | --admin-username {{ admin_user }} 739 | --authentication-type ssh 740 | --ssh-dest-key-path /home/{{ admin_user }}/.ssh/authorized_keys 741 | --ssh-key-value "{{ admin_pubkey }}" 742 | --no-wait 743 | loop: "{{ master_nodes }}" 744 | 745 | - set_fact: 746 | infra_cns_size: 512 747 | when: 748 | - deploy_cns | default(true) | bool 749 | - deploy_cns_on_infra | default(false) | bool 750 | 751 | # Data disks: 752 | # 64 GB Container Storage for emptydir, /var/lib/origin 753 | # 32 GB Docker VG 754 | # 512 GB Gluster brick (optional) 755 | - name: Azure | Schedule Infra VM creation 756 | command: > 757 | az vm create 758 | --resource-group {{ rg }} 759 | --availability-set ocp-infra-instances 760 | --name ocp-infra-{{ item }} 761 | --size {{ vm_size_infra }} 762 | --storage-sku Standard_LRS 763 | --data-disk-sizes 64 32 {{ ocs_infra_cluster_usable_storage | default('') }} 764 | --nics ocp-infra-{{ item }}VMNic 765 | --image RedHat:RHEL:7-RAW:latest 766 | --admin-username {{ admin_user }} 767 | --authentication-type ssh 768 | --ssh-dest-key-path /home/{{ admin_user }}/.ssh/authorized_keys 769 | --ssh-key-value "{{ admin_pubkey }}" 770 | --no-wait 771 | loop: "{{ infra_nodes }}" 772 | 773 | # Data disks: 774 | # 64 GB Container Storage for emptydir, /var/lib/origin 775 | # 32 GB Docker VG 776 | - name: Azure | Schedule App VM creation 777 | command: > 778 | az vm create 779 | --resource-group {{ rg }} 780 | --availability-set ocp-app-instances 781 | --name 
ocp-app-{{ item }} 782 | --size {{ vm_size_node }} 783 | --storage-sku Standard_LRS 784 | --data-disk-sizes 64 32 {{ ocs_app_cluster_usable_storage if (infra_nodes|length + app_nodes|length >= 6) else '' }} 785 | --nics ocp-app-{{ item }}VMNic 786 | --image RedHat:RHEL:7-RAW:latest 787 | --admin-username {{ admin_user }} 788 | --authentication-type ssh 789 | --ssh-dest-key-path /home/{{ admin_user }}/.ssh/authorized_keys 790 | --ssh-key-value "{{ admin_pubkey }}" 791 | --no-wait 792 | loop: "{{ app_nodes }}" 793 | 794 | # Data disks: 795 | # DEPRICATED No longer creates CNS Nodes 796 | # 64 GB Container Storage for emptydir, /var/lib/origin 797 | # 32 GB Docker VG 798 | # 512 GB Gluster brick 799 | # 800 | - name: Azure | Schedule CNS VM creation 801 | command: > 802 | az vm create 803 | --resource-group {{ rg }} 804 | --availability-set ocp-cns-instances 805 | --name ocp-cns-{{ item }} 806 | --size {{ vm_size_cns }} 807 | --storage-sku Standard_LRS 808 | --data-disk-sizes 64 32 512 809 | --nics ocp-cns-{{ item }}VMNic 810 | --image RedHat:RHEL:7-RAW:latest 811 | --admin-username {{ admin_user }} 812 | --authentication-type ssh 813 | --ssh-dest-key-path /home/{{ admin_user }}/.ssh/authorized_keys 814 | --ssh-key-value "{{ admin_pubkey }}" 815 | --no-wait 816 | loop: "{{ cns_nodes }}" 817 | when: 818 | - deploy_cns | default(true) | bool 819 | - not deploy_cns_on_infra | default(false) | bool 820 | 821 | - name: Azure | Schedule Bastion VM creation 822 | command: > 823 | az vm create 824 | --resource-group {{ rg }} 825 | --name bastion 826 | --size {{ vm_size_bastion }} 827 | --storage-sku Standard_LRS 828 | --nics bastion-VMNic 829 | --image RedHat:RHEL:7-RAW:latest 830 | --admin-username {{ admin_user }} 831 | --authentication-type ssh 832 | --ssh-dest-key-path /home/{{ admin_user }}/.ssh/authorized_keys 833 | --ssh-key-value "{{ admin_pubkey }}" 834 | --no-wait 835 | 836 | - name: Azure | Wait for master VM creation 837 | command: az vm list -g "{{ rg }}" --query 
"[?provisioningState=='Succeeded'].name" -o tsv 838 | register: vm_list 839 | retries: 30 840 | delay: 60 841 | until: vm_list.stdout_lines | map("regex_search", "ocp-master-[0-9]+") | select("string") | list | length == master_nodes | length 842 | 843 | # Infra VMs 844 | 845 | - name: Azure | Wait for infra VM creation 846 | command: az vm list -g "{{ rg }}" --query "[?provisioningState=='Succeeded'].name" -o tsv 847 | register: vm_list 848 | retries: 30 849 | delay: 60 850 | until: vm_list.stdout_lines | map("regex_search", "ocp-infra-[0-9]+") | select("string") | list | length == infra_nodes | length 851 | 852 | - name: Azure | Wait for app VM creation 853 | command: az vm list -g "{{ rg }}" --query "[?provisioningState=='Succeeded'].name" -o tsv 854 | register: vm_list 855 | retries: 30 856 | delay: 60 857 | until: vm_list.stdout_lines | map("regex_search", "ocp-app-[0-9]+") | select("string") | list | length == app_nodes | length 858 | 859 | 860 | # CNS VMs 861 | - block: 862 | - name: Azure | Wait for CNS VM creation 863 | command: az vm list -g "{{ rg }}" --query "[?provisioningState=='Succeeded'].name" -o tsv 864 | register: vm_list 865 | retries: 30 866 | delay: 60 867 | until: vm_list.stdout_lines | map("regex_search", "ocp-cns-[0-9]+") | select("string") | list | length == cns_nodes | length 868 | when: 869 | - deploy_cns | default(true) | bool 870 | - not deploy_cns_on_infra | default(false) | bool 871 | 872 | - name: Azure | Wait for bastion VM creation 873 | command: az vm list -g "{{ rg }}" --query "[?provisioningState=='Succeeded'].name" -o tsv 874 | register: vm_list 875 | retries: 30 876 | delay: 60 877 | until: "'bastion' in vm_list.stdout_lines" 878 | 879 | # Registry 880 | - name: Azure | Manage Registry Storage Account 881 | azure_rm_storageaccount: 882 | resource_group: "{{ rg }}" 883 | name: "{{ registry_storage_account }}" 884 | state: "{{ state }}" 885 | location: "{{ location }}" 886 | account_type: Standard_LRS 887 | - shell: az storage 
account keys list --account-name "{{ registry_storage_account }}" \ 888 | --resource-group "{{ rg }}" --query "[?keyName == 'key1'].value" -o tsv 889 | register: key 890 | 891 | - set_fact: 892 | registry_storage_account_key: "{{ key.stdout }}" 893 | 894 | - name: Azure | template ansible hosts 895 | template: 896 | src: hosts.j2 897 | dest: hosts 898 | backup: yes 899 | 900 | - name: Azure | SSH config file exists with proper permissions 901 | file: 902 | state: touch 903 | path: ~/.ssh/config 904 | mode: 0600 905 | 906 | - name: Azure | update SSH proxy config 907 | blockinfile: 908 | create: yes 909 | path: ~/.ssh/config 910 | insertafter: EOF 911 | marker: "# {mark} {{ rg }} ANSIBLE MANAGED BLOCK " 912 | block: | 913 | Host bastion 914 | HostName {{ rg }}b.{{ location }}.cloudapp.azure.com 915 | User {{ admin_user }} 916 | IdentityFile {{ admin_privkey }} 917 | StrictHostKeyChecking no 918 | UserKnownHostsFile /dev/null 919 | LogLevel QUIET 920 | Host ocp* 921 | ProxyCommand ssh {{ admin_user }}@bastion -W %h:%p 922 | IdentityFile {{ admin_privkey }} 923 | User {{ admin_user }} 924 | StrictHostKeyChecking no 925 | UserKnownHostsFile /dev/null 926 | LogLevel QUIET 927 | 928 | - name: Refresh inventory to ensure new instaces exist in inventory 929 | meta: refresh_inventory 930 | -------------------------------------------------------------------------------- /roles/azure_infra/tasks/bastion.yml: -------------------------------------------------------------------------------- 1 | #- set_fact: 2 | # router_lb_ip: "{{ hostvars['localhost']['router_lb_ip'] }}" 3 | # registry_storage_account_key: "{{ hostvars['localhost']['registry_storage_account_key'] }}" 4 | - name: add bastion host 5 | add_host: 6 | name: "{{ rg }}b.{{ location }}.cloudapp.azure.com" 7 | groups: bastions 8 | ansible_user: "{{ admin_user }}" 9 | ansible_become: True 10 | 11 | # The Azure VMs are provisioned asynchronously. 
It can take some time 12 | # until they are reachable via SSH, even after Azure reports that the 13 | # provisioning state is "Succeeded". 14 | - name: Wait for nodes to become reachable via SSH 15 | wait_for_connection: 16 | sleep: 30 17 | timeout: 1200 18 | connect_timeout: 10 19 | delegate_to: "{{ groups['bastions'][0] }}" 20 | 21 | - name: Azure | Bastion RHSM increase timeout 22 | lineinfile: 23 | dest: /etc/rhsm/rhsm.conf 24 | line: 'server_timeout=600' 25 | insertafter: '^proxy_password =' 26 | delegate_to: "{{ groups['bastions'][0] }}" 27 | 28 | - name: Azure | Bastion RHSM unregister 29 | redhat_subscription: 30 | state: absent 31 | register: task_result 32 | until: task_result is success 33 | retries: 10 34 | delay: 30 35 | delegate_to: "{{ groups['bastions'][0] }}" 36 | 37 | - name: Azure | Bastion RHSM Activation Key 38 | redhat_subscription: 39 | state: "{{ state }}" 40 | activationkey: "{{ rhsm_key }}" 41 | org_id: "{{ rhsm_org }}" 42 | pool_ids: "{{ rhsm_broker_pool }}" 43 | delegate_to: "{{ groups['bastions'][0] }}" 44 | when: 45 | - rhsm_key is defined and rhsm_key != "" 46 | - rhsm_org is defined and rhsm_org != "" 47 | 48 | - name: Azure | Bastion RHSM Username - Password 49 | redhat_subscription: 50 | state: "{{ state }}" 51 | username: "{{ rhsm_user }}" 52 | password: "{{ rhsm_pass }}" 53 | pool_ids: "{{ rhsm_broker_pool }}" 54 | delegate_to: "{{ groups['bastions'][0] }}" 55 | when: 56 | - rhsm_user is defined and rhsm_user != "" 57 | - rhsm_pass is defined and rhsm_pass != "" 58 | 59 | - name: Azure | Bastion disable all repos 60 | rhsm_repository: 61 | name: '*' 62 | state: disabled 63 | delegate_to: "{{ groups['bastions'][0] }}" 64 | 65 | - name: Azure | Bastion enable repos 66 | rhsm_repository: 67 | name: "{{ rhsm_repos }}" 68 | state: enabled 69 | delegate_to: "{{ groups['bastions'][0] }}" 70 | 71 | - name: Azure | Bastion install required packages 72 | yum: 73 | name: "{{ bastion_pkgs }}" 74 | state: present 75 | delegate_to: "{{ 
groups['bastions'][0] }}" 76 | 77 | - name: Azure | Bastion template ansible hosts 78 | template: 79 | src: hosts.j2 80 | dest: hosts 81 | backup: yes 82 | delegate_to: "{{ groups['bastions'][0] }}" 83 | 84 | - name: Azure | Bastion copy files 85 | copy: 86 | src: ansible.cfg 87 | dest: ansible.cfg 88 | delegate_to: "{{ groups['bastions'][0] }}" 89 | 90 | - block: 91 | - name: Azure | Add ocpadmin group 92 | group: 93 | name: ocpadmin 94 | delegate_to: "{{ groups['bastions'][0] }}" 95 | 96 | - name: Allow password-less sudo for ocpadmin group 97 | copy: 98 | dest: /etc/sudoers.d/ocpadmin 99 | content: "%ocpadmin ALL=(ALL) NOPASSWD: ALL" 100 | delegate_to: "{{ groups['bastions'][0] }}" 101 | 102 | - name: Azure | Add local user 103 | user: 104 | name: "{{ item.key }}" 105 | groups: ocpadmin 106 | append: yes 107 | delegate_to: "{{ groups['bastions'][0] }}" 108 | with_dict: "{{ bastion_users }}" 109 | 110 | - name: Azure | Add authorized key 111 | authorized_key: 112 | user: "{{ item.key }}" 113 | key: "{{ item.value }}" 114 | delegate_to: "{{ groups['bastions'][0] }}" 115 | with_dict: "{{ bastion_users }}" 116 | when: bastion_users != {} 117 | 118 | - block: 119 | - name: Azure | Install dnsmasq 120 | yum: 121 | name: dnsmasq 122 | state: installed 123 | delegate_to: "{{ groups['bastions'][0] }}" 124 | 125 | - name: Azure | Configure private dnsmasq domain 126 | template: 127 | src: private-dnsmasq.conf.j2 128 | dest: /etc/dnsmasq.d/private-dnsmasq.conf 129 | delegate_to: "{{ groups['bastions'][0] }}" 130 | notify: Restart dnsmasq 131 | 132 | - name: Azure | Get upstream nameserver 133 | shell: grep '^nameserver ' /etc/resolv.conf | awk '{ print $2 }' 134 | register: cmd 135 | delegate_to: "{{ groups['bastions'][0] }}" 136 | 137 | - set_fact: 138 | upstream_dns: "{{ cmd.stdout }}" 139 | 140 | - name: Azure | Update resolv.conf 141 | lineinfile: 142 | path: /etc/resolv.conf 143 | regexp: "^nameserver " 144 | line: "nameserver 127.0.0.1" 145 | delegate_to: "{{ 
groups['bastions'][0] }}" 146 | 147 | - name: Azure | Create origin-dns.conf 148 | copy: 149 | content: | 150 | no-resolv 151 | domain-needed 152 | no-negcache 153 | max-cache-ttl=1 154 | enable-dbus 155 | bind-interfaces 156 | listen-address=127.0.0.1 157 | dest: /etc/dnsmasq.d/origin-dns.conf 158 | delegate_to: "{{ groups['bastions'][0] }}" 159 | notify: Restart dnsmasq 160 | 161 | - name: Azure | Create origin-upstream-dns.conf 162 | copy: 163 | content: server={{ upstream_dns }} 164 | dest: /etc/dnsmasq.d/origin-upstream-dns.conf 165 | delegate_to: "{{ groups['bastions'][0] }}" 166 | notify: Restart dnsmasq 167 | when: upstream_dns != '127.0.0.1' 168 | when: dns_domain_nameservers is defined 169 | -------------------------------------------------------------------------------- /roles/azure_infra/tasks/cloudprovider.yml: -------------------------------------------------------------------------------- 1 | - name: azure subscription id 2 | shell: az account show -o tsv --query 'id' 3 | register: subscription_id 4 | changed_when: false 5 | run_once: true 6 | delegate_to: localhost 7 | 8 | - name: azure subscription name 9 | shell: az account show -o tsv --query 'name' 10 | register: subscription_name 11 | changed_when: false 12 | run_once: true 13 | delegate_to: localhost 14 | 15 | - name: azure tenant ID 16 | shell: az account show -o tsv --query 'tenantId' 17 | register: tenant_id 18 | changed_when: false 19 | run_once: true 20 | delegate_to: localhost 21 | 22 | - block: 23 | - name: azure ServicePrincipal ID 24 | shell: az ad app show --id "http://{{sp_name}}" -o tsv --query 'appId' 25 | register: sp_app 26 | changed_when: false 27 | run_once: true 28 | delegate_to: localhost 29 | when: sp_app_id is undefined 30 | 31 | -------------------------------------------------------------------------------- /roles/azure_infra/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Azure | Import 
cloudprovider.yml 3 | include_tasks: cloudprovider.yml 4 | 5 | - name: Azure | Import azure_deploy.yml 6 | include_tasks: azure_deploy.yml 7 | tags: deploy 8 | 9 | - name: Azure | Import bastion.yml 10 | include_tasks: bastion.yml 11 | -------------------------------------------------------------------------------- /roles/azure_infra/templates/hosts.j2: -------------------------------------------------------------------------------- 1 | [OSEv3:children] 2 | masters 3 | etcd 4 | nodes 5 | {% if deploy_cns | default('true') | bool %} 6 | glusterfs 7 | glusterfs_registry 8 | {% endif %} 9 | 10 | [OSEv3:vars] 11 | # fix for bug 12 | # https://access.redhat.com/solutions/3480921 13 | oreg_url_master=registry.access.redhat.com/openshift3/ose-${component}:${version} 14 | oreg_url_node=registry.access.redhat.com/openshift3/ose-${component}:${version} 15 | 16 | 17 | openshift_examples_modify_imagestreams=true 18 | 19 | {% if deploy_cns | default(true) | bool %} 20 | 21 | # Gluster Default SC 22 | openshift_storage_glusterfs_storageclass_default=true 23 | openshift_storageclass_default=False 24 | 25 | {% else %} 26 | {% endif %} 27 | 28 | ansible_ssh_user="{{ admin_user }}" 29 | ansible_become=true 30 | openshift_master_api_port=443 31 | openshift_master_console_port=443 32 | openshift_hosted_router_replicas={{ infra_nodes | length }} 33 | openshift_hosted_registry_replicas={{ infra_nodes | length }} 34 | openshift_master_cluster_method=native 35 | 36 | openshift_cloudprovider_kind=azure 37 | {% if sp_app is defined %} 38 | openshift_cloudprovider_azure_client_id="{{ sp_app.stdout }}" 39 | {% elif sp_app_id is defined %} 40 | openshift_cloudprovider_azure_client_id="{{ sp_app_id }}" 41 | {% endif %} 42 | openshift_cloudprovider_azure_client_secret="{{ sp_secret }}" 43 | openshift_cloudprovider_azure_tenant_id="{{ tenant_id.stdout }}" 44 | openshift_cloudprovider_azure_subscription_id="{{ subscription_id.stdout }}" 45 | openshift_cloudprovider_azure_resource_group={{ rg }} 46
| openshift_cloudprovider_azure_location={{ location }} 47 | openshift_cloudprovider_azure_vnet_name={{ vnet_name }} 48 | openshift_cloudprovider_azure_cloud=AzurePublicCloud 49 | openshift_cloudprovider_azure_security_group_name='node-nsg' 50 | openshift_cloudprovider_azure_availability_set_name='ocp-app-instances' 51 | 52 | {% if master_lb_private_ip is not defined and master_lb_dns is not defined %} 53 | openshift_master_cluster_hostname={{ rg }}.{{ location }}.cloudapp.azure.com 54 | openshift_master_cluster_public_hostname={{ rg }}.{{ location }}.cloudapp.azure.com 55 | {% elif master_lb_dns is defined %} 56 | 57 | openshift_master_cluster_hostname={{ master_lb_dns }} 58 | openshift_master_cluster_public_hostname={{ master_lb_dns }} 59 | {% else %} 60 | openshift_master_cluster_hostname=ocp-master-1 61 | openshift_master_cluster_public_hostname={{ master_lb_dns }} 62 | {% endif %} 63 | 64 | {% if router_lb_default_subdomain is defined %} 65 | openshift_master_default_subdomain={{ router_lb_default_subdomain }} 66 | {% else %} 67 | openshift_master_default_subdomain={{ router_lb_ip }}.nip.io 68 | {% endif %} 69 | 70 | openshift_deployment_type=openshift-enterprise 71 | openshift_release=v3.10 72 | 73 | {% if router_cert is defined %} 74 | #################### 75 | ## custom certs 76 | ## Router 77 | ##################### 78 | openshift_hosted_router_certificate={{ router_cert }} 79 | {% endif %} 80 | 81 | {% if master_cert is defined %} 82 | # MASTER/API 83 | openshift_master_overwrite_named_certificates=true 84 | openshift_master_named_certificates={{ master_cert }} 85 | {% endif %} 86 | 87 | {% if openshift_master_identity_providers is defined %} 88 | openshift_master_identity_providers={{ openshift_master_identity_providers }} 89 | {% else %} 90 | openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider'}] 91 | {% endif %} 92 | 93 | {% if openshift_master_htpasswd_users is 
defined %} 94 | openshift_master_htpasswd_users={{ openshift_master_htpasswd_users }} 95 | {% else %} 96 | # admin:redhat 97 | openshift_master_htpasswd_users={'admin': '$apr1$VZeARzoK$zYM/4c82PKDeYmw6/RvOV/'} 98 | {% endif %} 99 | openshift_master_manage_htpasswd=true 100 | 101 | openshift_node_local_quota_per_fsgroup=512Mi 102 | os_sdn_network_plugin_name=redhat/openshift-ovs-multitenant 103 | 104 | {% if osm_cluster_network_cidr is defined %} 105 | osm_cluster_network_cidr={{ osm_cluster_network_cidr }} 106 | {% endif %} 107 | {% if openshift_portal_net is defined %} 108 | openshift_portal_net={{ openshift_portal_net }} 109 | {% endif %} 110 | 111 | # Docker 112 | container_runtime_docker_storage_setup_device=/dev/disk/azure/scsi1/lun1 113 | container_runtime_docker_storage_type=overlay2 114 | 115 | # disable cockpit 116 | osm_use_cockpit=false 117 | openshift_enable_service_catalog=true 118 | 119 | {% if osm_project_request_message is defined %} 120 | osm_project_request_message="{{ osm_project_request_message }}" 121 | {% endif %} 122 | 123 | {% if deploy_cns | default(true) | bool %} 124 | # CNS Cnfiguration 125 | # Container image to use for glusterfs pods 126 | openshift_storage_glusterfs_image="registry.access.redhat.com/rhgs3/rhgs-server-rhel7:v3.10" 127 | 128 | # Container image to use for glusterblock-provisioner pod 129 | openshift_storage_glusterfs_block_image="registry.access.redhat.com/rhgs3/rhgs-gluster-block-prov-rhel7:v3.10" 130 | 131 | # Container image to use for heketi pods 132 | openshift_storage_glusterfs_heketi_image="registry.access.redhat.com/rhgs3/rhgs-volmanager-rhel7:v3.10" 133 | 134 | # logging 135 | {% if deploy_logging | default('True') | bool %} 136 | openshift_logging_install_logging=True 137 | openshift_logging_es_pvc_dynamic=true 138 | openshift_logging_es_pvc_size={{ logging_volume_size }}Gi 139 | openshift_logging_es_cluster_size={{ infra_nodes|length }} 140 | 
openshift_logging_es_pvc_storage_class_name='glusterfs-registry-block' 141 | openshift_logging_kibana_nodeselector={"node-role.kubernetes.io/infra": "true"} 142 | openshift_logging_curator_nodeselector={"node-role.kubernetes.io/infra": "true"} 143 | openshift_logging_es_nodeselector={"node-role.kubernetes.io/infra": "true"} 144 | {% else %} 145 | openshift_logging_install_logging=False 146 | {% endif %} 147 | 148 | # metrics 149 | {% if deploy_metrics | default('True') | bool %} 150 | openshift_metrics_install_metrics=True 151 | openshift_metrics_storage_kind=dynamic 152 | openshift_metrics_storage_volume_size={{ metrics_volume_size }}Gi 153 | openshift_metrics_cassandra_pvc_storage_class_name='glusterfs-registry-block' 154 | openshift_metrics_hawkular_nodeselector={"node-role.kubernetes.io/infra": "true"} 155 | openshift_metrics_cassandra_nodeselector={"node-role.kubernetes.io/infra": "true"} 156 | openshift_metrics_heapster_nodeselector={"node-role.kubernetes.io/infra": "true"} 157 | {% else %} 158 | openshift_metrics_install_metrics=False 159 | {% endif %} 160 | 161 | # prometheus 162 | {% if deploy_prometheus | default(True) | bool %} 163 | openshift_hosted_prometheus_deploy=True 164 | openshift_prometheus_storage_kind=dynamic 165 | openshift_prometheus_storage_type=pvc 166 | openshift_prometheus_storage_volume_size={{ prometheus_volume_size | default('25') }}Gi 167 | openshift_prometheus_storage_class='glusterfs-registry-block' 168 | {% else %} 169 | openshift_hosted_prometheus_deploy=False 170 | {% endif %} 171 | 172 | # grafana 173 | {% if deploy_grafan | default(true) | bool %} 174 | openshift_grafana_state=present 175 | openshift_grafana_node_exporter=true 176 | openshift_grafana_storage_type=pvc 177 | openshift_grafana_storage_class='glusterfs-registry-block' 178 | openshift_grafana_prometheus_namespace='openshift-metrics' 179 | openshift_grafana_prometheus_serviceaccount='prometheus' 180 | openshift_grafana_prometheus_route='prometheus' 181 | {% else %}
182 | openshift_grafana_state=absent 183 | {% endif %} 184 | 185 | # Cluster 1 186 | # Total Storage allocated (GB) = {{ ocs_infra_cluster_allocated_storage }} 187 | # Total Storage available (GB) = {{ ocs_infra_cluster_usable_storage }} 188 | 189 | {% if (infra_nodes|length + app_nodes|length) >= 6 %} 190 | # Cluster 2 191 | # Total Storage allocated (GB) = 0 192 | # Total Storage available (GB) = {{ ocs_app_cluster_usable_storage }} 193 | 194 | # CNS storage cluster 195 | openshift_storage_glusterfs_namespace=app-storage 196 | openshift_storage_glusterfs_storageclass=true 197 | openshift_storage_glusterfs_storageclass_default=true 198 | openshift_storage_glusterfs_block_deploy=true 199 | openshift_storage_glusterfs_block_host_vol_create=false 200 | openshift_storage_glusterfs_block_host_vol_size=100 201 | openshift_storage_glusterfs_block_storageclass=true 202 | openshift_storage_glusterfs_block_storageclass_default=false 203 | {% endif %} 204 | 205 | # CNS storage for OpenShift infrastructure 206 | openshift_storage_glusterfs_registry_namespace=infra-storage 207 | openshift_storage_glusterfs_registry_storageclass=false 208 | openshift_storage_glusterfs_registry_block_deploy=true 209 | openshift_storage_glusterfs_registry_block_host_vol_create=true 210 | openshift_storage_glusterfs_registry_block_host_vol_size={{ ocs_infra_cluster_allocated_storage }} 211 | openshift_storage_glusterfs_registry_block_storageclass=true 212 | openshift_storage_glusterfs_registry_block_storageclass_default=false 213 | {% else %} 214 | # StorageClass if not CNS 215 | openshift_storageclass_default=true 216 | openshift_storageclass_name=azure-storage 217 | openshift_storageclass_provisioner=azure-disk 218 | openshift_storageclass_parameters={'storageaccounttype': 'Standard_LRS', 'kind': 'managed'} 219 | openshift_storageclass_default=true 220 | {% endif %} 221 | 222 | # Setup azure blob registry storage 223 | openshift_hosted_registry_storage_kind=object 224 | 
openshift_hosted_registry_storage_provider=azure_blob 225 | openshift_hosted_registry_storage_azure_blob_accountname={{ registry_storage_account }} 226 | openshift_hosted_registry_storage_azure_blob_accountkey={{ registry_storage_account_key }} 227 | openshift_hosted_registry_storage_azure_blob_container=registry 228 | openshift_hosted_registry_storage_azure_blob_realm=core.windows.net 229 | 230 | [masters] 231 | {% for i in master_nodes %} 232 | ocp-master-{{ i }} 233 | {% endfor %} 234 | 235 | [infra] 236 | {% for i in infra_nodes %} 237 | ocp-infra-{{ i }} 238 | {% endfor %} 239 | 240 | [etcd] 241 | {% for i in master_nodes %} 242 | ocp-master-{{ i }} 243 | {% endfor %} 244 | 245 | [nodes] 246 | {% for i in master_nodes %} 247 | ocp-master-{{ i }} openshift_node_group_name='node-config-master' openshift_schedulable=true 248 | {% endfor %} 249 | {% for i in infra_nodes %} 250 | ocp-infra-{{ i }} openshift_node_group_name='node-config-infra' 251 | {% endfor %} 252 | {% for i in app_nodes %} 253 | ocp-app-{{ i }} openshift_node_group_name='node-config-compute' 254 | {% endfor %} 255 | 256 | {% if deploy_cns | default(true) | bool and deploy_cns is defined %} 257 | {% if ( infra_nodes|length + app_nodes|length ) >= 6 %} 258 | [glusterfs_registry] 259 | {% for i in infra_nodes %} 260 | ocp-infra-{{ i }} glusterfs_zone={{ i }} glusterfs_devices='[ "/dev/disk/azure/scsi1/lun2" ]' 261 | {% endfor %} 262 | 263 | [glusterfs] 264 | {% for i in app_nodes %} 265 | ocp-app-{{ i }} glusterfs_zone={{ i }} glusterfs_devices='[ "/dev/disk/azure/scsi1/lun2" ]' 266 | {% endfor %} 267 | {% else %} 268 | [glusterfs_registry] 269 | [glusterfs] 270 | {% for i in infra_nodes %} 271 | ocp-infra-{{ i }} glusterfs_zone={{ i }} glusterfs_devices='[ "/dev/disk/azure/scsi1/lun2" ]' 272 | {% endfor %} 273 | {% endif %} 274 | {% endif %} 275 | -------------------------------------------------------------------------------- /roles/azure_infra/templates/private-dnsmasq.conf.j2: 
-------------------------------------------------------------------------------- 1 | {% for item in dns_domain_nameservers.keys() %} 2 | server=/{{ item }}/{{ dns_domain_nameservers[item] }} 3 | {% endfor %} 4 | -------------------------------------------------------------------------------- /roles/azure_infra/tests/inventory: -------------------------------------------------------------------------------- 1 | localhost 2 | 3 | -------------------------------------------------------------------------------- /roles/azure_infra/tests/test.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | remote_user: root 4 | roles: 5 | - azure_infra -------------------------------------------------------------------------------- /roles/azure_infra/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # vars file for azure_infra -------------------------------------------------------------------------------- /roles/ocp_post/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # handlers file for ocp_post 3 | 4 | - name: Restart dnsmasq 5 | systemd: 6 | name: dnsmasq 7 | state: restarted 8 | -------------------------------------------------------------------------------- /roles/ocp_post/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # tasks file for ocp_post 3 | 4 | - name: Azure | Configure private dnsmasq domains 5 | template: 6 | src: private-dnsmasq.conf.j2 7 | dest: /etc/dnsmasq.d/private-dnsmasq.conf 8 | notify: Restart dnsmasq 9 | when: dns_domain_nameservers is defined 10 | -------------------------------------------------------------------------------- /roles/ocp_post/templates/private-dnsmasq.conf.j2: -------------------------------------------------------------------------------- 1 | {% for item in dns_domain_nameservers.keys() %} 2 
| server=/{{ item }}/{{ dns_domain_nameservers[item] }} 3 | {% endfor %} 4 | -------------------------------------------------------------------------------- /roles/ocp_pre/README.md: -------------------------------------------------------------------------------- 1 | Role Name 2 | ========= 3 | 4 | A brief description of the role goes here. 5 | 6 | Requirements 7 | ------------ 8 | 9 | Any pre-requisites that may not be covered by Ansible itself or the role should be mentioned here. For instance, if the role uses the EC2 module, it may be a good idea to mention in this section that the boto package is required. 10 | 11 | Role Variables 12 | -------------- 13 | 14 | A description of the settable variables for this role should go here, including any variables that are in defaults/main.yml, vars/main.yml, and any variables that can/should be set via parameters to the role. Any variables that are read from other roles and/or the global scope (ie. hostvars, group vars, etc.) should be mentioned here as well. 15 | 16 | Dependencies 17 | ------------ 18 | 19 | A list of other roles hosted on Galaxy should go here, plus any details in regards to parameters that may need to be set for other roles, or variables that are used from other roles. 20 | 21 | Example Playbook 22 | ---------------- 23 | 24 | Including an example of how to use your role (for instance, with variables passed in as parameters) is always nice for users too: 25 | 26 | - hosts: servers 27 | roles: 28 | - { role: username.rolename, x: 42 } 29 | 30 | License 31 | ------- 32 | 33 | BSD 34 | 35 | Author Information 36 | ------------------ 37 | 38 | An optional section for the role authors to include contact information, or a website (HTML is not allowed). 
39 | -------------------------------------------------------------------------------- /roles/ocp_pre/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # defaults file for ocp_pre -------------------------------------------------------------------------------- /roles/ocp_pre/files/storageClass.yml: -------------------------------------------------------------------------------- 1 | kind: StorageClass 2 | apiVersion: storage.k8s.io/v1 3 | metadata: 4 | name: standard 5 | annotations: 6 | storageclass.kubernetes.io/is-default-class: "true" 7 | provisioner: kubernetes.io/azure-disk 8 | parameters: 9 | storageaccounttype: Standard_LRS 10 | kind: managed 11 | -------------------------------------------------------------------------------- /roles/ocp_pre/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # handlers file for ocp_pre -------------------------------------------------------------------------------- /roles/ocp_pre/meta/main.yml: -------------------------------------------------------------------------------- 1 | galaxy_info: 2 | author: your name 3 | description: your description 4 | company: your company (optional) 5 | 6 | # If the issue tracker for your role is not on github, uncomment the 7 | # next line and provide a value 8 | # issue_tracker_url: http://example.com/issue/tracker 9 | 10 | # Some suggested licenses: 11 | # - BSD (default) 12 | # - MIT 13 | # - GPLv2 14 | # - GPLv3 15 | # - Apache 16 | # - CC-BY 17 | license: license (GPLv2, CC-BY, etc) 18 | 19 | min_ansible_version: 1.2 20 | 21 | # If this a Container Enabled role, provide the minimum Ansible Container version. 22 | # min_ansible_container_version: 23 | 24 | # Optionally specify the branch Galaxy will use when accessing the GitHub 25 | # repo for this role. During role install, if no tags are available, 26 | # Galaxy will use this branch. 
During import Galaxy will access files on 27 | # this branch. If Travis integration is configured, only notifications for this 28 | # branch will be accepted. Otherwise, in all cases, the repo's default branch 29 | # (usually master) will be used. 30 | #github_branch: 31 | 32 | # 33 | # platforms is a list of platforms, and each platform has a name and a list of versions. 34 | # 35 | # platforms: 36 | # - name: Fedora 37 | # versions: 38 | # - all 39 | # - 25 40 | # - name: SomePlatform 41 | # versions: 42 | # - all 43 | # - 1.0 44 | # - 7 45 | # - 99.99 46 | 47 | galaxy_tags: [] 48 | # List tags for your role here, one per line. A tag is a keyword that describes 49 | # and categorizes the role. Users find roles by searching for tags. Be sure to 50 | # remove the '[]' above, if you add tags to this list. 51 | # 52 | # NOTE: A tag is limited to a single word comprised of alphanumeric characters. 53 | # Maximum 20 tags per role. 54 | 55 | dependencies: [] 56 | # List your role dependencies here, one per line. Be sure to remove the '[]' above, 57 | # if you add dependencies to this list. 
-------------------------------------------------------------------------------- /roles/ocp_pre/tasks/main.yml: --------------------------------------------------------------------------------
1 | ---
2 | # Hack for grafana until fixed officially
3 | - name: Azure | block in file
4 |   blockinfile:
5 |     path: /usr/share/ansible/openshift-ansible/roles/openshift_grafana/tasks/install_grafana.yaml
6 |     insertbefore: " Add new datasource to grafana"
7 |     block: |
8 |       - name: "Wait for grafana service"
9 |         uri:
10 |           url: "{% raw %}{{ grafana_route }}{% endraw %}/api/datasources"
11 |           status_code: 200
12 |         register: result
13 |         until: result.status == 200
14 |         retries: 60
15 |         delay: 1
16 |         delegate_to: localhost
17 |         run_once: true
18 |   become: true
19 | 
20 | - name: Azure | Import subscribe.yml
21 |   include_tasks: subscribe.yml
22 | 
23 | - name: Azure | Import storage_container.yml
24 |   include_tasks: storage_container.yml
25 | 
26 | - name: Azure | Import storage_etcd.yml
27 |   include_tasks: storage_etcd.yml
28 |   when: inventory_hostname in groups.masters
29 | 
-------------------------------------------------------------------------------- /roles/ocp_pre/tasks/storage_container.yml: --------------------------------------------------------------------------------
1 | - name: Create XFS filesystem
2 |   filesystem:
3 |     fstype: xfs
4 |     dev: /dev/disk/azure/scsi1/lun0
5 | 
6 | - name: Set proper permissions on mount point
7 |   file:
8 |     path: /var/lib/origin/openshift.local.volumes
9 |     state: directory
10 |     mode: 0755
11 | 
12 | - name: Restore SELinux context
13 |   command: restorecon -R /var/lib/origin/openshift.local.volumes
14 |   changed_when: False
15 | 
16 | - name: Get filesystem UUID
17 |   command: blkid -sUUID -ovalue /dev/disk/azure/scsi1/lun0
18 |   register: blkid
19 |   changed_when: False  # read-only probe; do not report "changed" on every run
20 | 
21 | - name: Mount XFS filesystem
22 |   mount:
23 |     state: mounted
24 |     path: /var/lib/origin/openshift.local.volumes
25 |     src: UUID={{ blkid.stdout }}
26 |     fstype: xfs
27 |     opts: gquota
28 | 
-------------------------------------------------------------------------------- /roles/ocp_pre/tasks/storage_etcd.yml: --------------------------------------------------------------------------------
1 | - name: Create XFS filesystem
2 |   filesystem:
3 |     fstype: xfs
4 |     dev: /dev/disk/azure/scsi1/lun2
5 | 
6 | - name: Set proper permissions on mount point
7 |   file:
8 |     path: /var/lib/etcd
9 |     state: directory
10 |     mode: 0755
11 | 
12 | - name: Restore SELinux context
13 |   command: restorecon -R /var/lib/etcd
14 |   changed_when: False  # consistency with storage_container.yml; restorecon is idempotent here
15 | 
16 | - name: Get filesystem UUID
17 |   command: blkid -sUUID -ovalue /dev/disk/azure/scsi1/lun2
18 |   register: blkid
19 |   changed_when: False  # read-only probe; do not report "changed" on every run
20 | 
21 | - name: Mount XFS filesystem
22 |   mount:
23 |     state: mounted
24 |     path: /var/lib/etcd
25 |     src: UUID={{ blkid.stdout }}
26 |     fstype: xfs
-------------------------------------------------------------------------------- /roles/ocp_pre/tasks/subscribe.yml: --------------------------------------------------------------------------------
1 | ---
2 | - name: Azure | Remove rh-cloud.repo
3 |   file:
4 |     path: /etc/yum.repos.d/rh-cloud.repo
5 |     state: absent
6 |   when: "'nodes' in group_names"
7 | 
8 | - name: Azure | Remove yum rhui load balancers
9 |   file:
10 |     path: /etc/yum.repos.d/rhui-load-balancers
11 |     state: absent
12 |   when: "'nodes' in group_names"
13 | 
14 | - name: Azure | Remove RHEL7 package
15 |   yum:
16 |     name: RHEL7
17 |     state: absent
18 |   when: "'nodes' in group_names"
19 | 
20 | - name: Azure | RHSM increase timeout
21 |   lineinfile:
22 |     dest: /etc/rhsm/rhsm.conf
23 |     line: 'server_timeout=600'
24 |     insertafter: '^proxy_password ='
25 |   when: "'nodes' in group_names"
26 | 
27 | - name: Azure | RHSM unregister
28 |   redhat_subscription:
29 |     state: absent
30 |   register: task_result
31 |   until: task_result is success
32 |   retries: 10
33 |   delay: 30
34 |   when: "'nodes' in group_names"
35 | 
36 | - name: Azure | RHSM register (username-passwd) Master-Infra-CNS
37 |   redhat_subscription:
38 |     state: present
39 |     username: "{{ rhsm_user }}"
40 |     password: "{{ rhsm_pass }}"
41 |     pool_ids: "{{ rhsm_broker_pool }}"
42 |   register: task_result
43 |   until: task_result is success
44 |   retries: 10
45 |   delay: 30
46 |   when:
47 |     - ( inventory_hostname in groups.masters ) or
48 |       ( inventory_hostname in groups.infra ) or
49 |       ( inventory_hostname in groups.glusterfs )
50 |     - rhsm_user is defined and rhsm_user != ""
51 |     - rhsm_pass is defined and rhsm_pass != ""
52 | 
53 | - name: Azure | RHSM register (activation-key) Master-Infra-CNS
54 |   redhat_subscription:
55 |     state: present
56 |     activationkey: "{{ rhsm_key }}"
57 |     org_id: "{{ rhsm_org }}"
58 |     pool_ids: "{{ rhsm_broker_pool }}"
59 |   register: task_result
60 |   until: task_result is success
61 |   retries: 10
62 |   delay: 30
63 |   when:
64 |     - ( inventory_hostname in groups.masters ) or
65 |       ( inventory_hostname in groups.infra ) or
66 |       ( inventory_hostname in groups.glusterfs )
67 |     - rhsm_key is defined and rhsm_key != ""
68 |     - rhsm_org is defined and rhsm_org != ""
69 | 
70 | - name: remove premium pool id | Master and Infra subscription only
71 |   redhat_subscription:
72 |     state: absent
73 |     pool_ids: "{{ rhsm_node_pool }}"
74 |   when:
75 |     - ( inventory_hostname in groups.masters ) or
76 |       ( inventory_hostname in groups.infra ) or
77 |       ( inventory_hostname in groups.glusterfs )
78 |     - rhsm_node_pool != rhsm_broker_pool
79 | 
80 | - name: Azure | RHSM register (username-passwd) Application nodes
81 |   redhat_subscription:
82 |     state: present
83 |     username: "{{ rhsm_user }}"
84 |     password: "{{ rhsm_pass }}"
85 |     pool_ids: "{{ rhsm_node_pool }}"
86 |   register: task_result
87 |   # NOTE(review): removed invalid task-level 'serial: 5' — 'serial' is a play-level-only keyword
88 |   until: task_result is success
89 |   retries: 10
90 |   delay: 30
91 |   when:
92 |     - inventory_hostname not in groups.masters
93 |     - inventory_hostname not in groups.infra
94 |     - inventory_hostname not in groups.glusterfs
95 |     - rhsm_user is defined and rhsm_user != ""
96 |     - rhsm_pass is defined and rhsm_pass != ""
97 | 
98 | - name: Azure | RHSM register (activation-key) Application nodes
99 |   redhat_subscription:
100 |     state: present
101 |     activationkey: "{{ rhsm_key }}"
102 |     org_id: "{{ rhsm_org }}"
103 |     pool_ids: "{{ rhsm_node_pool }}"
104 |   register: task_result
105 |   # NOTE(review): removed invalid task-level 'serial: 5' — 'serial' is a play-level-only keyword
106 |   until: task_result is success
107 |   retries: 10
108 |   delay: 30
109 |   when:
110 |     - inventory_hostname not in groups.masters
111 |     - inventory_hostname not in groups.infra
112 |     - inventory_hostname not in groups.glusterfs
113 |     - rhsm_key is defined and rhsm_key != ""
114 |     - rhsm_org is defined and rhsm_org != ""
115 | 
116 | - name: Azure | RHSM disable all repos
117 |   rhsm_repository:
118 |     name: '*'
119 |     state: disabled
120 |   register: repo_result
121 |   until: repo_result is success
122 |   retries: 10
123 |   delay: 30
124 |   when: "'nodes' in group_names"
125 | 
126 | - name: Azure | RHSM enable repos for OCP
127 |   rhsm_repository:
128 |     name: "{{ rhsm_repos }}"
129 |     state: enabled
130 |   until: repo_result is success
131 |   # NOTE(review): removed invalid task-level 'serial: 5' — 'serial' is a play-level-only keyword
132 |   retries: 10
133 |   delay: 30
134 |   register: repo_result
135 |   when: "'nodes' in group_names"
136 | 
137 | - name: Azure | Install OCP client
138 |   yum:
139 |     name: atomic-openshift-clients
140 |     state: latest
141 |   when: "'masters' in group_names"
-------------------------------------------------------------------------------- /roles/ocp_pre/templates/azure.j2: --------------------------------------------------------------------------------
1 | tenantId: {{ tenant_id.stdout }}
2 | subscriptionId: {{ subscription_id.stdout }}
3 | {% if sp_app is defined %}
4 | aadClientId: {{ sp_app.stdout }}
5 | {% elif sp_app_id is defined %}
6 | aadClientId: {{ sp_app_id }}
7 | {% endif %}
8 | {% if sp_secret is defined %}
9 | aadClientSecret: {{ sp_secret }}
10 | {% endif %}
11 | aadTenantID: {{ tenant_id.stdout }}
12 | resourceGroup: {{ rg }}
13 | cloud: {{ openshift_cloudprovider_azure_cloud }}
14 | location: {{ location }}
15 | vnetName: {{ vnet_name }}
16 | securityGroupName: {{
openshift_cloudprovider_azure_security_group_name }} 17 | primaryAvailabilitySetName: {{ openshift_cloudprovider_azure_availability_set_name }} 18 | useInstanceMetadata: true 19 | -------------------------------------------------------------------------------- /roles/ocp_pre/tests/inventory: -------------------------------------------------------------------------------- 1 | localhost 2 | 3 | -------------------------------------------------------------------------------- /roles/ocp_pre/tests/test.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | remote_user: root 4 | roles: 5 | - ocp_pre -------------------------------------------------------------------------------- /roles/ocp_pre/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # vars file for ocp_pre -------------------------------------------------------------------------------- /vars.yml.example: -------------------------------------------------------------------------------- 1 | --- 2 | # defaults file for azure_infra 3 | state: present 4 | location: eastus 5 | rg: "test" 6 | 7 | ####################### 8 | # nodes 9 | ####################### 10 | master_nodes: [1,2,3] 11 | infra_nodes: [1,2,3] 12 | app_nodes: [1,2,3] 13 | # Min of 3 required and add odd nums only for CNS 14 | cns_nodes: [1,2,3] 15 | 16 | ####################### 17 | # vm sizes (adapt) 18 | # az vm list-sizes -l -o table 19 | ####################### 20 | # For a cheaper solution when testing 21 | #vm_size_master: Standard_E2_v3 22 | #vm_size_infra: Standard_E4_v3 23 | #vm_size_node: Standard_E2_v3 24 | #vm_size_cns: Standard_E4_v3 25 | 26 | vm_size_master: Standard_D4s_v3 27 | vm_size_infra: Standard_D8s_v3 28 | vm_size_node: Standard_D2s_v3 29 | vm_size_cns: Standard_D8s_v3 30 | vm_size_bastion: Standard_D1 31 | 32 | ####################### 33 | # SSH user for all VMs created 34 | ####################### 35 | 
admin_user: cloud-user 36 | # This will be added to each instance's authorized_keys file 37 | admin_pubkey: 'ssh-rsa LDKJFDLKJDFLDKJDF' 38 | # Path for the private key which will be added to localhost ssh_config for proxying through the bastion 39 | admin_privkey: '~/.ssh/id_rsa' 40 | 41 | # If set, creates local users with password-less sudo on the bastion. 42 | #bastion_users: 43 | # user1: 'ssh-rsa LDKJFDLKJDFLDKJDF' 44 | # user2: 'ssh-rsa FDLKJDFLDKJDFLDKJ' 45 | 46 | ####################### 47 | # Service Principal 48 | ####################### 49 | # This is used to template the cloud config file on each node for azure 50 | sp_name: devops 51 | # Place this in vars file that can be encrypted in root of play and not checked into SCM 52 | sp_secret: 'password' 53 | #sp_app_id: 'dfdffdfa0103' 54 | 55 | ####################### 56 | # RHSM 57 | # Username / Password or 58 | # Activation key / OrgId 59 | ####################### 60 | rhsm_user: '' 61 | rhsm_pass: '' 62 | rhsm_key: "" 63 | rhsm_org: "" 64 | # Can specify separate pools so only Application nodes use paid subs 65 | # or keep the same if only using one pool 66 | rhsm_broker_pool: "" 67 | rhsm_node_pool: "" 68 | # For openshift contakiner Storage 69 | rhsm_ocs_pool: "" 70 | rhsm_repos: 71 | - rhel-7-server-rpms 72 | - rhel-7-server-extras-rpms 73 | - rhel-7-server-ose-3.10-rpms 74 | - rhel-7-fast-datapath-rpms 75 | - rhel-7-server-ansible-2.4-rpms 76 | bastion_pkgs: 77 | - ansible 78 | - atomic-openshift-clients 79 | - git 80 | - tmux 81 | - screen 82 | 83 | ####################### 84 | # Network 85 | ####################### 86 | vnet_name: "{{ rg }}vnet" 87 | vnet_cidr: "192.168.0.0/16" 88 | master_subnet_cidr: "192.168.0.0/27" 89 | infra_subnet_cidr: "192.168.0.32/27" 90 | cns_subnet_cidr: "192.168.0.64/27" 91 | app_subnet_cidr: "192.168.0.128/25" 92 | 93 | api_port: 443 94 | 95 | # If this is set, the bastion host is configured with the given static 96 | # IP address. 
A public IP address is still assigned to the bastion 97 | # because it is required during installation, but can be removed in 98 | # Azure after the installation is done. This is useful in environments 99 | # where the VNet is accessible from a private network. 100 | #bastion_private_ip: 192.168.0.20 101 | 102 | # Override the defaults for the SDN cluster network and the services 103 | # network. 104 | #osm_cluster_network_cidr: 10.29.0.0/16 105 | #openshift_portal_net: 10.28.0.0/16 106 | 107 | # If this is set to true, the VNet in the given resource group is not 108 | # deleted by the destroy.yml playbook. This can be used in 109 | # environments where a peering agreement or Express Route exists for 110 | # the given resource group, that was configured before the OpenShift 111 | # installation. 112 | vnet_preserve: false 113 | 114 | ####################### 115 | # LB 116 | ####################### 117 | # DNS name for the master load balancer. If not set, 118 | # {{ rg }}.{{ location }}.cloudapp.azure.com will be used. 119 | #master_lb_dns: "" 120 | 121 | # If this is set, the master load balancer will be configured as an 122 | # internal Azure load balancer with the given IP address as its 123 | # frontend IP address. There will be no public IP address assigned to 124 | # the load balancer. This is useful in environments where the VNet is 125 | # accessible from a private network. 126 | # 127 | # If this is set, the master_lb_dns parameter also needs to be defined 128 | # and point to this IP address in DNS. 129 | #master_lb_private_ip: 192.168.0.21 130 | 131 | # If this is set, Azure creates a DNS record for the public router IP 132 | # address. The public router IP will change if the environment is 133 | # deleted and redeployed, but the DNS name will stay the same. 134 | # 135 | # This can be referenced in a CNAME record in an external DNS 136 | # server. 
The full DNS name is: 137 | # 138 | # {{ router_lb_dns_label }}.{{ location }}.cloudapp.azure.com 139 | # 140 | #router_lb_dns_label: ocpapplb1 141 | 142 | # If this is set, application routes use this domain by default 143 | # instead of nip.io. This usually requires router_lb_dns_label to be 144 | # set, and a wildcard DNS entry to exist for *.app.example.com. 145 | #router_lb_default_subdomain: app.example.com 146 | 147 | # registry Storage Account Name 148 | registry_storage_account: "{{ rg | regex_replace('-') }}" 149 | 150 | 151 | ########################### 152 | # DNS 153 | ########################### 154 | 155 | # Optional dictionary of DNS domains and nameservers to configure on 156 | # the bastion and each node. DNS lookups for the given domains will be 157 | # done against the given nameservers. This can be used in environments 158 | # where a peering agreement or Express Route exists for the given 159 | # resource group 160 | # 161 | #dns_domain_nameservers: 162 | # internal.example.com: 192.168.100.1 163 | 164 | 165 | ####################### 166 | # Custom Named Certs 167 | ####################### 168 | # Router 169 | #router_cert: '{"cafile": "/path/to/ca_cert", "certfile": "/path/to/fullchain.cer", "keyfile": "/vagrant/keys/domain.key"}' 170 | ## Master 171 | #master_cert: '[{"cafile": "/path/to/ca_cert", "certfile": "/path/to/fullchain.cer", "keyfile": "/path/to/key/domain.key", "names": ["openshift.console.domain.name"]}]' 172 | 173 | 174 | ####################### 175 | # OCP Identity 176 | # Use 'htpasswd -n ' to generate password hash. 
(htpasswd from httpd-tools RPM) 177 | ####################### 178 | # Example with admin:changeme 179 | openshift_master_htpasswd_users: {'admin': '$apr1$zAhyA9Ko$rBxBOwAwwtRuuaw8OtCwH0'} 180 | openshift_master_identity_providers: [{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider'}] 181 | 182 | # 183 | #ocp_admin_passwd: 'redhat' 184 | 185 | ####################### 186 | # deploy CNS 187 | ####################### 188 | deploy_cns: true 189 | deploy_metrics: true 190 | deploy_logging: true 191 | deploy_prometheus: true 192 | 193 | # volume sizing defaults 194 | metrics_volume_size: 25 195 | logging_volume_size: 100 196 | prometheus_volume_size: 25 197 | 198 | # OpenShift Container StorageClass 199 | # Sizes in Gi 200 | # infra usable storage needs to be greater than below calculations 201 | # logging_volume_size * ec2_count_infra) + metrics_volume_size + prometheus_volume_size) * 1.3 + registry_volume_size (if using OCS for registry) 202 | ocs_infra_cluster_allocated_storage: "{{ ((( logging_volume_size|int * infra_nodes|length|int ) + metrics_volume_size|int + prometheus_volume_size|int ) * 1.3 ) | round | int }}" 203 | ocs_infra_cluster_usable_storage: 512 204 | ocs_app_cluster_usable_storage: 100 205 | ####################### 206 | # UI customization 207 | ####################### 208 | 209 | #project_request_message: To create a new project, contact your team administrator. 210 | 211 | ####################### 212 | ####################### 213 | # Don't change 214 | ####################### 215 | ####################### 216 | bastion: "{{ rg }}b.{{ location }}.cloudapp.azure.com" 217 | deploy_cns_on_infra: true 218 | --------------------------------------------------------------------------------