├── .gitignore ├── LICENSE ├── README.adoc ├── ansible.cfg ├── docs ├── .gitkeep ├── bb0.adoc ├── bb1.adoc ├── bb3.adoc ├── bb4.adoc ├── bb_template.adoc ├── contributing.adoc ├── getstarted.adoc └── images │ ├── add_data_source.png │ ├── config_data_source.png │ ├── copy_to_clipboard.png │ ├── create_env.png │ ├── create_mbaas_target.png │ ├── datasource_success.png │ ├── env_values.png │ ├── grafana_dashboard.png │ ├── grafana_dashboard_import.png │ ├── grafana_login.png │ ├── import_dashboard.png │ ├── import_dashboard_2.png │ ├── login_ocp.png │ ├── mbaas_health.png │ ├── mbaas_key.png │ ├── mbaas_route.png │ ├── mbaas_sample.png │ ├── open_grafana.png │ ├── rhmap_admin.png │ ├── rhmap_login.png │ ├── rhmap_route.png │ ├── select_mbaas.png │ ├── select_millicore.png │ ├── stc_bb1.png │ ├── token.png │ └── token_command_line.png ├── files └── grafana │ ├── Kubernetes_cluster_monitoring_via.json │ ├── openshift_metrics.json │ └── openshift_routing_layer.json ├── hack ├── README.md └── kcli-plan │ ├── files │ ├── env.yml │ ├── install-ocp.sh │ └── secrets.yml │ ├── mini.yml │ └── scripts │ ├── prep-bastion.sh │ └── prep-host.sh ├── playbooks ├── bb00-openstack_deprovisioning.yml ├── bb00-openstack_provisioning.yml ├── bb1 │ └── add_user.yml ├── bb4 │ ├── proxy.yml │ ├── redhat-registry-mirror.yml │ └── templates │ │ ├── config.j2 │ │ ├── local-registry-setup-v2.j2 │ │ └── squid.j2 ├── bb8 │ └── deploy_fis.yml ├── group_vars │ └── all ├── prepare_ssh.yml ├── roles │ ├── bb0-openstack │ │ ├── README.md │ │ ├── defaults │ │ │ └── main.yml │ │ ├── handlers │ │ │ └── main.yml │ │ ├── meta │ │ │ └── main.yml │ │ ├── provisioner-image │ │ │ ├── Dockerfile │ │ │ ├── README.md │ │ │ └── build.sh │ │ ├── tasks │ │ │ ├── create-inventory-mini.yml │ │ │ ├── deprovisioning-post-once.yml │ │ │ ├── deprovisioning.yml │ │ │ ├── provisioning-dns-cloudflare.yml │ │ │ ├── provisioning-dns-nip.io.yml │ │ │ ├── provisioning-pre-once.yml │ │ │ ├── provisioning-prepare-bastion.yml │ │ │ ├── provisioning.yml │ │ │ └── validate-parameters.yml │ │ ├── templates │ │ │ ├── env.yml.j2 │ │ │ └── secrets.yml.j2 │ │ ├── tests │ │ │ ├── inventory │ │ │ └── test.yml │ │ └── vars │ │ │ └── main.yml │ ├── check_cleanup │ │ └── tasks │ │ │ └── main.yml │ ├── check_connectivity │ │ └── tasks │ │ │ └── main.yml │ ├── check_disks │ │ └── tasks │ │ │ └── main.yml │ ├── check_dns │ │ └── tasks │ │ │ └── main.yml │ ├── check_docker │ │ └── tasks │ │ │ └── main.yml │ ├── check_docker_setup │ │ ├── tasks │ │ │ └── main.yml │ │ └── templates │ │ │ └── docker-storage-setup.j2 │ ├── check_docker_validation │ │ └── tasks │ │ │ └── main.yml │ ├── check_firewall │ │ └── tasks │ │ │ └── main.yml │ ├── check_firewall_initialize │ │ └── tasks │ │ │ └── main.yml │ ├── check_glusterfs │ │ └── tasks │ │ │ └── main.yml │ ├── check_hostname │ │ └── tasks │ │ │ └── main.yml │ ├── check_networking │ │ └── tasks │ │ │ └── main.yml │ ├── check_nm │ │ └── tasks │ │ │ └── main.yml │ ├── check_ntp │ │ ├── handlers │ │ │ └── main.yml │ │ ├── tasks │ │ │ └── main.yml │ │ └── templates │ │ │ ├── chrony.j2 │ │ │ └── ntp.j2 │ ├── check_os │ │ └── tasks │ │ │ └── main.yml │ ├── check_packages_bastion │ │ └── tasks │ │ │ └── main.yml │ ├── check_packages_nodes │ │ └── tasks │ │ │ └── main.yml │ ├── check_proxy │ │ └── tasks │ │ │ └── main.yml │ ├── check_selinux │ │ └── tasks │ │ │ └── main.yml │ ├── check_sizing │ │ └── tasks │ │ │ └── main.yml │ ├── check_storage │ │ └── tasks │ │ │ └── main.yml │ ├── check_subscription │ │ └── tasks │ │ │ └── main.yml │ └── 
print │ │ └── tasks │ │ └── main.yml ├── subscription-register.yml ├── subscription-unregister.yml ├── templates │ ├── hosts-v3.10.j2 │ └── hosts-v3.11.j2 ├── validate.yml ├── validate_config.yml └── vars │ ├── bb4.yml │ └── bb6.yml └── setup.sh /.gitignore: -------------------------------------------------------------------------------- 1 | playbooks/subscription-register-private.yml 2 | playbooks/*.entry 3 | env.yml 4 | secret.yml 5 | inventory 6 | ip 7 | kcli-parameters.yml 8 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. 
For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. 
You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. 
You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "[]" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright [yyyy] [name of copyright owner] 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 
202 | -------------------------------------------------------------------------------- /README.adoc: -------------------------------------------------------------------------------- 1 | = Project STC 2 | 3 | :Author: Tero Ahonen 4 | :Email: tahonen@redhat.com 5 | :Date: 20.08.2018 6 | 7 | :toc: macro 8 | 9 | toc::[] 10 | 11 | == Purpose 12 | Project STC is the effort driven by the Red Hat EMEA Openshift Specialist 13 | Solution Architects team to create reusable assets for the deployment of 14 | Red Hat Openshift environments for 15 | 16 | * Demos 17 | * Proofs of Technology 18 | * Proofs of Concept 19 | * Non-production Openshift environments 20 | 21 | The leading idea behind STC is to start with a *Minimal Viable Setup* (MVS) consisting 22 | of: 23 | 24 | * 1 Bastion 25 | * 3 Masters 26 | * 3 Nodes 27 | 28 | deployed with standard Red Hat Openshift capabilities. The topology is dynamic, with 3 possible STC *Flavors* for the setup (standard, mini and full). 29 | 30 | The MVS includes the following services: 31 | 32 | * Container registry 33 | * Routing layer 34 | * Prometheus 35 | * Ansible and Template service brokers 36 | * Hawkular based metrics 37 | * EFK stack log aggregation for apps 38 | * Openshift Container Storage (OCS) 39 | * Grafana 40 | 41 | === Supported OCP Versions 42 | 43 | [cols="1,1",options="header"] 44 | |======= 45 | |Version |Description 46 | |3.11 | STC default OCP version 47 | |3.10 | Previous OCP version 48 | |======= 49 | 50 | For a complete list of technical prerequisites, please check https://github.com/RedHat-EMEA-SSA-Team/stc/blob/master/docs/getstarted.adoc[this document]. 51 | 52 | Additional capabilities and features can then be added to this MVS, based on the 53 | scenarios which need to be demoed or verified. 54 | 55 | The following image depicts the system architecture of the MVS: 56 | 57 | image::docs/images/stc_bb1.png[] 58 | 59 | == STC? 60 | STC is an abbreviation taken from the tabletop game https://en.wikipedia.org/wiki/Warhammer_40,000[Warhammer 40K] 61 | and stands for *Standard Template Construct*. 62 | 63 | In the context of this repository, it represents a predefined set of 64 | *Building Blocks*, which can be used to achieve predefined setups and use-cases. 65 | 66 | == Building blocks (BB) 67 | Each *Building Block* represents a certain capability, which can be used on its own, 68 | in combination with, or as a dependency of another *Building Block*. 69 | 70 | Please check the following list of existing and planned *Building Blocks*: 71 | 72 | * https://github.com/RedHat-EMEA-SSA-Team/stc/blob/master/docs/bb0.adoc[BB0 - Validate and prepare infrastructure] 73 | * https://github.com/RedHat-EMEA-SSA-Team/stc/blob/master/docs/bb1.adoc[BB1 - Install Minimal Viable Product] 74 | * https://github.com/RedHat-EMEA-SSA-Team/stc/blob/master/docs/bb4.adoc[BB4 - Disconnected Installation with Docker Distribution] 75 | 76 | An archive of Building Blocks not yet tested or updated for the newest versions is available inside the `archive` directory. 77 | 78 | == Get started 79 | Check the prerequisites for executing STC https://github.com/RedHat-EMEA-SSA-Team/stc/blob/master/docs/getstarted.adoc[here]. 80 | 81 | Once the prerequisites are in place, start validating your environment with https://github.com/RedHat-EMEA-SSA-Team/stc/blob/master/docs/bb0.adoc[BB0]. 82 | 83 | == Want to contribute?
84 | 85 | Check basic information about contributing building blocks from https://github.com/RedHat-EMEA-SSA-Team/stc/blob/master/docs/contributing.adoc[Contributing to STC] 86 | -------------------------------------------------------------------------------- /ansible.cfg: -------------------------------------------------------------------------------- 1 | [defaults] 2 | log_path = ansible.log 3 | forks = 10 4 | callback_whitelist = profile_tasks 5 | host_key_checking = False -------------------------------------------------------------------------------- /docs/.gitkeep: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RedHat-EMEA-SSA-Team/stc/9f9a64a6eec649c263cb9c74dbb432393c27174f/docs/.gitkeep -------------------------------------------------------------------------------- /docs/bb0.adoc: -------------------------------------------------------------------------------- 1 | == Building Block 0 - Validate and prepare infrastructure 2 | This *Building Block* is targeted to verify a given environment 3 | before attempting to install Red Hat OpenShift. 4 | 5 | While running this *Building Block* the following technical prerequisites will be verified: 6 | 7 | * each system has at least 4 CPU cores (physical or virtual) 8 | * each system has at least 16GB RAM 9 | * each system has at least 20GB of disk storage under / 10 | * each system has at least 40GB of disk storage under /var 11 | * each system has at least 15GB of disk storage on an empty device for container storage (e.g. /dev/vdb) 12 | * if used, each system that needs to host OCS has at least 100GB of raw disk (e.g. /dev/vdc) 13 | 14 | Hardware requirements are based on the official https://docs.openshift.com/container-platform/latest/install/prerequisites.html#hardware[OCP documentation]. 15 | 16 | === Prerequisites for this Building Block 17 | To be able to run this *Building Block*, we recommend preparing the 18 | following systems for the default *Flavor* (Standard): 19 | 20 | * 1 RHEL Server as *Bastion*, incl. HAProxy Load Balancer 21 | * 3 RHEL Servers as *Red Hat Openshift Masters*, incl. Infrastructure Containers 22 | * 3 RHEL Servers as *Red Hat Openshift Nodes*, incl. OCS Containers 23 | 24 | These systems can be bare metal or virtual servers running on any hypervisor. 25 | 26 | The topology can be modified by choosing one of the Flavors: 27 | 28 | * *Standard*: default flavor, 7 nodes 29 | ** 1 Bastion 30 | ** 3 Masters 31 | ** 3 Nodes: contain OCS 32 | * *Mini*: 4 nodes 33 | ** 1 Bastion 34 | ** 1 Master: OCS node 1 35 | ** 1 Infranode: OCS node 2 36 | ** 1 Node: OCS node 3 37 | * *Full*: 10 nodes 38 | ** 1 Bastion 39 | ** 3 Masters 40 | ** 3 Infranodes: contain OCS 41 | ** 3 Nodes 42 | 43 | 44 | NOTE: Please be aware that currently Red Hat Openshift expects a minimum of 45 | 4 CPU Cores and 16GB of RAM per node. 46 | 47 | === Steps to Perform for this Building Block 48 | 49 | ==== Download Scripts and Playbooks 50 | As a first step, you have to download the 51 | scripts and Ansible Playbooks.
To do so, please run the following commands 52 | on your *Bastion* server as a privileged user: 53 | 54 | ``` 55 | $ sudo -i 56 | # curl -o stc.tgz -L https://github.com/RedHat-EMEA-SSA-Team/stc/archive/latest.tar.gz 57 | # tar xvf stc.tgz --strip 1 58 | # chmod +x setup.sh 59 | ``` 60 | 61 | ==== Prepare Configuration File 62 | Next, you need to create a configuration YAML file named `env.yml`, describing your 63 | environment. Based on this file, this *Building Block* will perform a set of 64 | verification steps. 65 | 66 | To make the creation of this configuration YAML file as easy as possible, 67 | we provide a simple script, the STC Installer, for you. 68 | 69 | Please fill in the following information when prompted by the script; default values are shown inside [] 70 | 71 | 72 | * *Hardware Requirements*: Confirm that your infrastructure meets the hardware requirements 73 | * *OCP Version*: Select the OpenShift version, default is 3.11 74 | * *Cluster hostname (API DNS)*: Hostname under which your Red Hat Openshift cluster 75 | will be accessible after installation 76 | * *Wildcard DNS for Apps*: DNS record as entered in your DNS Server to catch all future applications 77 | running on Red Hat Openshift 78 | * *Cluster Topology*: Select the STC flavor, standard (default), mini or full 79 | * *Bastion hostname*: The hostname of the jumphost used to run validation and installation 80 | * *Master hostname*: 1 or 3, depending on the flavor 81 | * *Infranode hostname*: 1 or 3, available only for the mini or full flavor 82 | * *Nodes hostname*: 1 or 3, depending on the flavor 83 | * *Proxy support*: Enable proxy access for OpenShift services, Docker, Git and environment configuration 84 | * *Internet proxy HTTP*: Hostname and port of your HTTP Proxy, if one is needed 85 | for outside connectivity. 86 | * *Internet proxy HTTPS*: Hostname and port of your HTTPS Proxy, if one is needed 87 | for outside connectivity. 88 | * *No proxy*: Select hosts to exclude from proxying (you can skip localhost and .svc as they will be added automatically) 89 | * *Proxy username*: Username in case you need to authenticate with the Proxies 90 | defined in the previous fields. 91 | * *Proxy password*: Password in case you need to authenticate with the Proxies 92 | defined in the previous fields. 93 | * *Remote SSH user*: Remote user which is used to access all hosts used 94 | for Openshift. Must have sudo access on hosts. Defaults to root.
95 | * *How do you manage your subscription and repositories*: Select RHSM for direct access to Red Hat CDN for 96 | Subscription Management or select Satellite if present 97 | * *RHSM Username*: Select your RHSM Username, password will be asked and stored in Ansible Vault later 98 | * *Satellite Org ID and Activation Key*: Provide an Org ID and an Acivation Key which containers OpenShift required repositories through a properly configured Content View 99 | 100 | The content of this YAML will be generated in the stc directory and used by the installer, if you agreed to proceed with `y` option, the validation will start: 101 | 102 | ``` 103 | Generated configuration: 104 | 105 | ********************* STC Conf file ********************* 106 | ocp_version: 3.11 107 | api_dns: openshift.example.com 108 | apps_dns: apps.example.com 109 | bastion: bastion.example.com 110 | lb: bastion.example,com 111 | masters: 112 | - master01.example.com 113 | - master02.example.com 114 | - master03.example.com 115 | nodes: 116 | - node01.example.com 117 | - node02.example.com 118 | - node03.example.com 119 | proxy_http: http://proxy.example.com:3128 120 | proxy_https: http://proxy.example.com:3128 121 | proxy_no: proxy.example.com 122 | cns: 123 | - master01.example.com 124 | - master02.example.com 125 | - master03.example.com 126 | ssh_user: cloud-user 127 | subscription_activationkey: ocp39 128 | subscription_org_id: RedHat 129 | ****************** End STC Conf file ******************** 130 | 131 | Do you want to proceed? 132 | y n 133 | 134 | ``` 135 | 136 | 137 | 138 | Other Example with standard STC topology (nodes contains router and registry): 139 | 140 | ``` 141 | ocp_version: 3.11 142 | lb: bastion 143 | bastion: bastion 144 | masters: 145 | - master01 146 | - master02 147 | - master03 148 | nodes: 149 | - node01 150 | - node02 151 | - node03 152 | ssh_user: cloud-user 153 | apps_dns: apps.your-ip.nip.io 154 | api_dns: master.your-ip.nip.io 155 | rhn_username: username 156 | ``` 157 | 158 | Example with smaller topology and infranodes, with version 3.10 159 | 160 | ``` 161 | ocp_version: 3.10 162 | bastion: bastion 163 | masters: 164 | - master01 165 | infranodes: 166 | - infranode01 167 | nodes: 168 | - node01 169 | ssh_user: cloud-user 170 | proxy_http: 'http://proxy.company.local:3128' 171 | proxy_https: 'http://proxy.company.local:3128' 172 | proxy_no: 'satellite.company.local' 173 | apps_dns: apps.company.local 174 | api_dns: master01.company.local 175 | rhn_username: username 176 | ``` 177 | 178 | 179 | ==== Setup bastion host and validate configuration 180 | In this step, we will be using a script to 181 | 182 | * prepare the *Bastion* system 183 | * verify the correctness of the created YAML Configuration file 184 | 185 | To do so, please run the following command on your *Bastion* server as `root` or as sudoers user. 186 | 187 | ``` 188 | ./setup.sh 189 | ``` 190 | 191 | The script will ask you: 192 | 193 | * Which version of OpenShift to prepare for prerequisites and verify, defaults to 3.10 194 | * Which type of Subscription management to use in order to register hosts, default is RHSM (need access to Red Hat CDN), and also Satellite giving an Organization ID and an Activation Key 195 | 196 | After this it will start registering Bastion host and start the validation across nodes, preparing an inventory file to be used to install OCP later on. 
197 | 198 | ``` 199 | ____ _____ ____ 200 | / ___|_ _/ ___| 201 | \___ \ | || | 202 | ___) || || |___ 203 | |____/ |_| \____| 204 | 205 | 206 | 207 | Welcome to STC OpenShift Installation Validator 208 | Defaults value are shown in [] 209 | 210 | Please select OCP Version to install: 3.11, 3.10 211 | [3.11] 3.10 212 | 213 | *** selected 3.11 214 | 215 | Please insert Cluster hostname (API DNS): 216 | openshift.example.com 217 | Please insert Wilcard DNS for Apps: 218 | apps.example.com 219 | 220 | Cluster Topology Setup 221 | 222 | Please select STC Flavor 223 | [standard] mini full 224 | 225 | Selected standard Flavor 226 | 227 | Please insert Bastion Node hostname: 228 | bastion.example.com 229 | 230 | Please insert Master 1 hostname: 231 | master01.example.com 232 | Please insert Master 2 hostname: 233 | master02.example.com 234 | Please insert Master 3 hostname: 235 | master03.example.com 236 | 237 | 238 | Please insert Node 1 hostname: 239 | node01.example.com 240 | Please insert Node 2 hostname: 241 | node02.example.com 242 | Please insert Node 3 hostname: 243 | node03.example.com 244 | 245 | Is there any Proxy to use for OpenShift and Container Runtime? 246 | y [n] 247 | y 248 | Please insert HTTP Proxy: 249 | http://proxy.example.com:3128 250 | Please insert HTTPS Proxy: 251 | http://proxy.example.com:3128 252 | Please insert No Proxy (leave blank if any, automatically adding localhost,127.0.0.1,.svc) 253 | proxy.example.com 254 | Please insert Proxy Username (leave blank if any) 255 | 256 | Please insert Proxy Password (leave blank if any) 257 | 258 | 259 | Please insert SSH username to be used by Ansible: 260 | cloud-user 261 | Please select Subscription management: RHSM or Satellite 262 | [rhsm] satellite 263 | satellite 264 | *** registering host to Satellite 265 | Please insert Organization ID: 266 | RedHat 267 | 268 | Please insert Activation Key: 269 | ocp39 270 | 271 | 272 | Generated configuration: 273 | 274 | ********************* STC Conf file ********************* 275 | ocp_version: 3.11 276 | api_dns: openshift.example.com 277 | apps_dns: apps.example.com 278 | bastion: bastion.example.com 279 | lb: bastion.example,com 280 | masters: 281 | - master01.example.com 282 | - master02.example.com 283 | - master03.example.com 284 | nodes: 285 | - node01.example.com 286 | - node02.example.com 287 | - node03.example.com 288 | proxy_http: http://proxy.example.com:3128 289 | proxy_https: http://proxy.example.com:3128 290 | proxy_no: proxy.example.com 291 | cns: 292 | - node01.example.com 293 | - node02.example.com 294 | - node03.example.com 295 | ssh_user: cloud-user 296 | subscription_activationkey: ocp39 297 | subscription_org_id: RedHat 298 | ****************** End STC Conf file ******************** 299 | 300 | Do you want to proceed? 301 | y n 302 | 303 | 304 | PLAY RECAP ********************************************************************* 305 | localhost : ok=1 changed=0 unreachable=0 failed=0 306 | ``` 307 | 308 | You will also be asked to provide a password to `ssh` into the 7 systems and for 309 | a password, which will be used to encrypt all given passwords during installation 310 | and later steps. 
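If you later need to inspect or rotate the values encrypted with that Vault password, the standard `ansible-vault` tooling applies. A minimal sketch, assuming the installer wrote the vaulted variables to a file such as `playbooks/vars/secrets.yml` (the actual file name and location may differ in your checkout):

```
# show the decrypted content of the vaulted file
ansible-vault view playbooks/vars/secrets.yml

# change the Vault password used to encrypt the file
ansible-vault rekey playbooks/vars/secrets.yml
```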
311 | 312 | ==== Test Ansible inventory and public key authentication 313 | To verify that our previous steps worked and that the public keys have all been 314 | successfully transfered to the 7 systems, please run the following 315 | 316 | ``` 317 | ansible -i inventory all -m ping 318 | ``` 319 | 320 | you should get the following output 321 | 322 | ``` 323 | master01.example.com | SUCCESS => { 324 | "changed": false, 325 | "ping": "pong" 326 | } 327 | master02.example.com | SUCCESS => { 328 | "changed": false, 329 | "ping": "pong" 330 | } 331 | master03.example.com | SUCCESS => { 332 | "changed": false, 333 | "ping": "pong" 334 | } 335 | node01.example.com | SUCCESS => { 336 | "changed": false, 337 | "ping": "pong" 338 | } 339 | node02.example.com | SUCCESS => { 340 | "changed": false, 341 | "ping": "pong" 342 | } 343 | node03.example.com | SUCCESS => { 344 | "changed": false, 345 | "ping": "pong" 346 | } 347 | bastion.example.com | SUCCESS => { 348 | "changed": false, 349 | "ping": "pong" 350 | } 351 | [root@localhost ocppoc]# 352 | ``` 353 | 354 | ==== Validate nodes and external connections for OCP 355 | In the final step, we will run the real validation. 356 | To do so, please run 357 | 358 | ``` 359 | ansible-playbook -i inventory --ask-vault-pass playbooks/validate.yml 360 | ``` 361 | 362 | If all steps perform without raising an error, then you are ready to 363 | proceed and install Red Hat Openshift. 364 | 365 | ``` 366 | PLAY [Validate environment] **************************************************** 367 | 368 | PLAY [Verify subcription and subscribe nodes] ********************************** 369 | 370 | TASK [Check Red Hat subscription] ********************************************** 371 | < output removed > 372 | 373 | TASK [Disable all repos] ******************************************************* 374 | < output removed > 375 | 376 | TASK [Enable correct repos] **************************************************** 377 | < output removed > 378 | 379 | PLAY [Check supported Operating Systems] *************************************** 380 | 381 | TASK [Gathering Facts] ********************************************************* 382 | < output removed > 383 | 384 | TASK [assert] ****************************************************************** 385 | < output removed > 386 | 387 | PLAY [Check connectivity to whitelisted hosts] ********************************* 388 | 389 | TASK [Ping proxy whitelisted sites] ******************************************** 390 | < output removed > 391 | 392 | TASK [Check download speed] **************************************************** 393 | < output removed > 394 | 395 | TASK [set_fact] **************************************************************** 396 | < output removed > 397 | 398 | TASK [debug] ******************************************************************* 399 | < output removed > 400 | 401 | TASK [Ensude nc is installed] ************************************************** 402 | < output removed > 403 | 404 | TASK [Start nc -l to all valid ports] ****************************************** 405 | < output removed > 406 | 407 | PLAY [Check all ports from bastion] ******************************************** 408 | 409 | TASK [Check that all needed ports are open] ************************************ 410 | < output removed > 411 | 412 | TASK [Ensure nc absent] ******************************************************** 413 | < output removed > 414 | 415 | PLAY [Vadiate that selinux is in place] **************************************** 416 | 417 | TASK 
[check if selinux is running and enforced] ******************************** 418 | < output removed > 419 | 420 | PLAY [Identify the space available in] ***************************************** 421 | 422 | TASK [command] ***************************************************************** 423 | < output removed > 424 | 425 | TASK [Set root disk facts] ***************************************************** 426 | < output removed > 427 | 428 | TASK [Fail if there is not enough space available in /] ************************ 429 | < output removed > 430 | 431 | PLAY [Check if Network Manager is running] ************************************* 432 | 433 | TASK [Ensure that NetworkManager is running] *********************************** 434 | < output removed > 435 | 436 | TASK [Report status of Network Manager] **************************************** 437 | < output removed > 438 | 439 | PLAY [Prepare install and validate docker] ************************************* 440 | 441 | TASK [Gathering Facts] ********************************************************* 442 | < output removed > 443 | 444 | TASK [docker_setup : setup] **************************************************** 445 | < output removed > 446 | 447 | TASK [docker_setup : Figure out device reserved for docker] ******************** 448 | < output removed > 449 | 450 | TASK [docker_setup : set_fact] ************************************************* 451 | < output removed > 452 | 453 | TASK [docker_setup : Ensure docker installed] ********************************** 454 | < output removed > 455 | 456 | TASK [docker_setup : Detect Docker storage configuration status] *************** 457 | < output removed > 458 | 459 | TASK [docker_setup : Create docker storage configuration] ********************** 460 | < output removed > 461 | 462 | TASK [docker_setup : Apply Docker storage configuration changes] *************** 463 | < output removed > 464 | 465 | TASK [docker_setup : Fail if Docker version is < 1.12] ************************* 466 | < output removed > 467 | 468 | TASK [docker_setup : Enable docker] ******************************************** 469 | < output removed > 470 | 471 | TASK [docker_setup : Start docker] ********************************************* 472 | < output removed > 473 | 474 | TASK [docker_validation : Pull some basic docker images] *********************** 475 | < output removed > 476 | 477 | PLAY RECAP ********************************************************************* 478 | bastion : ok=8 changed=0 unreachable=0 failed=0 479 | localhost : ok=1 changed=0 unreachable=0 failed=0 480 | master01 : ok=28 changed=8 unreachable=0 failed=0 481 | master02 : ok=28 changed=8 unreachable=0 failed=0 482 | master03 : ok=28 changed=8 unreachable=0 failed=0 483 | node01 : ok=28 changed=8 unreachable=0 failed=0 484 | node02 : ok=28 changed=8 unreachable=0 failed=0 485 | node03 : ok=28 changed=8 unreachable=0 failed=0 486 | 487 | ``` 488 | 489 | 490 | === Hacking Building Block Flavors === 491 | 492 | It would be still possible to use a free topology schema editing the `env.yml` for changes such as more nodes or external load balancer 493 | 494 | === OpenStack provisioner === 495 | 496 | How to use the OpenStack provisioner for openstack infrastructure provisioning. 
If you need more flexibility, please consider the https://access.redhat.com/documentation/en-us/openshift_container_platform/3.11/html/configuring_clusters/install-config-configuring-openstack[OCP documentation]. 497 | 498 | *Just for information*, if you would like to activate the OpenStack cloud provider: the cloud provider changes the nodename to the name of the instance. It is therefore strongly recommended that the instance name on OpenStack, the OpenShift nodename and the DNS name are identical, and that you use FQDNs! 499 | 500 | [NOTE] 501 | ==== 502 | Please take care that the quotas are set correctly, for example: ```openstack quota set --secgroups 100 --volumes 100 --ram 62520 --cores 50 admin``` 503 | ==== 504 | 505 | ``` 506 | docker run -ti -v $(pwd):/work:z quay.io/redhat/stc-openstack-provisioner 507 | 508 | export OS_USERNAME=admin 509 | export OS_PASSWORD=xxxx 510 | export OS_AUTH_URL=xxxx 511 | export OS_PROJECT_NAME=admin 512 | export OS_USER_DOMAIN_NAME=Default 513 | export OS_PROJECT_DOMAIN_NAME=Default 514 | export OS_IDENTITY_API_VERSION=3 515 | 516 | export STC_RHN_PASSWORD=xxxx 517 | export STC_RHN_USERNAME=xxx 518 | export STC_SUBSCRIPTION_POOL_ID=xx 519 | export STC_REGISTRY_TOKEN_USER=xxx 520 | export STC_REGISTRY_TOKEN=xxxx 521 | export STC_FLAVOR=mini # Only supported flavor at the moment is mini 522 | export STC_IAAS_MACHINE_SIZE=m1.xlarge # m1.xlarge is Default 523 | export STC_IAAS_CONTAINER_STORAGE_DISK=15 # Size in GB of container image storage, default 15. 524 | export STC_IAAS_GLUSTERFS_DISK=100 # Size in GB of gluster disk, default 100. 525 | export STC_IAAS_INTERNAL_NETWORK=admin # Network name where the VMs are added, default admin. 526 | 527 | # Please upload an RHEL Image to your openstack env. 528 | # openstack image create --public --disk-format qcow2 --file rhel-server-7.6-update-4-x86_64-kvm.qcow2 rhel-server-7.6-update-4-x86_64 529 | export STC_IAAS_IMAGE="RHEL IMAGE NAME" # Default rhel-server-7.6-x86_64-kvm 530 | 531 | cd /work 532 | ./playbooks/bb00-openstack_provisioning.yml 533 | ``` -------------------------------------------------------------------------------- /docs/bb1.adoc: -------------------------------------------------------------------------------- 1 | == Building Block 1 - Install Minimal Viable Product 2 | 3 | === Prerequisites for this Building Block 4 | To be able to run this *Building Block*, you must have successfully performed 5 | https://github.com/RedHat-EMEA-SSA-Team/stc/blob/master/docs/bb0.adoc[Building Block 0] 6 | 7 | === Steps to Perform for this Building Block 8 | Please log in to your *bastion* server and run the following commands 9 | 10 | ``` 11 | # export WORK_DIR=/root/stc 12 | $ cd $WORK_DIR 13 | $ ansible-playbook /usr/share/ansible/openshift-ansible/playbooks/prerequisites.yml 14 | 15 | ``` 16 | 17 | Once your output ends with 18 | 19 | ``` 20 | bastion : ok=117 changed=3 unreachable=0 failed=0 21 | localhost : ok=14 changed=0 unreachable=0 failed=0 22 | master01 : ok=776 changed=111 unreachable=0 failed=0 23 | master02 : ok=446 changed=48 unreachable=0 failed=0 24 | master03 : ok=446 changed=48 unreachable=0 failed=0 25 | node01 : ok=228 changed=18 unreachable=0 failed=0 26 | node02 : ok=230 changed=28 unreachable=0 failed=0 27 | node03 : ok=230 changed=28 unreachable=0 failed=0 28 | ``` 29 | run the deploy playbook 30 | ``` 31 | $ cd $WORK_DIR 32 | $ ansible-playbook /usr/share/ansible/openshift-ansible/playbooks/deploy_cluster.yml 33 | ``` 34 | 35 | After that, you should have a running Red Hat OpenShift Container Platform cluster.
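Before moving on to the post-installation tasks, it can be useful to confirm that the cluster responds. The commands below are plain `oc` client calls, not part of the STC playbooks, and assume you run them on `master01` or any host with a configured `oc` client:

```
# all nodes should report Ready
oc get nodes

# the router and registry pods in the default project should be Running
oc get pods -n default
```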
36 | 37 | === Post Installation Tasks 38 | ==== Create Admin-User 39 | If you want to change the password for the `admin` user, which you should do, please 40 | run the following commands on your *bastion* server: 41 | 42 | ``` 43 | [cloud-user@bastion ~]$ ansible -i inventory masters -m command -a 'htpasswd -b /etc/origin/master/htpasswd admin change12_me' 44 | master01 | SUCCESS | rc=0 >> 45 | Adding password for user admin 46 | 47 | master03 | SUCCESS | rc=0 >> 48 | Adding password for user admin 49 | 50 | master02 | SUCCESS | rc=0 >> 51 | Adding password for user admin 52 | 53 | [cloud-user@bastion ~]$ ansible masters -l master01 -m command -a 'oc adm policy add-cluster-role-to-user cluster-admin admin' 54 | master01 | SUCCESS | rc=0 >> 55 | cluster role "cluster-admin" added: "admin" 56 | ``` 57 | 58 | This will 59 | 60 | * change the authentication mechanism for the user `admin` to `htpasswd` 61 | * give the user `admin` the password `change12_me`, which you can obviously 62 | change to your liking 63 | * make the user `admin` a cluster-admin 64 | -------------------------------------------------------------------------------- /docs/bb3.adoc: -------------------------------------------------------------------------------- 1 | == Building Block 3 - Install CFME on Openshift 2 | This *Building Block* is targeted to install CloudForms Management Engine (CFME) on Openshift. 3 | 4 | WIP 5 | -------------------------------------------------------------------------------- /docs/bb4.adoc: -------------------------------------------------------------------------------- 1 | == Building Block 4 - Disconnected Installation with Docker Distribution 2 | This *Building Block* is targeted to support Disconnected Installation. 3 | 4 | It installs a Docker registry on the bastion to sync and proxy all required container images for 5 | the OCP installation. 6 | 7 | === Prerequisites for this Building Block 8 | * BB0 with Satellite based subscription management (RPMS) 9 | === Steps to Perform for this Building Block 10 | 11 | Change the registry hostname and port in 12 | 13 | ``` 14 | playbooks/vars/bb4.yml 15 | ``` 16 | 17 | And then run it: 18 | 19 | ``` 20 | ansible-playbook playbooks/bb4/disconnected.yml 21 | ``` 22 | -------------------------------------------------------------------------------- /docs/bb_template.adoc: -------------------------------------------------------------------------------- 1 | == Building Block 0 - Validate and prepare infrastructure 2 | This *Building Block* is targeted to ... 3 | 4 | === Prerequisites for this Building Block 5 | 6 | === Steps to Perform for this Building Block 7 | -------------------------------------------------------------------------------- /docs/contributing.adoc: -------------------------------------------------------------------------------- 1 | == Contributing content to STC 2 | 3 | If you 4 | 5 | * have an idea for a useful *Building Block* 6 | * have improvements for the documentation 7 | * have bugfixes (yes, we sometimes do have bugs ...) 8 | 9 | please contribute. 10 | 11 | === Some basic guidelines 12 | 13 | The directory structure is like this: 14 | 15 | ``` 16 | . 17 | ├── README.adoc 18 | ├── docs 19 | │   ├── bb_template.adoc 20 | │   ├── bb0.adoc 21 | │   ├── bb1.adoc 22 | │   ├── bb2.adoc 23 | │   └── ... 24 | ├── playbooks 25 | │   ├── bb1 26 | │   │ ├── bb1.yml 27 | │   │ └── roles 28 | │   │ └── ... 29 | │   ├── bb2 30 | │   │ └── ... 31 | │   ├── group_vars 32 | │   ├── vars 33 | │   │ ├── bb1.yml 34 | │   │ ├── bb2.yml 35 | │   │ └── ... 36 | └── ... 37 | ...
38 | ``` 39 | 40 | [square] 41 | * Store your building block plays under `playbooks/bbX` 42 | * Store your playbook vars in the `playbooks/vars/bbX.yml` file 43 | * Create documentation about the building block in the file `docs/bbX.adoc` 44 | * Use https://github.com/RedHat-EMEA-SSA-Team/stc/blob/master/docs/bb_template.adoc[BB doc template] as a template for your documentation. 45 | * Keep playbooks simple. It is better to make multiple small ones than one big one. 46 | 47 | 48 | === Working with vars 49 | 50 | Default vars are stored in the `playbooks/group_vars/all` file and will be included automatically in your plays. 51 | 52 | If you need to use data that is collected during installation, use `env.yml`. Add the following to your playbook: 53 | 54 | ``` 55 | ... 56 | vars_files: 57 | - "{{file_env}}" 58 | ... 59 | ``` 60 | -------------------------------------------------------------------------------- /docs/getstarted.adoc: -------------------------------------------------------------------------------- 1 | == Getting started with STC 2 | 3 | To get started you will need two main things: 4 | 5 | [square] 6 | * At least 4 VMs / hosts 7 | * A Red Hat Openshift Container Platform subscription 8 | 9 | === Hardware requirements 10 | 11 | These are the minimal hardware requirements: 12 | 13 | [square] 14 | * 4 vCPU 15 | * 16 GB memory 16 | * storage 17 | ** device 1 (for OS) 18 | *** 20 GB / 19 | *** 40 GB /var 20 | ** device 2 (for container runtime storage) 21 | *** 40 GB (no file system or mount needed) 22 | ** device 3 (for Openshift Container Storage) 23 | *** 100+ GB (no file system or mount needed) 24 | 25 | All hosts should be identical. 26 | 27 | === Network requirements 28 | * at least one network device and IP address 29 | * NetworkManager enabled 30 | * resolvable hostnames 31 | * Wildcard DNS A record for applications (*.apps.your.company.com) 32 | * DNS A record for the master API (master.your.company.com) 33 | * hosts need to have access to each other (all ports, or the ports listed https://docs.openshift.com/container-platform/latest/install_config/install/prerequisites.html#required-ports[here]) 34 | * Internet connection, direct or through a proxy 35 | * at least one of the hosts must be able to get an SSL connection to all other hosts 36 | * if firewall rules are active (e.g. on the bastion hosts), the rules must be maintained by iptables, not firewalld 37 | 38 | === Operating system requirements 39 | * RHEL 7.4+ minimal (the OS will be updated during the validation phase) 40 | * root or sudo access 41 | * Subscriptions are allocated to the systems (machines). 42 | ** In case they are not, we provide a helper playbook: 43 | + 44 | ---- 45 | ./setup.sh 46 | cp playbooks/subscription-register.yml playbooks/subscription-register-private.yml 47 | 48 | $EDITOR playbooks/subscription-register-private.yml <1> 49 | ansible-playbook -i inventory playbooks/subscription-register-private.yml 50 | ---- 51 | <1> Adjust the playbook - add the RHSM user and password, or the activation key & org and pool id 52 | 53 | * Repositories enabled (the `subscription-register*` playbook above also enables the required repositories) 54 | 55 | === Other useful info 56 | * Information about the proxy: address, username, password, whitelists, etc.
57 | * Red Hat Subscription Management (RHSM) account info or Satellite activation keys for registering and subscribing hosts 58 | -------------------------------------------------------------------------------- /docs/images/add_data_source.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RedHat-EMEA-SSA-Team/stc/9f9a64a6eec649c263cb9c74dbb432393c27174f/docs/images/add_data_source.png -------------------------------------------------------------------------------- /docs/images/config_data_source.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RedHat-EMEA-SSA-Team/stc/9f9a64a6eec649c263cb9c74dbb432393c27174f/docs/images/config_data_source.png -------------------------------------------------------------------------------- /docs/images/copy_to_clipboard.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RedHat-EMEA-SSA-Team/stc/9f9a64a6eec649c263cb9c74dbb432393c27174f/docs/images/copy_to_clipboard.png -------------------------------------------------------------------------------- /docs/images/create_env.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RedHat-EMEA-SSA-Team/stc/9f9a64a6eec649c263cb9c74dbb432393c27174f/docs/images/create_env.png -------------------------------------------------------------------------------- /docs/images/create_mbaas_target.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RedHat-EMEA-SSA-Team/stc/9f9a64a6eec649c263cb9c74dbb432393c27174f/docs/images/create_mbaas_target.png -------------------------------------------------------------------------------- /docs/images/datasource_success.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RedHat-EMEA-SSA-Team/stc/9f9a64a6eec649c263cb9c74dbb432393c27174f/docs/images/datasource_success.png -------------------------------------------------------------------------------- /docs/images/env_values.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RedHat-EMEA-SSA-Team/stc/9f9a64a6eec649c263cb9c74dbb432393c27174f/docs/images/env_values.png -------------------------------------------------------------------------------- /docs/images/grafana_dashboard.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RedHat-EMEA-SSA-Team/stc/9f9a64a6eec649c263cb9c74dbb432393c27174f/docs/images/grafana_dashboard.png -------------------------------------------------------------------------------- /docs/images/grafana_dashboard_import.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RedHat-EMEA-SSA-Team/stc/9f9a64a6eec649c263cb9c74dbb432393c27174f/docs/images/grafana_dashboard_import.png -------------------------------------------------------------------------------- /docs/images/grafana_login.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RedHat-EMEA-SSA-Team/stc/9f9a64a6eec649c263cb9c74dbb432393c27174f/docs/images/grafana_login.png -------------------------------------------------------------------------------- /docs/images/import_dashboard.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/RedHat-EMEA-SSA-Team/stc/9f9a64a6eec649c263cb9c74dbb432393c27174f/docs/images/import_dashboard.png -------------------------------------------------------------------------------- /docs/images/import_dashboard_2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RedHat-EMEA-SSA-Team/stc/9f9a64a6eec649c263cb9c74dbb432393c27174f/docs/images/import_dashboard_2.png -------------------------------------------------------------------------------- /docs/images/login_ocp.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RedHat-EMEA-SSA-Team/stc/9f9a64a6eec649c263cb9c74dbb432393c27174f/docs/images/login_ocp.png -------------------------------------------------------------------------------- /docs/images/mbaas_health.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RedHat-EMEA-SSA-Team/stc/9f9a64a6eec649c263cb9c74dbb432393c27174f/docs/images/mbaas_health.png -------------------------------------------------------------------------------- /docs/images/mbaas_key.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RedHat-EMEA-SSA-Team/stc/9f9a64a6eec649c263cb9c74dbb432393c27174f/docs/images/mbaas_key.png -------------------------------------------------------------------------------- /docs/images/mbaas_route.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RedHat-EMEA-SSA-Team/stc/9f9a64a6eec649c263cb9c74dbb432393c27174f/docs/images/mbaas_route.png -------------------------------------------------------------------------------- /docs/images/mbaas_sample.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RedHat-EMEA-SSA-Team/stc/9f9a64a6eec649c263cb9c74dbb432393c27174f/docs/images/mbaas_sample.png -------------------------------------------------------------------------------- /docs/images/open_grafana.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RedHat-EMEA-SSA-Team/stc/9f9a64a6eec649c263cb9c74dbb432393c27174f/docs/images/open_grafana.png -------------------------------------------------------------------------------- /docs/images/rhmap_admin.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RedHat-EMEA-SSA-Team/stc/9f9a64a6eec649c263cb9c74dbb432393c27174f/docs/images/rhmap_admin.png -------------------------------------------------------------------------------- /docs/images/rhmap_login.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RedHat-EMEA-SSA-Team/stc/9f9a64a6eec649c263cb9c74dbb432393c27174f/docs/images/rhmap_login.png -------------------------------------------------------------------------------- /docs/images/rhmap_route.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RedHat-EMEA-SSA-Team/stc/9f9a64a6eec649c263cb9c74dbb432393c27174f/docs/images/rhmap_route.png -------------------------------------------------------------------------------- /docs/images/select_mbaas.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/RedHat-EMEA-SSA-Team/stc/9f9a64a6eec649c263cb9c74dbb432393c27174f/docs/images/select_mbaas.png -------------------------------------------------------------------------------- /docs/images/select_millicore.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RedHat-EMEA-SSA-Team/stc/9f9a64a6eec649c263cb9c74dbb432393c27174f/docs/images/select_millicore.png -------------------------------------------------------------------------------- /docs/images/stc_bb1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RedHat-EMEA-SSA-Team/stc/9f9a64a6eec649c263cb9c74dbb432393c27174f/docs/images/stc_bb1.png -------------------------------------------------------------------------------- /docs/images/token.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RedHat-EMEA-SSA-Team/stc/9f9a64a6eec649c263cb9c74dbb432393c27174f/docs/images/token.png -------------------------------------------------------------------------------- /docs/images/token_command_line.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/RedHat-EMEA-SSA-Team/stc/9f9a64a6eec649c263cb9c74dbb432393c27174f/docs/images/token_command_line.png -------------------------------------------------------------------------------- /files/grafana/Kubernetes_cluster_monitoring_via.json: -------------------------------------------------------------------------------- 1 | { 2 | "__inputs": [ 3 | { 4 | "name": "DS_PROMETHEUS", 5 | "label": "Prometheus", 6 | "description": "", 7 | "type": "datasource", 8 | "pluginId": "prometheus", 9 | "pluginName": "Prometheus" 10 | } 11 | ], 12 | "__requires": [ 13 | { 14 | "type": "grafana", 15 | "id": "grafana", 16 | "name": "Grafana", 17 | "version": "4.6.1" 18 | }, 19 | { 20 | "type": "panel", 21 | "id": "graph", 22 | "name": "Graph", 23 | "version": "" 24 | }, 25 | { 26 | "type": "datasource", 27 | "id": "prometheus", 28 | "name": "Prometheus", 29 | "version": "1.0.0" 30 | }, 31 | { 32 | "type": "panel", 33 | "id": "singlestat", 34 | "name": "Singlestat", 35 | "version": "" 36 | } 37 | ], 38 | "annotations": { 39 | "list": [ 40 | { 41 | "builtIn": 1, 42 | "datasource": "-- Grafana --", 43 | "enable": true, 44 | "hide": true, 45 | "iconColor": "rgba(0, 211, 255, 1)", 46 | "name": "Annotations & Alerts", 47 | "type": "dashboard" 48 | } 49 | ] 50 | }, 51 | "description": "Monitor a Kubernetes cluster using Prometheus TSDB. Shows overall cluster CPU / Memory / Disk usage as well as individual pod statistics. 
", 52 | "editable": true, 53 | "gnetId": 162, 54 | "graphTooltip": 1, 55 | "hideControls": false, 56 | "id": null, 57 | "links": [], 58 | "refresh": "1m", 59 | "rows": [ 60 | { 61 | "collapse": false, 62 | "height": "250px", 63 | "panels": [ 64 | { 65 | "cacheTimeout": null, 66 | "colorBackground": false, 67 | "colorValue": false, 68 | "colors": [ 69 | "rgba(50, 172, 45, 0.97)", 70 | "rgba(237, 129, 40, 0.89)", 71 | "rgba(245, 54, 54, 0.9)" 72 | ], 73 | "datasource": "${DS_PROMETHEUS}", 74 | "editable": true, 75 | "error": false, 76 | "format": "percent", 77 | "gauge": { 78 | "maxValue": 100, 79 | "minValue": 0, 80 | "show": true, 81 | "thresholdLabels": false, 82 | "thresholdMarkers": true 83 | }, 84 | "id": 4, 85 | "interval": null, 86 | "links": [], 87 | "mappingType": 1, 88 | "mappingTypes": [ 89 | { 90 | "name": "value to text", 91 | "value": 1 92 | }, 93 | { 94 | "name": "range to text", 95 | "value": 2 96 | } 97 | ], 98 | "maxDataPoints": 100, 99 | "nullPointMode": "connected", 100 | "nullText": null, 101 | "postfix": "", 102 | "postfixFontSize": "50%", 103 | "prefix": "", 104 | "prefixFontSize": "50%", 105 | "rangeMaps": [ 106 | { 107 | "from": "null", 108 | "text": "N/A", 109 | "to": "null" 110 | } 111 | ], 112 | "span": 3, 113 | "sparkline": { 114 | "fillColor": "rgba(31, 118, 189, 0.18)", 115 | "full": false, 116 | "lineColor": "rgb(31, 120, 193)", 117 | "show": false 118 | }, 119 | "tableColumn": "", 120 | "targets": [ 121 | { 122 | "expr": "(sum(node_memory_MemTotal) - sum(node_memory_MemAvailable) ) / sum(node_memory_MemTotal) * 100", 123 | "format": "time_series", 124 | "interval": "10s", 125 | "intervalFactor": 1, 126 | "refId": "A", 127 | "step": 300 128 | } 129 | ], 130 | "thresholds": "65, 90", 131 | "title": "Cluster memory usage", 132 | "type": "singlestat", 133 | "valueFontSize": "80%", 134 | "valueMaps": [ 135 | { 136 | "op": "=", 137 | "text": "N/A", 138 | "value": "null" 139 | } 140 | ], 141 | "valueName": "current" 142 | }, 143 | { 144 | "cacheTimeout": null, 145 | "colorBackground": false, 146 | "colorValue": false, 147 | "colors": [ 148 | "rgba(50, 172, 45, 0.97)", 149 | "rgba(237, 129, 40, 0.89)", 150 | "rgba(245, 54, 54, 0.9)" 151 | ], 152 | "datasource": "${DS_PROMETHEUS}", 153 | "editable": true, 154 | "error": false, 155 | "format": "percent", 156 | "gauge": { 157 | "maxValue": 100, 158 | "minValue": 0, 159 | "show": true, 160 | "thresholdLabels": false, 161 | "thresholdMarkers": true 162 | }, 163 | "id": 11, 164 | "interval": null, 165 | "links": [], 166 | "mappingType": 1, 167 | "mappingTypes": [ 168 | { 169 | "name": "value to text", 170 | "value": 1 171 | }, 172 | { 173 | "name": "range to text", 174 | "value": 2 175 | } 176 | ], 177 | "maxDataPoints": 100, 178 | "nullPointMode": "connected", 179 | "nullText": null, 180 | "postfix": "", 181 | "postfixFontSize": "50%", 182 | "prefix": "", 183 | "prefixFontSize": "50%", 184 | "rangeMaps": [ 185 | { 186 | "from": "null", 187 | "text": "N/A", 188 | "to": "null" 189 | } 190 | ], 191 | "span": 3, 192 | "sparkline": { 193 | "fillColor": "rgba(31, 118, 189, 0.18)", 194 | "full": false, 195 | "lineColor": "rgb(31, 120, 193)", 196 | "show": false 197 | }, 198 | "tableColumn": "", 199 | "targets": [ 200 | { 201 | "expr": "(sum(node_memory_MemTotal{purpose=\"app\"}) - sum(node_memory_MemAvailable{purpose=\"app\"}) ) / sum(node_memory_MemTotal{purpose=\"app\"}) * 100", 202 | "format": "time_series", 203 | "interval": "10s", 204 | "intervalFactor": 1, 205 | "refId": "A", 206 | "step": 300 207 | } 208 | ], 209 | 
"thresholds": "65, 90", 210 | "title": "App Node memory usage", 211 | "type": "singlestat", 212 | "valueFontSize": "80%", 213 | "valueMaps": [ 214 | { 215 | "op": "=", 216 | "text": "N/A", 217 | "value": "null" 218 | } 219 | ], 220 | "valueName": "current" 221 | }, 222 | { 223 | "cacheTimeout": null, 224 | "colorBackground": false, 225 | "colorValue": false, 226 | "colors": [ 227 | "rgba(50, 172, 45, 0.97)", 228 | "rgba(237, 129, 40, 0.89)", 229 | "rgba(245, 54, 54, 0.9)" 230 | ], 231 | "datasource": "${DS_PROMETHEUS}", 232 | "decimals": 2, 233 | "editable": true, 234 | "error": false, 235 | "format": "percent", 236 | "gauge": { 237 | "maxValue": 100, 238 | "minValue": 0, 239 | "show": true, 240 | "thresholdLabels": false, 241 | "thresholdMarkers": true 242 | }, 243 | "id": 6, 244 | "interval": null, 245 | "links": [], 246 | "mappingType": 1, 247 | "mappingTypes": [ 248 | { 249 | "name": "value to text", 250 | "value": 1 251 | }, 252 | { 253 | "name": "range to text", 254 | "value": 2 255 | } 256 | ], 257 | "maxDataPoints": 100, 258 | "nullPointMode": "connected", 259 | "nullText": null, 260 | "postfix": "", 261 | "postfixFontSize": "50%", 262 | "prefix": "", 263 | "prefixFontSize": "50%", 264 | "rangeMaps": [ 265 | { 266 | "from": "null", 267 | "text": "N/A", 268 | "to": "null" 269 | } 270 | ], 271 | "span": 3, 272 | "sparkline": { 273 | "fillColor": "rgba(31, 118, 189, 0.18)", 274 | "full": false, 275 | "lineColor": "rgb(31, 120, 193)", 276 | "show": false 277 | }, 278 | "tableColumn": "", 279 | "targets": [ 280 | { 281 | "expr": "sum(sum by (io_kubernetes_container_name)( rate(container_cpu_usage_seconds_total{image!=\"\"}[1m] ) )) / count(node_cpu{mode=\"system\"}) * 100", 282 | "interval": "10s", 283 | "intervalFactor": 1, 284 | "refId": "A", 285 | "step": 300 286 | } 287 | ], 288 | "thresholds": "65, 90", 289 | "title": "Cluster CPU usage", 290 | "type": "singlestat", 291 | "valueFontSize": "80%", 292 | "valueMaps": [ 293 | { 294 | "op": "=", 295 | "text": "N/A", 296 | "value": "null" 297 | } 298 | ], 299 | "valueName": "current" 300 | }, 301 | { 302 | "cacheTimeout": null, 303 | "colorBackground": false, 304 | "colorValue": false, 305 | "colors": [ 306 | "rgba(50, 172, 45, 0.97)", 307 | "rgba(237, 129, 40, 0.89)", 308 | "rgba(245, 54, 54, 0.9)" 309 | ], 310 | "datasource": "${DS_PROMETHEUS}", 311 | "decimals": 2, 312 | "editable": true, 313 | "error": false, 314 | "format": "percent", 315 | "gauge": { 316 | "maxValue": 100, 317 | "minValue": 0, 318 | "show": true, 319 | "thresholdLabels": false, 320 | "thresholdMarkers": true 321 | }, 322 | "id": 7, 323 | "interval": null, 324 | "links": [], 325 | "mappingType": 1, 326 | "mappingTypes": [ 327 | { 328 | "name": "value to text", 329 | "value": 1 330 | }, 331 | { 332 | "name": "range to text", 333 | "value": 2 334 | } 335 | ], 336 | "maxDataPoints": 100, 337 | "nullPointMode": "connected", 338 | "nullText": null, 339 | "postfix": "", 340 | "postfixFontSize": "50%", 341 | "prefix": "", 342 | "prefixFontSize": "50%", 343 | "rangeMaps": [ 344 | { 345 | "from": "null", 346 | "text": "N/A", 347 | "to": "null" 348 | } 349 | ], 350 | "span": 3, 351 | "sparkline": { 352 | "fillColor": "rgba(31, 118, 189, 0.18)", 353 | "full": false, 354 | "lineColor": "rgb(31, 120, 193)", 355 | "show": false 356 | }, 357 | "tableColumn": "", 358 | "targets": [ 359 | { 360 | "expr": "(sum(node_filesystem_size{mountpoint!~'/etc.*',device!~\"/dev/docker/docker.*\",fstype!~'(tmpfs|overlay)'}) - 
sum(node_filesystem_free{mountpoint!~'/etc.*',device!~\"/dev/docker/docker.*\",fstype!~'(tmpfs|overlay)'}) ) / sum(node_filesystem_size{mountpoint!~'/etc.*',device!~\"/dev/docker/docker.*\",fstype!~'(tmpfs|overlay)'}) * 100", 361 | "format": "time_series", 362 | "interval": "10s", 363 | "intervalFactor": 1, 364 | "metric": "", 365 | "refId": "A", 366 | "step": 300 367 | } 368 | ], 369 | "thresholds": "65, 90", 370 | "title": "Cluster Filesystem usage", 371 | "type": "singlestat", 372 | "valueFontSize": "80%", 373 | "valueMaps": [ 374 | { 375 | "op": "=", 376 | "text": "N/A", 377 | "value": "null" 378 | } 379 | ], 380 | "valueName": "current" 381 | } 382 | ], 383 | "repeat": null, 384 | "repeatIteration": null, 385 | "repeatRowId": null, 386 | "showTitle": false, 387 | "title": "Row", 388 | "titleSize": "h6" 389 | }, 390 | { 391 | "collapse": false, 392 | "height": "250px", 393 | "panels": [ 394 | { 395 | "aliasColors": {}, 396 | "bars": false, 397 | "dashLength": 10, 398 | "dashes": false, 399 | "datasource": "${DS_PROMETHEUS}", 400 | "decimals": 3, 401 | "editable": true, 402 | "error": false, 403 | "fill": 0, 404 | "grid": {}, 405 | "id": 3, 406 | "legend": { 407 | "alignAsTable": true, 408 | "avg": true, 409 | "current": true, 410 | "max": false, 411 | "min": false, 412 | "rightSide": true, 413 | "show": true, 414 | "sort": "current", 415 | "sortDesc": true, 416 | "total": false, 417 | "values": true 418 | }, 419 | "lines": true, 420 | "linewidth": 2, 421 | "links": [], 422 | "nullPointMode": "connected", 423 | "percentage": false, 424 | "pointradius": 5, 425 | "points": false, 426 | "renderer": "flot", 427 | "seriesOverrides": [], 428 | "spaceLength": 10, 429 | "span": 12, 430 | "stack": false, 431 | "steppedLine": false, 432 | "targets": [ 433 | { 434 | "expr": "sum by (pod_name)( rate(container_cpu_usage_seconds_total{pod_name!=\"\"}[1m] ) )", 435 | "format": "time_series", 436 | "interval": "10s", 437 | "intervalFactor": 1, 438 | "legendFormat": "{{ pod_name }}", 439 | "metric": "container_cpu", 440 | "refId": "A", 441 | "step": 10 442 | } 443 | ], 444 | "thresholds": [], 445 | "timeFrom": null, 446 | "timeShift": null, 447 | "title": "Pod CPU usage", 448 | "tooltip": { 449 | "msResolution": true, 450 | "shared": false, 451 | "sort": 0, 452 | "value_type": "cumulative" 453 | }, 454 | "type": "graph", 455 | "xaxis": { 456 | "buckets": null, 457 | "mode": "time", 458 | "name": null, 459 | "show": true, 460 | "values": [] 461 | }, 462 | "yaxes": [ 463 | { 464 | "format": "percent", 465 | "label": null, 466 | "logBase": 1, 467 | "max": null, 468 | "min": null, 469 | "show": true 470 | }, 471 | { 472 | "format": "short", 473 | "label": null, 474 | "logBase": 1, 475 | "max": null, 476 | "min": null, 477 | "show": true 478 | } 479 | ] 480 | } 481 | ], 482 | "repeat": null, 483 | "repeatIteration": null, 484 | "repeatRowId": null, 485 | "showTitle": false, 486 | "title": "New row", 487 | "titleSize": "h6" 488 | }, 489 | { 490 | "collapse": false, 491 | "height": "250px", 492 | "panels": [ 493 | { 494 | "aliasColors": {}, 495 | "bars": false, 496 | "dashLength": 10, 497 | "dashes": false, 498 | "datasource": "${DS_PROMETHEUS}", 499 | "decimals": 2, 500 | "editable": true, 501 | "error": false, 502 | "fill": 0, 503 | "grid": {}, 504 | "id": 2, 505 | "legend": { 506 | "alignAsTable": true, 507 | "avg": true, 508 | "current": true, 509 | "max": false, 510 | "min": false, 511 | "rightSide": true, 512 | "show": true, 513 | "sideWidth": 200, 514 | "sort": "current", 515 | "sortDesc": true, 516 | 
"total": false, 517 | "values": true 518 | }, 519 | "lines": true, 520 | "linewidth": 2, 521 | "links": [], 522 | "nullPointMode": "connected", 523 | "percentage": false, 524 | "pointradius": 5, 525 | "points": false, 526 | "renderer": "flot", 527 | "seriesOverrides": [], 528 | "spaceLength": 10, 529 | "span": 12, 530 | "stack": false, 531 | "steppedLine": false, 532 | "targets": [ 533 | { 534 | "expr": "sum(container_memory_usage_bytes{pod_name!=\"\"}) by (pod_name)", 535 | "format": "time_series", 536 | "interval": "10s", 537 | "intervalFactor": 1, 538 | "legendFormat": "{{ pod_name }}", 539 | "metric": "container_memory_usage:sort_desc", 540 | "refId": "A", 541 | "step": 10 542 | } 543 | ], 544 | "thresholds": [], 545 | "timeFrom": null, 546 | "timeShift": null, 547 | "title": "Pod memory usage", 548 | "tooltip": { 549 | "msResolution": false, 550 | "shared": false, 551 | "sort": 0, 552 | "value_type": "cumulative" 553 | }, 554 | "type": "graph", 555 | "xaxis": { 556 | "buckets": null, 557 | "mode": "time", 558 | "name": null, 559 | "show": true, 560 | "values": [] 561 | }, 562 | "yaxes": [ 563 | { 564 | "format": "bytes", 565 | "label": null, 566 | "logBase": 1, 567 | "max": null, 568 | "min": null, 569 | "show": true 570 | }, 571 | { 572 | "format": "short", 573 | "label": null, 574 | "logBase": 1, 575 | "max": null, 576 | "min": null, 577 | "show": true 578 | } 579 | ] 580 | }, 581 | { 582 | "aliasColors": {}, 583 | "bars": false, 584 | "dashLength": 10, 585 | "dashes": false, 586 | "datasource": "${DS_PROMETHEUS}", 587 | "decimals": 2, 588 | "editable": true, 589 | "error": false, 590 | "fill": 0, 591 | "grid": {}, 592 | "id": 8, 593 | "legend": { 594 | "alignAsTable": true, 595 | "avg": true, 596 | "current": true, 597 | "max": false, 598 | "min": false, 599 | "rightSide": true, 600 | "show": true, 601 | "sideWidth": 200, 602 | "sort": "current", 603 | "sortDesc": true, 604 | "total": false, 605 | "values": true 606 | }, 607 | "lines": true, 608 | "linewidth": 2, 609 | "links": [], 610 | "nullPointMode": "connected", 611 | "percentage": false, 612 | "pointradius": 5, 613 | "points": false, 614 | "renderer": "flot", 615 | "seriesOverrides": [], 616 | "spaceLength": 10, 617 | "span": 12, 618 | "stack": false, 619 | "steppedLine": false, 620 | "targets": [ 621 | { 622 | "expr": "sort_desc(- sum by (pod_name) (rate (container_network_receive_bytes_total{pod_name!=\"\"}[1m]) ))", 623 | "format": "time_series", 624 | "interval": "10s", 625 | "intervalFactor": 1, 626 | "legendFormat": "{{ pod_name }}", 627 | "metric": "network", 628 | "refId": "A", 629 | "step": 10 630 | }, 631 | { 632 | "expr": "sort_desc(sum by (pod_name) (rate (container_network_transmit_bytes_total{pod_name!=\"\"}[1m]) ))", 633 | "format": "time_series", 634 | "interval": "10s", 635 | "intervalFactor": 1, 636 | "legendFormat": "{{ pod_name }}", 637 | "metric": "network", 638 | "refId": "B", 639 | "step": 10 640 | } 641 | ], 642 | "thresholds": [], 643 | "timeFrom": null, 644 | "timeShift": null, 645 | "title": "Pod Network i/o", 646 | "tooltip": { 647 | "msResolution": false, 648 | "shared": false, 649 | "sort": 0, 650 | "value_type": "cumulative" 651 | }, 652 | "type": "graph", 653 | "xaxis": { 654 | "buckets": null, 655 | "mode": "time", 656 | "name": null, 657 | "show": true, 658 | "values": [] 659 | }, 660 | "yaxes": [ 661 | { 662 | "format": "bytes", 663 | "label": null, 664 | "logBase": 1, 665 | "max": null, 666 | "min": null, 667 | "show": true 668 | }, 669 | { 670 | "format": "short", 671 | "label": null, 672 
| "logBase": 1, 673 | "max": null, 674 | "min": null, 675 | "show": false 676 | } 677 | ] 678 | } 679 | ], 680 | "repeat": null, 681 | "repeatIteration": null, 682 | "repeatRowId": null, 683 | "showTitle": false, 684 | "title": "New row", 685 | "titleSize": "h6" 686 | }, 687 | { 688 | "collapse": false, 689 | "height": 250, 690 | "panels": [ 691 | { 692 | "aliasColors": {}, 693 | "bars": false, 694 | "dashLength": 10, 695 | "dashes": false, 696 | "datasource": "${DS_PROMETHEUS}", 697 | "fill": 1, 698 | "id": 9, 699 | "legend": { 700 | "alignAsTable": true, 701 | "avg": true, 702 | "current": false, 703 | "max": true, 704 | "min": false, 705 | "rightSide": true, 706 | "show": true, 707 | "sort": "avg", 708 | "sortDesc": true, 709 | "total": false, 710 | "values": true 711 | }, 712 | "lines": true, 713 | "linewidth": 1, 714 | "links": [], 715 | "nullPointMode": "null", 716 | "percentage": false, 717 | "pointradius": 5, 718 | "points": false, 719 | "renderer": "flot", 720 | "seriesOverrides": [], 721 | "spaceLength": 10, 722 | "span": 6, 723 | "stack": false, 724 | "steppedLine": false, 725 | "targets": [ 726 | { 727 | "expr": "sum by (pod_name)( rate(container_fs_reads_bytes_total{pod_name!=\"\"}[1m] ) )", 728 | "format": "time_series", 729 | "intervalFactor": 2, 730 | "legendFormat": "{{ pod_name }}", 731 | "refId": "A" 732 | } 733 | ], 734 | "thresholds": [], 735 | "timeFrom": null, 736 | "timeShift": null, 737 | "title": "Pod File Reads", 738 | "tooltip": { 739 | "shared": false, 740 | "sort": 0, 741 | "value_type": "individual" 742 | }, 743 | "type": "graph", 744 | "xaxis": { 745 | "buckets": null, 746 | "mode": "time", 747 | "name": null, 748 | "show": true, 749 | "values": [] 750 | }, 751 | "yaxes": [ 752 | { 753 | "format": "short", 754 | "label": null, 755 | "logBase": 1, 756 | "max": null, 757 | "min": null, 758 | "show": true 759 | }, 760 | { 761 | "format": "short", 762 | "label": null, 763 | "logBase": 1, 764 | "max": null, 765 | "min": null, 766 | "show": true 767 | } 768 | ] 769 | }, 770 | { 771 | "aliasColors": {}, 772 | "bars": false, 773 | "dashLength": 10, 774 | "dashes": false, 775 | "datasource": "${DS_PROMETHEUS}", 776 | "fill": 1, 777 | "id": 10, 778 | "legend": { 779 | "alignAsTable": true, 780 | "avg": true, 781 | "current": false, 782 | "max": true, 783 | "min": false, 784 | "rightSide": true, 785 | "show": true, 786 | "sort": "avg", 787 | "sortDesc": true, 788 | "total": false, 789 | "values": true 790 | }, 791 | "lines": true, 792 | "linewidth": 1, 793 | "links": [], 794 | "nullPointMode": "null", 795 | "percentage": false, 796 | "pointradius": 5, 797 | "points": false, 798 | "renderer": "flot", 799 | "seriesOverrides": [], 800 | "spaceLength": 10, 801 | "span": 6, 802 | "stack": false, 803 | "steppedLine": false, 804 | "targets": [ 805 | { 806 | "expr": "sum by (pod_name)( rate(container_fs_writes_bytes_total{pod_name!=\"\"}[1m] ) )", 807 | "format": "time_series", 808 | "intervalFactor": 2, 809 | "legendFormat": "{{ pod_name }}", 810 | "refId": "A" 811 | } 812 | ], 813 | "thresholds": [], 814 | "timeFrom": null, 815 | "timeShift": null, 816 | "title": "Pod File Writes", 817 | "tooltip": { 818 | "shared": false, 819 | "sort": 0, 820 | "value_type": "individual" 821 | }, 822 | "type": "graph", 823 | "xaxis": { 824 | "buckets": null, 825 | "mode": "time", 826 | "name": null, 827 | "show": true, 828 | "values": [] 829 | }, 830 | "yaxes": [ 831 | { 832 | "format": "short", 833 | "label": null, 834 | "logBase": 1, 835 | "max": null, 836 | "min": null, 837 | "show": 
true 838 | }, 839 | { 840 | "format": "short", 841 | "label": null, 842 | "logBase": 1, 843 | "max": null, 844 | "min": null, 845 | "show": true 846 | } 847 | ] 848 | } 849 | ], 850 | "repeat": null, 851 | "repeatIteration": null, 852 | "repeatRowId": null, 853 | "showTitle": false, 854 | "title": "Dashboard Row", 855 | "titleSize": "h6" 856 | } 857 | ], 858 | "schemaVersion": 14, 859 | "style": "dark", 860 | "tags": [ 861 | "kubernetes" 862 | ], 863 | "templating": { 864 | "list": [] 865 | }, 866 | "time": { 867 | "from": "now-1h", 868 | "to": "now" 869 | }, 870 | "timepicker": { 871 | "refresh_intervals": [ 872 | "5s", 873 | "10s", 874 | "30s", 875 | "1m", 876 | "5m", 877 | "15m", 878 | "30m", 879 | "1h", 880 | "2h", 881 | "1d" 882 | ], 883 | "time_options": [ 884 | "5m", 885 | "15m", 886 | "1h", 887 | "6h", 888 | "12h", 889 | "24h", 890 | "2d", 891 | "7d", 892 | "30d" 893 | ] 894 | }, 895 | "timezone": "browser", 896 | "title": "Kubernetes cluster monitoring (via Prometheus)", 897 | "version": 9 898 | } -------------------------------------------------------------------------------- /hack/README.md: -------------------------------------------------------------------------------- 1 | # README 2 | 3 | This directory contains helper script for STC development. 4 | 5 | 6 | ## kcli plan 7 | 8 | What is kcli: https://github.com/karmab/kcli 9 | 10 | ``` 11 | $ kcli plan -f hack/kcli-plan/mini.yml stc 12 | [ snipped ] 13 | $ kcli list 14 | +---------------------+--------+------------+----------------------------------+------+---------+--------+ 15 | | Name | Status | Ips | Source 16 | | Plan | Profile | Report | 17 | +---------------------+--------+------------+----------------------------------+------+---------+--------+ 18 | | bastion.example.com | up | 10.88.3.2 | rhel-server-7.6-x86_64-kvm.qcow2 19 | | stc | kvirt | | 20 | | i01.example.com | up | 10.88.3.50 | rhel-server-7.6-x86_64-kvm.qcow2 21 | | stc | kvirt | | 22 | | m01.example.com | up | 10.88.3.30 | rhel-server-7.6-x86_64-kvm.qcow2 23 | | stc | kvirt | | 24 | | n01.example.com | up | 10.88.3.40 | rhel-server-7.6-x86_64-kvm.qcow2 25 | | stc | kvirt | | 26 | +---------------------+--------+------------+----------------------------------+------+---------+--------+ 27 | ``` 28 | 29 | ### Full automated setup 30 | 31 | Create `kcli-parameters.yml` 32 | ``` 33 | --- 34 | stc_clone_url: https://github.com/rbo/stc.git 35 | stc_clone_branch: devel 36 | deploy: true 37 | api_dns: api.example.com 38 | apps_dns: apps.example.com 39 | install_logging: y 40 | install_metrics: y 41 | registry_token_user: nnnn|username 42 | registry_token: $TOKEN$ 43 | ``` 44 | 45 | Run kcli 46 | ``` 47 | kcli plan \ 48 | --paramfile /workdir/kcli-parameters.yml \ 49 | -f hack/kcli-plan/mini.yml \ 50 | stc 51 | ``` 52 | -------------------------------------------------------------------------------- /hack/kcli-plan/files/env.yml: -------------------------------------------------------------------------------- 1 | --- 2 | ocp_version: 3.11 3 | api_dns: {{ api_dns }} 4 | apps_dns: {{ apps_dns }} 5 | bastion: bastion.example.com 6 | lb: bastion.example.com 7 | masters: 8 | {% for master in range(0, masters) -%} 9 | - {{ prefix }}m0{{ master +1 }}.{{ domain }} 10 | {%- endfor %} 11 | infranodes: 12 | {% for infra in range(0, infras) -%} 13 | - {{ prefix }}i0{{ infra +1 }}.{{ domain }} 14 | {%- endfor %} 15 | nodes: 16 | {% for node in range(0, nodes) -%} 17 | - {{ prefix }}n0{{ node +1 }}.{{ domain }} 18 | {%- endfor %} 19 | cns: 20 | - m01.example.com 21 | - 
i01.example.com 22 | - n01.example.com 23 | container_disk: vdb 24 | ocs_disk: vdc 25 | ssh_user: cloud-user 26 | install_logging: {{ install_logging }} 27 | install_metrics: {{ install_metrics }} 28 | registry_token_user: {{ registry_token_user }} 29 | registry_token: {{ registry_token }} 30 | -------------------------------------------------------------------------------- /hack/kcli-plan/files/install-ocp.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | set -x 5 | 6 | cd /home/cloud-user/stc/ 7 | ansible-playbook -e sudo_password="" playbooks/validate_config.yml 8 | 9 | # Don't need --ask-vault-pass because we don't run ansible-vaule encrypt ... 10 | # Don't need -k -- we don't have a SSH password 11 | ansible-playbook -i inventory playbooks/prepare_ssh.yml 12 | 13 | cd /usr/share/ansible/openshift-ansible/ 14 | ansible-playbook -i /home/cloud-user/stc/inventory playbooks/prerequisites.yml 15 | ansible-playbook -i /home/cloud-user/stc/inventory playbooks/deploy_cluster.yml 16 | 17 | 18 | -------------------------------------------------------------------------------- /hack/kcli-plan/files/secrets.yml: -------------------------------------------------------------------------------- 1 | --- 2 | sudo_password: "" 3 | -------------------------------------------------------------------------------- /hack/kcli-plan/mini.yml: -------------------------------------------------------------------------------- 1 | parameters: 2 | prefix: '' 3 | template: rhel-server-7.6-x86_64-kvm.qcow2 4 | base_disk_size: 60 5 | container_disk_size: 50 6 | container_disk: vdb 7 | glusterfs_disk_size: 100 8 | glusterfs_disk: vdc 9 | master_memory: 16384 10 | infra_memory: 16384 11 | node_memory: 16384 12 | domain: example.com 13 | pool: default 14 | masters: 1 15 | infras: 1 16 | nodes: 1 17 | version: 3.11 18 | stc_clone_url: https://github.com/RedHat-EMEA-SSA-Team/stc.git 19 | notifycmd: "journalctl -n 45 --no-pager /usr/bin/cloud-init" 20 | 21 | api_dns: '' 22 | apps_dns: '' 23 | install_logging: n 24 | install_metrics: n 25 | registry_token_user: '' 26 | registry_token: '' 27 | deploy: false 28 | 29 | {{ domain }}: 30 | type: network 31 | cidr: 10.88.3.0/24 32 | dhcp: True 33 | nat: true 34 | 35 | {{ prefix }}bastion: 36 | template: {{ template }} 37 | numcpus: 2 38 | memory: 4096 39 | rhnregister: true 40 | reservedns: true 41 | sharedkey: true 42 | nets: 43 | - name: {{ domain }} 44 | ip: 10.88.3.2 45 | mask: 255.255.255.0 46 | gateway: 10.88.3.1 47 | dns: 10.88.3.1 48 | disks: 49 | - size: {{ base_disk_size }} 50 | pool: {{ pool }} 51 | files: 52 | - path: /tmp/env.yml 53 | origin: files/env.yml 54 | - path: /tmp/secrets.yml 55 | origin: files/secrets.yml 56 | - path: /tmp/install-ocp.sh 57 | origin: files/install-ocp.sh 58 | scripts: 59 | - scripts/prep-host.sh 60 | - scripts/prep-bastion.sh 61 | notify: true 62 | notifycmd: {{ notifycmd }} 63 | 64 | {% for master in range(0, masters) %} 65 | {{ prefix }}m0{{ master + 1 }}: 66 | template: {{ template }} 67 | numcpus: 4 68 | memory: {{ master_memory }} 69 | rhnregister: true 70 | reservedns: true 71 | sharedkey: true 72 | nets: 73 | - name: {{ domain }} 74 | ip: 10.88.3.3{{ master }} 75 | mask: 255.255.255.0 76 | gateway: 10.88.3.1 77 | dns: 10.88.3.1 78 | disks: 79 | - size: {{ base_disk_size }} 80 | - size: {{ container_disk_size }} 81 | - size: {{ glusterfs_disk_size }} 82 | pool: {{ pool }} 83 | scripts: 84 | - scripts/prep-host.sh 85 | {% endfor %} 86 | 87 | {% for node in 
range(0, nodes) %} 88 | {{ prefix }}n0{{ node + 1 }}: 89 | template: {{ template }} 90 | numcpus: 4 91 | memory: {{ node_memory }} 92 | rhnregister: true 93 | reservedns: true 94 | sharedkey: true 95 | nets: 96 | - name: {{ domain }} 97 | ip: 10.88.3.4{{ node }} 98 | mask: 255.255.255.0 99 | gateway: 10.88.3.1 100 | dns: 10.88.3.1 101 | disks: 102 | - size: {{ base_disk_size }} 103 | - size: {{ container_disk_size }} 104 | - size: {{ glusterfs_disk_size }} 105 | pool: {{ pool }} 106 | scripts: 107 | - scripts/prep-host.sh 108 | {% endfor %} 109 | 110 | {% for infra in range(0, infras) %} 111 | {{ prefix }}i0{{ infra + 1 }}: 112 | template: {{ template }} 113 | numcpus: 4 114 | memory: {{ infra_memory }} 115 | rhnregister: true 116 | reservedns: true 117 | sharedkey: true 118 | nets: 119 | - name: {{ domain }} 120 | ip: 10.88.3.5{{ infra }} 121 | mask: 255.255.255.0 122 | gateway: 10.88.3.1 123 | dns: 10.88.3.1 124 | disks: 125 | - size: {{ base_disk_size }} 126 | - size: {{ container_disk_size }} 127 | - size: {{ glusterfs_disk_size }} 128 | pool: {{ pool }} 129 | scripts: 130 | - scripts/prep-host.sh 131 | {% endfor %} 132 | -------------------------------------------------------------------------------- /hack/kcli-plan/scripts/prep-bastion.sh: -------------------------------------------------------------------------------- 1 | yum install -y git openshift-ansible screen tmux vim 2 | 3 | cp -v /root/.ssh/id_rsa* /home/cloud-user/.ssh/ 4 | chown cloud-user:cloud-user /home/cloud-user/.ssh/id_rsa* 5 | 6 | echo -e '#!/bin/sh\nexec /usr/bin/ssh -o StrictHostKeyChecking=no "$@"' >> /usr/local/bin/ssh-ignore-key 7 | chmod +x /usr/local/bin/ssh-ignore-key 8 | export GIT_SSH="/usr/local/bin/ssh-ignore-key" 9 | 10 | cd /home/cloud-user/ 11 | 12 | git clone -b {{ stc_clone_branch | default('master') }} --single-branch {{ stc_clone_url }} 13 | 14 | mv -v /tmp/env.yml /home/cloud-user/stc/env.yml 15 | mv -v /tmp/secrets.yml /home/cloud-user/stc/secrets.yml 16 | mv -v /tmp/install-ocp.sh /home/cloud-user/install-ocp.sh 17 | 18 | 19 | chown -R cloud-user:cloud-user /home/cloud-user/stc/ 20 | chown cloud-user:cloud-user /home/cloud-user/install-ocp.sh 21 | chmod +x /home/cloud-user/install-ocp.sh 22 | 23 | {% if deploy %} 24 | runuser --user cloud-user /home/cloud-user/install-ocp.sh 25 | {% endif %} 26 | -------------------------------------------------------------------------------- /hack/kcli-plan/scripts/prep-host.sh: -------------------------------------------------------------------------------- 1 | timedatectl set-timezone UTC 2 | subscription-manager repos \ 3 | --enable=rhel-7-server-extras-rpms \ 4 | --enable=rhel-7-server-ose-{{ version }}-rpms \ 5 | --enable=rhel-7-server-ansible-2.6-rpms 6 | -------------------------------------------------------------------------------- /playbooks/bb00-openstack_deprovisioning.yml: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env ansible-playbook 2 | --- 3 | 4 | - hosts: localhost 5 | gather_facts: False 6 | become: False 7 | connection: local 8 | serial: 1 # Because my lab is not so fast 9 | tasks: 10 | - name: Parameter validation 11 | import_role: 12 | name: bb0-openstack 13 | tasks_from: validate-parameters.yml 14 | 15 | - name: Create mini inventory 16 | import_role: 17 | name: bb0-openstack 18 | tasks_from: create-inventory-mini.yml 19 | 20 | 21 | - hosts: openstack_instances 22 | gather_facts: False 23 | become: False 24 | connection: local 25 | tasks: 26 | - name: Deprovisioning instances 27 | 
import_role: 28 | name: bb0-openstack 29 | tasks_from: deprovisioning.yml 30 | 31 | - hosts: localhost 32 | gather_facts: False 33 | become: False 34 | connection: local 35 | tasks: 36 | - name: Deprovisioning post once 37 | import_role: 38 | name: bb0-openstack 39 | tasks_from: deprovisioning-post-once.yml 40 | 41 | -------------------------------------------------------------------------------- /playbooks/bb00-openstack_provisioning.yml: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env ansible-playbook 2 | --- 3 | 4 | - hosts: localhost 5 | gather_facts: False 6 | become: False 7 | connection: local 8 | serial: 1 # Because my lab is not so fast 9 | tasks: 10 | - name: Parameter validation 11 | import_role: 12 | name: bb0-openstack 13 | tasks_from: validate-parameters.yml 14 | 15 | - name: Create mini inventory 16 | import_role: 17 | name: bb0-openstack 18 | tasks_from: create-inventory-mini.yml 19 | 20 | - name: Provisioning pre once 21 | import_role: 22 | name: bb0-openstack 23 | tasks_from: provisioning-pre-once.yml 24 | 25 | 26 | # openstack_instances, created by create-inventory-mini.yml 27 | - hosts: openstack_instances 28 | gather_facts: False 29 | become: False 30 | connection: local 31 | serial: 1 # Because my lab is not so fast 32 | tasks: 33 | - name: Provisioning 34 | import_role: 35 | name: bb0-openstack 36 | tasks_from: provisioning.yml 37 | -------------------------------------------------------------------------------- /playbooks/bb1/add_user.yml: -------------------------------------------------------------------------------- 1 | - name: Create new user 2 | hosts: masters[0] 3 | gather_facts: false 4 | vars_prompt: 5 | - name: username 6 | prompt: Username 7 | private: false 8 | - name: password 9 | prompt: Password 10 | private: true 11 | tasks: 12 | - name: Add user 13 | command: "htpasswd -b /etc/origin/master/htpasswd {{username}} {{password}}" 14 | -------------------------------------------------------------------------------- /playbooks/bb4/proxy.yml: -------------------------------------------------------------------------------- 1 | - name: Setup squid proxy 2 | hosts: bastion 3 | gather_facts: no 4 | vars_files: 5 | - ../vars/bb4.yml 6 | - ../group_vars/all 7 | tasks: 8 | - name: Install required packages 9 | yum: 10 | name: "{{item}}" 11 | state: present 12 | with_items: "{{proxy_packages}}" 13 | - name: Create squid conf 14 | template: 15 | src: templates/squid.j2 16 | dest: /etc/squid/squid.conf 17 | mode: 0755 18 | notify: Restart squid 19 | handlers: 20 | - name: Restart squid 21 | service: 22 | name: squid 23 | state: restarted 24 | 25 | 26 | -------------------------------------------------------------------------------- /playbooks/bb4/redhat-registry-mirror.yml: -------------------------------------------------------------------------------- 1 | - name: Setup disconnected installation 2 | hosts: bastion 3 | gather_facts: no 4 | vars_files: 5 | - ../vars/bb4.yml 6 | - ../group_vars/all 7 | tasks: 8 | - name: Install required packages 9 | yum: 10 | name: "{{item}}" 11 | state: present 12 | with_items: "{{registry_packages}}" 13 | - name: Configure Docker Registry 14 | template: 15 | src: templates/config.j2 16 | dest: "{{registry_path}}/{{registry_conf}}" 17 | - name: Generate Registry certificates 18 | shell: openssl req -x509 -newkey rsa:4096 -nodes -sha256 -days 3650 -keyout cert.key -out cert.cert -subj "/CN={{registry.split(':')[0]}}" 19 | args: 20 | chdir: "{{registry_path}}" 21 | register: cert_gen 
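    # The certificate generated by the task above is self-signed and valid for 3650
    # days. As a rough manual check on the bastion (hypothetical command, not executed
    # by this play), its subject and validity dates can be inspected with:
    #   openssl x509 -in <registry_path>/cert.cert -noout -subject -dates
    # where <registry_path> is the registry_path variable used above.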
22 | - name: Create Registry dir in /etc/docker/certs.d/ 23 | file: 24 | path: "/etc/docker/certs.d/{{registry.split(':')[0]}}" 25 | state: directory 26 | mode: 0755 27 | - name: Create a symlink inside /etc/docker/certs.d/ for generate certs 28 | file: 29 | src: "{{registry_path}}/cert.cert" 30 | dest: "/etc/docker/certs.d/{{registry.split(':')[0]}}/cert.crt" 31 | state: link 32 | - name: Start and enable Docker Registry 33 | shell: systemctl enable docker-distribution && systemctl start docker-distribution 34 | register: registry_stat 35 | - name: Create script to sync images 36 | template: 37 | src: templates/local-registry-setup-v2.j2 38 | dest: /tmp/local-registry-setup-v2 39 | mode: 0755 40 | - name: Sync images 41 | shell: /tmp/local-registry-setup-v2 > /tmp/sync 42 | register: sync 43 | 44 | 45 | 46 | 47 | -------------------------------------------------------------------------------- /playbooks/bb4/templates/config.j2: -------------------------------------------------------------------------------- 1 | version: 0.1 2 | log: 3 | level: warn 4 | fields: 5 | service: registry 6 | storage: 7 | cache: 8 | layerinfo: inmemory 9 | filesystem: 10 | rootdirectory: /var/lib/registry 11 | http: 12 | addr: :{{registry.split(':')[1] }} 13 | relativeurls: true 14 | secret: {{registry_secret}} 15 | tls: 16 | certificate: /etc/docker-distribution/registry/cert.cert 17 | key: /etc/docker-distribution/registry/cert.key 18 | #clientcas: /etc/docker-distribution/registry/ca.cert 19 | 20 | -------------------------------------------------------------------------------- /playbooks/bb4/templates/local-registry-setup-v2.j2: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | OSE_VERS={{ocp_version}} 4 | REG_VERS=2 5 | UPSTREAM=registry.access.redhat.com 6 | REGISTRY={{registry}} 7 | # https://bugzilla.redhat.com/show_bug.cgi?id=1481130 8 | USE_SKOPEO=yes 9 | SKOPEO_DEST_VERIFY=false 10 | 11 | # Add/remove XXX to variable names to disable/enable syncing of the images 12 | ose_images=" 13 | openshift3/ose-deployer 14 | openshift3/ose-docker-builder 15 | openshift3/ose-docker-registry 16 | openshift3/ose-haproxy-router 17 | openshift3/ose-pod 18 | openshift3/ose-sti-builder 19 | 20 | openshift3/registry-console 21 | 22 | openshift3/logging-auth-proxy 23 | openshift3/oauth-proxy 24 | openshift3/logging-curator 25 | openshift3/logging-elasticsearch 26 | openshift3/logging-fluentd 27 | openshift3/logging-kibana 28 | {% if '%0.2f'| format(ocp_version|float) == '3.10' %} 29 | openshift3/metrics-cassandra 30 | openshift3/metrics-hawkular-metrics 31 | openshift3/metrics-heapster 32 | {% endif %} 33 | 34 | openshift3/prometheus 35 | openshift3/prometheus-alert-buffer 36 | openshift3/prometheus-alertmanager 37 | openshift3/prometheus-node-exporter 38 | 39 | openshift3/ose-service-catalog 40 | openshift3/ose-ansible-service-broker 41 | openshift3/mediawiki-apb 42 | openshift3/postgresql-apb 43 | 44 | openshift3/registry-console 45 | " 46 | 47 | ose_images_cont=" 48 | rhel7/cockpit 49 | rhel7/etcd 50 | openshift3/ose 51 | openshift3/node 52 | openshift3/openvswitch 53 | " 54 | 55 | XXXose_images_opt=" 56 | openshift3/ose-egress-router 57 | openshift3/ose-keepalived-ipfailover 58 | 59 | openshift3/image-inspector 60 | " 61 | 62 | xpaas_images=" 63 | redhat-openjdk-18/openjdk18-openshift 64 | jboss-webserver-3/webserver30-tomcat8-openshift 65 | jboss-eap-7/eap70-openshift 66 | redhat-sso-7/sso70-openshift 67 | rhscl/postgresql-95-rhel7 68 | " 69 | 70 | cns_images=" 71 
| rhgs3/rhgs-server-rhel7 72 | rhgs3/rhgs-volmanager-rhel7 73 | rhgs3/rhgs-gluster-block-prov-rhel7 74 | rhgs3/rhgs-s3-server-rhel7 75 | " 76 | 77 | jenkins_images=" 78 | openshift3/jenkins-2-rhel7 79 | openshift3/jenkins-slave-base-rhel7 80 | openshift3/jenkins-slave-maven-rhel7 81 | openshift3/jenkins-slave-nodejs-rhel7 82 | " 83 | 84 | # Configure Docker if needed 85 | [ "$USE_SKOPEO" != "yes" ] && (rpm -q docker > /dev/null 2>&1 || yum install -y docker) 86 | if [ "$USE_SKOPEO" != "yes" ] && ! grep -q "add-registry $REGISTRY" /etc/sysconfig/docker; then 87 | systemctl stop docker 88 | sed -i -e 's,--log-driver=,--log-level=warn --log-driver=,' /etc/sysconfig/docker 89 | sed -i -e 's,--log-level=,--max-concurrent-downloads=10 --log-level=,' /etc/sysconfig/docker 90 | sed -i -e 's,--log-level=,--max-concurrent-uploads=10 --log-level=,' /etc/sysconfig/docker 91 | sed -i -e 's,^ADD_REGISTRY=,#ADD_REGISTRY=,' /etc/sysconfig/docker 92 | sed -i -e 's,^BLOCKED_REGISTRY=,#BLOCKED_REGISTRY=,' /etc/sysconfig/docker 93 | sed -i -e 's,^INSECURE_REGISTRY=,#INSECURE_REGISTRY=,' /etc/sysconfig/docker 94 | cat <> /etc/sysconfig/docker 95 | ADD_REGISTRY='--add-registry $REGISTRY --add-registry $UPSTREAM' 96 | BLOCK_REGISTRY='--block-registry all' 97 | EOF 98 | if [ $REG_VERS -eq 1 ]; then 99 | echo INSECURE_REGISTRY=\'--insecure-registry $REGISTRY\' >> /etc/sysconfig/docker 100 | fi 101 | systemctl enable docker 102 | fi 103 | [ "$USE_SKOPEO" != "yes" ] && systemctl start docker 104 | 105 | # Pull/copy 106 | for img in $ose_images $ose_images_cont $ose_images_opt $cns_images; do 107 | avail="$(curl -s https://$UPSTREAM/v1/repositories/$img/tags | grep -Po '"v?'${OSE_VERS/\./\\.}'.*?"' | tr -d '"' | sort -V)" 108 | # rhel7/etcd has its own versioning 109 | if [ "$img" = "rhel7/etcd" -o "$img" = "rhgs3/rhgs-server-rhel7" -o "$img" = "rhgs3/rhgs-volmanager-rhel7" -o "$img" = "rhgs3/rhgs-gluster-block-prov-rhel7" -o "$img" = "rhgs3/rhgs-s3-server-rhel7" ]; then 110 | [ "$USE_SKOPEO" != "yes" ] && docker pull $UPSTREAM/$img 111 | [ "$USE_SKOPEO" = "yes" ] && echo Copying $img... && skopeo copy --dest-tls-verify=$SKOPEO_DEST_VERIFY --dest-cert-dir=/etc/docker-distribution/registry docker://$UPSTREAM/$img docker://$REGISTRY/$img 112 | fi 113 | [ -n "$avail" ] || continue 114 | # Get latest images with and without v in the tag / patch level 115 | tags="" 116 | tags="$tags $(printf %s\\n $avail | grep v${OSE_VERS}$)" 117 | tags="$tags $(printf %s\\n $avail | grep ^v | tail -n 1)" 118 | tags="$tags $(printf %s\\n $avail | grep -v ^v | tail -n 1)" 119 | tags="$tags $(printf %s\\n $avail | grep ^v | grep -v -- - | tail -n 1)" 120 | tags="$tags $(printf %s\\n $avail | grep -v ^v | grep -v -- - | tail -n 1)" 121 | tags="$(echo $tags | tr ' ' '\n' | sort -u)" 122 | for tag in $tags; do 123 | if [ "$USE_SKOPEO" != "yes" ]; then 124 | docker pull $UPSTREAM/$img:$tag || exit 1 125 | else 126 | echo Copying $img:$tag... 127 | skopeo copy --dest-tls-verify=$SKOPEO_DEST_VERIFY --dest-cert-dir=/etc/docker-distribution/registry docker://$UPSTREAM/$img:$tag docker://$REGISTRY/$img:$tag || exit 1 128 | fi 129 | done 130 | done 131 | 132 | for img in $xpaas_images $jenkins_images; do 133 | # Latest only 134 | if [ "$USE_SKOPEO" != "yes" ]; then 135 | docker pull $UPSTREAM/$img || exit 2 136 | else 137 | echo Copying $img... 
138 | skopeo copy --dest-tls-verify=$SKOPEO_DEST_VERIFY --dest-cert-dir=/etc/docker-distribution/registry docker://$UPSTREAM/$img docker://$REGISTRY/$img || exit 2 139 | fi 140 | done 141 | 142 | # Push 143 | if [ "$USE_SKOPEO" != "yes" ]; then 144 | images="$(docker images)" 145 | for img in $ose_images $ose_images_cont $ose_images_opt $xpaas_images $jenkins_images; do 146 | for tag in $(printf %s\\n "$images" | awk '/'$UPSTREAM\\/${img/\//\\/}' / {print $2}'); do 147 | [ "$tag" = "" ] && continue 148 | docker tag $UPSTREAM/$img:$tag $REGISTRY/$img:$tag || exit 3 149 | docker push $REGISTRY/$img:$tag || exit 4 150 | docker rmi $REGISTRY/$img:$tag || exit 5 151 | done 152 | done 153 | fi 154 | 155 | # Garbage collect 156 | /usr/bin/registry garbage-collect /etc/docker-distribution/registry/config.yml 157 | -------------------------------------------------------------------------------- /playbooks/bb4/templates/squid.j2: -------------------------------------------------------------------------------- 1 | acl mynetwork src {{proxy_cidr}} 2 | 3 | http_access allow mynetwork 4 | 5 | #defaults 6 | acl localnet src 10.0.0.0/8 7 | acl localnet src 172.16.0.0/12 8 | acl localnet src 192.168.0.0/16 9 | acl localnet src fc00::/7 10 | acl localnet src fe80::/10 11 | acl SSL_ports port 443 12 | acl Safe_ports port 80 13 | acl Safe_ports port 21 14 | acl Safe_ports port 443 15 | acl Safe_ports port 70 16 | acl Safe_ports port 210 17 | acl Safe_ports port 1025-65535 18 | acl Safe_ports port 280 19 | acl Safe_ports port 488 20 | acl Safe_ports port 591 21 | acl Safe_ports port 777 22 | acl CONNECT method CONNECT 23 | http_access allow manager localhost 24 | http_access deny manager 25 | http_access deny !Safe_ports 26 | http_access deny CONNECT !SSL_ports 27 | http_access allow localnet 28 | http_access allow localhost 29 | http_access deny all 30 | http_port 3128 31 | coredump_dir /var/spool/squid2 32 | refresh_pattern ^ftp: 1440 20% 10080 33 | refresh_pattern ^gopher: 1440 0% 1440 34 | refresh_pattern -i (/cgi-bin/|\?) 0 0% 0 35 | refresh_pattern . 
0 20% 4320 36 | 37 | -------------------------------------------------------------------------------- /playbooks/bb8/deploy_fis.yml: -------------------------------------------------------------------------------- 1 | - name: Deploy Monitoring 2 | hosts: masters[0] 3 | vars: 4 | BASEURL: https://raw.githubusercontent.com/jboss-fuse/application-templates/GA 5 | gather_facts: no 6 | tasks: 7 | 8 | - name: Switch to project 'Openshift' 9 | command: oc project openshift 10 | 11 | - name: Update FIS ImageStream 12 | command: oc replace --force -n openshift -f {{BASEURL}}/fis-image-streams.json 13 | 14 | - name: Update karaf2-camel-amq-template 15 | command: oc replace --force -n openshift -f {{BASEURL}}/quickstarts/karaf2-camel-amq-template.json 16 | 17 | - name: Update karaf2-camel-log-template 18 | command: oc replace --force -n openshift -f {{BASEURL}}/quickstarts/karaf2-camel-log-template.json 19 | 20 | - name: Update karaf2-camel-rest-sql-template 21 | command: oc replace --force -n openshift -f {{BASEURL}}/quickstarts/karaf2-camel-rest-sql-template.json 22 | 23 | - name: Update karaf2-cxf-rest-template 24 | command: oc replace --force -n openshift -f {{BASEURL}}/quickstarts/karaf2-cxf-rest-template.json 25 | 26 | - name: Update spring-boot-camel-template 27 | command: oc replace --force -n openshift -f {{BASEURL}}/quickstarts/spring-boot-camel-template.json 28 | 29 | - name: Update spring-boot-camel-amq-template 30 | command: oc replace --force -n openshift -f {{BASEURL}}/quickstarts/spring-boot-camel-amq-template.json 31 | 32 | - name: Update spring-boot-camel-config-template 33 | command: oc replace --force -n openshift -f {{BASEURL}}/quickstarts/spring-boot-camel-config-template.json 34 | 35 | - name: Update spring-boot-camel-drools-template 36 | command: oc replace --force -n openshift -f {{BASEURL}}/quickstarts/spring-boot-camel-drools-template.json 37 | 38 | - name: Update spring-boot-camel-infinispan-template 39 | command: oc replace --force -n openshift -f {{BASEURL}}/quickstarts/spring-boot-camel-infinispan-template.json 40 | 41 | - name: Update spring-boot-camel-rest-sql-template 42 | command: oc replace --force -n openshift -f {{BASEURL}}/quickstarts/spring-boot-camel-rest-sql-template.json 43 | 44 | - name: Update spring-boot-camel-teiid-template 45 | command: oc replace --force -n openshift -f {{BASEURL}}/quickstarts/spring-boot-camel-teiid-template.json 46 | 47 | - name: Update spring-boot-camel-xml-template 48 | command: oc replace --force -n openshift -f {{BASEURL}}/quickstarts/spring-boot-camel-xml-template.json 49 | 50 | - name: Update spring-boot-cxf-jaxws-template 51 | command: oc replace --force -n openshift -f {{BASEURL}}/quickstarts/spring-boot-cxf-jaxws-template.json 52 | 53 | - name: Update spring-boot-cxf-jaxws-template 54 | command: oc replace --force -n openshift -f {{BASEURL}}/quickstarts/spring-boot-cxf-jaxws-template.json 55 | 56 | -------------------------------------------------------------------------------- /playbooks/group_vars/all: -------------------------------------------------------------------------------- 1 | file_inventory: ../inventory 2 | file_env: ../env.yml 3 | file_secrets: ../secrets.yml 4 | file_ip_data: ../ip 5 | sku_name: Employee SKU 6 | disk_free_space: 20 7 | node_mem: 16384 8 | node_vcpus: 4 9 | sizing: fixed 10 | bandwidth_limit: 1 11 | ocp_version: "3.11" 12 | ocs_version_tag: "v3.11" 13 | repos: 14 | - rhel-7-server-rpms 15 | - rhel-7-server-extras-rpms 16 | - "rhel-7-server-ose-{{ '%0.2f'| format(ocp_version|float) }}-rpms" 17 | 
# Does not make sense on nodes, but is so documented! 18 | # https://docs.openshift.com/container-platform/3.11/install/host_preparation.html 19 | - rhel-7-server-ansible-2.6-rpms 20 | packages: 21 | - wget 22 | - git 23 | - net-tools 24 | - bind-utils 25 | - iptables-services 26 | - bridge-utils 27 | - bash-completion 28 | - kexec-tools 29 | - sos 30 | - psacct 31 | packages_jumphost: 32 | - openshift-ansible 33 | firewall_ports: 34 | - 8443 35 | - 80 36 | - 443 37 | - 53 38 | - 10250 39 | - 2049 40 | - 2379 41 | - 2380 42 | - 4001 43 | - 4789 44 | - 9000 45 | - 1936 46 | - 9200 47 | - 9300 48 | proxy_whitelist: 49 | - github.com 50 | - redhat.com 51 | docker_vg: ocp 52 | docker_version: "1.13" 53 | docker_prepull_tag: "v{{ '%0.2f'| format(ocp_version|float) }}" 54 | docker_prepull: 55 | - registry.access.redhat.com/openshift3/ose-deployer 56 | - registry.access.redhat.com/openshift3/ose-node-problem-detector 57 | - registry.access.redhat.com/openshift3/ose-pod 58 | - registry.access.redhat.com/openshift3/ose-node 59 | - registry.access.redhat.com/openshift3/ose-docker-builder 60 | post_install_components: 61 | - console 62 | - monitoring 63 | - metering 64 | -------------------------------------------------------------------------------- /playbooks/prepare_ssh.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Collect information about subscription and remote user 3 | hosts: localhost 4 | gather_facts: no 5 | vars_files: 6 | - "{{file_env}}" 7 | tasks: 8 | - name: Create key for remote user 9 | user: 10 | name: "{{ssh_user}}" 11 | generate_ssh_key: yes 12 | ssh_key_bits: 2048 13 | ssh_key_file: .ssh/id_rsa 14 | - name: Distribute public keys and populate known_hosts 15 | gather_facts: no 16 | hosts: all 17 | vars_files: 18 | - "{{file_env}}" 19 | tasks: 20 | - name: Add key to authorized_keys 21 | authorized_key: 22 | user: "{{ssh_user}}" 23 | state: present 24 | key: "{{ lookup('file', '~/.ssh/id_rsa.pub') }}" 25 | - name: Fetch hostkey 26 | connection: local 27 | command: "ssh-keyscan {{ inventory_hostname }}" 28 | register: hostkey 29 | - name: Run ssh-keyscan to add keys to known_hosts 30 | connection: local 31 | known_hosts: 32 | name: "{{ inventory_hostname }}" 33 | key: "{{ item }}" 34 | with_items: "{{ hostkey.stdout_lines }}" 35 | when: hostkey.rc == 0 36 | -------------------------------------------------------------------------------- /playbooks/roles/bb0-openstack/README.md: -------------------------------------------------------------------------------- 1 | Provisioning on OpenStack 2 | ========= 3 | 4 | Provisionning of STC stack on top of OpenStack, inspired by https://github.com/ktenzer/openshift-on-openstack-123 5 | 6 | Example Playbook 7 | ---------------- 8 | 9 | 10 | ``` 11 | docker run -ti $(pwd):/work:z quay.io/redhat/stc-openstack-provisioner 12 | 13 | export OS_USERNAME=admin 14 | export OS_PASSWORD=xxxx 15 | export OS_AUTH_URL=xxxx 16 | export OS_PROJECT_NAME=admin 17 | export OS_USER_DOMAIN_NAME=Default 18 | export OS_PROJECT_DOMAIN_NAME=Default 19 | export OS_IDENTITY_API_VERSION=3 20 | 21 | export STC_RHN_PASSWORD=xxxx 22 | export STC_RHN_USERNAME=xxx 23 | export STC_SUBSCRIPTION_POOL_ID=xx 24 | export STC_REGISTRY_TOKEN_USER=xxx 25 | export STC_REGISTRY_TOKEN=xxxx 26 | export STC_FLAVOR=mini # Only supported flavor at the moment is mini 27 | 28 | cd /work 29 | ./playbooks/bb00-openstack_provisioning.yml 30 | ``` 31 | 32 | 33 | License 34 | ------- 35 | 36 | Apache 2.0 37 | 38 | Author Information 39 | 
------------------ 40 | 41 | Robert Bohne 42 | robert.bohne@redhat.com -------------------------------------------------------------------------------- /playbooks/roles/bb0-openstack/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # defaults file for provisioning_openstack 3 | 4 | heat_template_path: heat/openshift.yaml -------------------------------------------------------------------------------- /playbooks/roles/bb0-openstack/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # handlers file for provisioning_openstack -------------------------------------------------------------------------------- /playbooks/roles/bb0-openstack/meta/main.yml: -------------------------------------------------------------------------------- 1 | galaxy_info: 2 | author: your name 3 | description: your description 4 | company: your company (optional) 5 | 6 | # If the issue tracker for your role is not on github, uncomment the 7 | # next line and provide a value 8 | # issue_tracker_url: http://example.com/issue/tracker 9 | 10 | # Some suggested licenses: 11 | # - BSD (default) 12 | # - MIT 13 | # - GPLv2 14 | # - GPLv3 15 | # - Apache 16 | # - CC-BY 17 | license: license (GPLv2, CC-BY, etc) 18 | 19 | min_ansible_version: 2.4 20 | 21 | # If this a Container Enabled role, provide the minimum Ansible Container version. 22 | # min_ansible_container_version: 23 | 24 | # Optionally specify the branch Galaxy will use when accessing the GitHub 25 | # repo for this role. During role install, if no tags are available, 26 | # Galaxy will use this branch. During import Galaxy will access files on 27 | # this branch. If Travis integration is configured, only notifications for this 28 | # branch will be accepted. Otherwise, in all cases, the repo's default branch 29 | # (usually master) will be used. 30 | #github_branch: 31 | 32 | # 33 | # Provide a list of supported platforms, and for each platform a list of versions. 34 | # If you don't wish to enumerate all versions for a particular platform, use 'all'. 35 | # To view available platforms and versions (or releases), visit: 36 | # https://galaxy.ansible.com/api/v1/platforms/ 37 | # 38 | # platforms: 39 | # - name: Fedora 40 | # versions: 41 | # - all 42 | # - 25 43 | # - name: SomePlatform 44 | # versions: 45 | # - all 46 | # - 1.0 47 | # - 7 48 | # - 99.99 49 | 50 | galaxy_tags: [] 51 | # List tags for your role here, one per line. A tag is a keyword that describes 52 | # and categorizes the role. Users find roles by searching for tags. Be sure to 53 | # remove the '[]' above, if you add tags to this list. 54 | # 55 | # NOTE: A tag is limited to a single word comprised of alphanumeric characters. 56 | # Maximum 20 tags per role. 57 | 58 | dependencies: [] 59 | # List your role dependencies here, one per line. Be sure to remove the '[]' above, 60 | # if you add dependencies to this list. 
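# A filled-in galaxy_info for this role could look like the sketch below; author,
# description and license are taken from the role README, the platform entry is an
# assumption:
#
#   galaxy_info:
#     author: Robert Bohne
#     description: Provisioning of the STC stack on top of OpenStack
#     license: Apache-2.0
#     min_ansible_version: 2.4
#     platforms:
#       - name: EL
#         versions:
#           - 7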
-------------------------------------------------------------------------------- /playbooks/roles/bb0-openstack/provisioner-image/Dockerfile: -------------------------------------------------------------------------------- 1 | #FROM registry.access.redhat.com/rhel7/rhel 2 | FROM registry.access.redhat.com/openshift3/jenkins-slave-base-rhel7 3 | ARG RH_ORG_ID 4 | ARG RH_ACTIVATIONKEY 5 | ARG RH_POOL_ID 6 | 7 | 8 | RUN subscription-manager register --org=$RH_ORG_ID --activationkey=$RH_ACTIVATIONKEY --name=temp-containerbuild-$(date +"%s") && \ 9 | # ToDo: subscription-manager attach --pool=... FAILED with: 10 | # This unit has already had the subscription matching pool ID "8a85f99c65c8c91b0166c4c531662125" attached. 11 | subscription-manager attach --pool=$RH_POOL_ID ;\ 12 | subscription-manager repos --disable=* && \ 13 | subscription-manager repos --enable=rhel-7-server-rpms \ 14 | --enable=rhel-7-server-extras-rpms \ 15 | --enable=rhel-7-server-ose-3.11-rpms \ 16 | --enable=rhel-7-server-ansible-2.6-rpms \ 17 | --enable=rhel-7-server-openstack-14-rpms \ 18 | # --enable=rhel-7-server-openstack-14-devtools-rpms \ 19 | && \ 20 | yum install -y openshift-ansible python2-openstacksdk.noarch \ 21 | python2-shade.noarch python2-openstackclient.noarch \ 22 | telnet && \ 23 | subscription-manager unregister 24 | 25 | # yum install -y ansible \ 26 | # openssh-clients.x86_64 \ 27 | # python2-openstacksdk.noarch \ 28 | # python2-shade.noarch \ 29 | # python2-openstackclient.noarch \ 30 | # telnet 31 | # # Important for os_loadbalancer 32 | # python2-urllib3.noarch python2-chardet.noarch 33 | #RUN subscription-manager unregister 34 | 35 | 36 | # Tunnel: ssh -o "DynamicForward 127.0.0.1:65432" -i /work/q-root-id_rsa q.bohne.io -fN 37 | # export ALL_PROXY=socks5h://127.0.0.1:65432 38 | # ENV ALL_PROXY=socks5h://127.0.0.1:65432 39 | -------------------------------------------------------------------------------- /playbooks/roles/bb0-openstack/provisioner-image/README.md: -------------------------------------------------------------------------------- 1 | # STC OpenStack Provisioner 2 | 3 | ## Build 4 | 5 | ``` 6 | docker pull registry.access.redhat.com/rhel7/rhel 7 | docker build \ 8 | --build-arg RH_ORG_ID=$RHN_ORG_ID \ 9 | --build-arg RH_ACTIVATIONKEY=$RHN_ACTIVATIONKEY \ 10 | --build-arg RH_POOL_ID=$STC_SUBSCRIPTION_POOL_ID \ 11 | -t quay.io/redhat/stc-openstack-provisioner:latest \ 12 | . 13 | 14 | docker tag quay.io/redhat/stc-openstack-provisioner:latest quay.io/redhat/stc-openstack-provisioner:$(docker run -ti quay.io/redhat/stc-openstack-provisioner:latest yum info openshift-ansible | grep Version | cut -f2 -d':'|tr -d ' '|tr -d '\r') 15 | 16 | docker push quay.io/redhat/stc-openstack-provisioner 17 | 18 | ``` 19 | -------------------------------------------------------------------------------- /playbooks/roles/bb0-openstack/provisioner-image/build.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | docker pull registry.access.redhat.com/rhel7/rhel 4 | docker build \ 5 | --build-arg RH_ORG_ID=$RHN_ORG_ID \ 6 | --build-arg RH_ACTIVATIONKEY=$RHN_ACTIVATIONKEY \ 7 | --build-arg RH_POOL_ID=$STC_SUBSCRIPTION_POOL_ID \ 8 | -t quay.io/redhat/stc-openstack-provisioner:latest \ 9 | . 
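# The tag command below derives an additional image tag from the openshift-ansible
# RPM version inside the freshly built image (the "Version" field of `yum info`,
# with whitespace and carriage returns stripped), so the image carries both
# :latest and a version-specific tag before being pushed.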
10 | 11 | docker tag quay.io/redhat/stc-openstack-provisioner:latest quay.io/redhat/stc-openstack-provisioner:$(docker run -ti quay.io/redhat/stc-openstack-provisioner:latest yum info openshift-ansible | grep Version | cut -f2 -d':'|tr -d ' '|tr -d '\r') 12 | 13 | docker push quay.io/redhat/stc-openstack-provisioner -------------------------------------------------------------------------------- /playbooks/roles/bb0-openstack/tasks/create-inventory-mini.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - set_fact: 4 | int_iaas_image: "{{ lookup('env','STC_IAAS_IMAGE') | default('rhel-server-7.6-x86_64-kvm',true) }}" 5 | int_iaas_machine_size: "{{ lookup('env','STC_IAAS_MACHINE_SIZE') | default('m1.xlarge',true) }}" 6 | int_iaas_container_storage_disk: "{{ lookup('env','STC_IAAS_CONTAINER_STORAGE_DISK') | default('15',true) }}" 7 | int_iaas_glusterfs_disk: "{{ lookup('env','STC_IAAS_GLUSTERFS_DISK') | default('100',true) }}" 8 | int_iaas_internal_network: "{{ lookup('env','STC_IAAS_INTERNAL_NETWORK') | default('admin',true) }}" 9 | int_dns_provider: "{{ lookup('env','STC_DNS_PROVIDER') | default('nip.io',true) }}" 10 | 11 | - name: Add masters 12 | add_host: 13 | name: "master0" 14 | groups: 15 | - openstack_instances 16 | - masters 17 | - cns 18 | os_security_groups: 19 | - default 20 | - ssh_only 21 | - ose3_master 22 | - ose3_sdn 23 | iaas_machine_size: "{{ int_iaas_machine_size }}" 24 | 25 | iaas_image: "{{ int_iaas_image }}" 26 | iaas_container_storage_disk: "{{ int_iaas_container_storage_disk }}" 27 | iaas_glusterfs_disk: "{{ int_iaas_glusterfs_disk }}" 28 | iaas_internal_network: "{{ int_iaas_internal_network }}" 29 | dns_provider: "{{ int_dns_provider }}" 30 | 31 | ansible_ssh_private_key_file: "/work/q-root-id_rsa" 32 | ansible_user: "cloud-user" 33 | 34 | 35 | - name: Add infras 36 | add_host: 37 | name: "infra0" 38 | groups: 39 | - openstack_instances 40 | - infranodes 41 | - cns 42 | os_security_groups: 43 | - default 44 | - ssh_only 45 | - ose3_node 46 | - ose3_sdn 47 | iaas_machine_size: "{{ int_iaas_machine_size }}" 48 | 49 | iaas_image: "{{ int_iaas_image }}" 50 | iaas_container_storage_disk: "{{ int_iaas_container_storage_disk }}" 51 | iaas_glusterfs_disk: "{{ int_iaas_glusterfs_disk }}" 52 | iaas_internal_network: "{{ int_iaas_internal_network }}" 53 | dns_provider: "{{ int_dns_provider }}" 54 | 55 | - name: Add nodes 56 | add_host: 57 | name: "node0" 58 | groups: 59 | - openstack_instances 60 | - nodes 61 | - cns 62 | os_security_groups: 63 | - default 64 | - ssh_only 65 | - ose3_node 66 | - ose3_sdn 67 | iaas_machine_size: "{{ int_iaas_machine_size }}" 68 | 69 | iaas_image: "{{ int_iaas_image }}" 70 | iaas_container_storage_disk: "{{ int_iaas_container_storage_disk }}" 71 | iaas_glusterfs_disk: "{{ int_iaas_glusterfs_disk }}" 72 | iaas_internal_network: "{{ int_iaas_internal_network }}" 73 | dns_provider: "{{ int_dns_provider }}" 74 | 75 | # Import that bastion is the last host, because of creating the env.yml 76 | - name: bastion 77 | add_host: 78 | name: "bastion" 79 | groups: openstack_instances 80 | iaas_public_ip: true 81 | iaas_container_storage_disk: 0 82 | iaas_glusterfs_disk: 0 83 | os_security_groups: 84 | - default 85 | - ssh_only 86 | - ose3_router 87 | iaas_machine_size: "m1.small" 88 | iaas_image: "{{ int_iaas_image }}" 89 | dns_provider: "{{ int_dns_provider }}" 90 | iaas_internal_network: "{{ int_iaas_internal_network }}" 91 | 
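# Optional environment variables read by the set_fact block at the top of this file,
# with the defaults defined there:
#   STC_IAAS_IMAGE                   (default: rhel-server-7.6-x86_64-kvm)
#   STC_IAAS_MACHINE_SIZE            (default: m1.xlarge)
#   STC_IAAS_CONTAINER_STORAGE_DISK  (default: 15)
#   STC_IAAS_GLUSTERFS_DISK          (default: 100)
#   STC_IAAS_INTERNAL_NETWORK        (default: admin)
#   STC_DNS_PROVIDER                 (default: nip.io; "cloudflare" selects the Cloudflare DNS tasks)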
-------------------------------------------------------------------------------- /playbooks/roles/bb0-openstack/tasks/deprovisioning-post-once.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Add ssh 4 | os_keypair: 5 | auth: 6 | auth_url: "{{ lookup('env','OS_AUTH_URL') }}" 7 | project_name: "{{ lookup('env','OS_PROJECT_NAME') }}" 8 | username: "{{ lookup('env','OS_USERNAME') }}" 9 | password: "{{ lookup('env','OS_PASSWORD') }}" 10 | state: absent 11 | name: openshift-stc-key 12 | 13 | # Delete security groups 14 | - name: "Delete security groups" 15 | os_security_group: 16 | state: absent 17 | name: "{{item.name}}" 18 | description: "secgroup {{ item.name }} - managed by ansible" 19 | auth: 20 | auth_url: "{{ lookup('env','OS_AUTH_URL') }}" 21 | project_name: "{{ lookup('env','OS_PROJECT_NAME') }}" 22 | username: "{{ lookup('env','OS_USERNAME') }}" 23 | password: "{{ lookup('env','OS_PASSWORD') }}" 24 | with_items: 25 | - "{{ os_sec_groups }}" 26 | 27 | # Delete cloud flare dns 28 | - name: Optional cloud clare dns 29 | block: 30 | - name: Delete DNS record bastion 31 | cloudflare_dns: 32 | zone: "{{ cloudflare_zone }}" 33 | record: "bastion{{ cloudflare_name_postfix }}" 34 | type: A 35 | account_email: "{{ cloudflare_account_email }}" 36 | account_api_token: "{{ cloudflare_account_api_token }}" 37 | state: absent 38 | 39 | - name: Delete DNS record *.apps 40 | cloudflare_dns: 41 | zone: "{{ cloudflare_zone }}" 42 | record: "*.apps{{ cloudflare_name_postfix }}" 43 | type: A 44 | account_email: "{{ cloudflare_account_email }}" 45 | account_api_token: "{{ cloudflare_account_api_token }}" 46 | state: absent 47 | 48 | - name: Delete DNS record api 49 | cloudflare_dns: 50 | zone: "{{ cloudflare_zone }}" 51 | record: "api{{ cloudflare_name_postfix }}" 52 | type: A 53 | account_email: "{{ cloudflare_account_email }}" 54 | account_api_token: "{{ cloudflare_account_api_token }}" 55 | state: absent 56 | when: ( dns_provider | default('nip.io') == "cloudflare" ) 57 | -------------------------------------------------------------------------------- /playbooks/roles/bb0-openstack/tasks/deprovisioning.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Delete instance 3 | os_server: 4 | api_timeout: 360 # Because my lab is not so fast 5 | state: absent 6 | auth: 7 | auth_url: "{{ lookup('env','OS_AUTH_URL') }}" 8 | project_name: "{{ lookup('env','OS_PROJECT_NAME') }}" 9 | username: "{{ lookup('env','OS_USERNAME') }}" 10 | password: "{{ lookup('env','OS_PASSWORD') }}" 11 | name: "{{ inventory_hostname }}" 12 | 13 | - name: Delete container storage disk 14 | os_volume: 15 | state: absent 16 | auth: 17 | auth_url: "{{ lookup('env','OS_AUTH_URL') }}" 18 | project_name: "{{ lookup('env','OS_PROJECT_NAME') }}" 19 | username: "{{ lookup('env','OS_USERNAME') }}" 20 | password: "{{ lookup('env','OS_PASSWORD') }}" 21 | display_name: "{{ inventory_hostname }}_container_storage_disk" 22 | when: ( iaas_container_storage_disk | int > 0 ) 23 | 24 | - name: Delete glusterfs disk 25 | os_volume: 26 | state: absent 27 | auth: 28 | auth_url: "{{ lookup('env','OS_AUTH_URL') }}" 29 | project_name: "{{ lookup('env','OS_PROJECT_NAME') }}" 30 | username: "{{ lookup('env','OS_USERNAME') }}" 31 | password: "{{ lookup('env','OS_PASSWORD') }}" 32 | display_name: "{{ inventory_hostname }}_glusterfs_disk" 33 | when: ( iaas_glusterfs_disk | int > 0 ) 34 | 
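Each `os_*` task above repeats the same `auth:` block built from `OS_*` environment lookups. One way to reduce that repetition, purely as a sketch and not something this role currently does, is to collect the lookups into a single dictionary (the variable name `os_auth` is hypothetical) and pass it to the modules:

```
# Sketch only: define the OpenStack auth dictionary once, e.g. in the role
# defaults or group_vars (os_auth is a made-up variable name) ...
os_auth:
  auth_url: "{{ lookup('env','OS_AUTH_URL') }}"
  project_name: "{{ lookup('env','OS_PROJECT_NAME') }}"
  username: "{{ lookup('env','OS_USERNAME') }}"
  password: "{{ lookup('env','OS_PASSWORD') }}"

# ... and reference it from each task instead of repeating the four lookups:
#
# - name: Delete instance
#   os_server:
#     state: absent
#     auth: "{{ os_auth }}"
#     name: "{{ inventory_hostname }}"
```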
-------------------------------------------------------------------------------- /playbooks/roles/bb0-openstack/tasks/provisioning-dns-cloudflare.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Validate CloudFlare env variables 3 | assert: 4 | that: 5 | - lookup('env', item ) | length > 0 6 | msg: "CloudFlare env variable {{ item }} is NOT set!" 7 | with_items: 8 | - CLOUDFLARE_ACCOUNT_EMAIL 9 | - CLOUDFLARE_ACCOUNT_API_TOKEN 10 | - CLOUDFLARE_ZONE 11 | - CLOUDFLARE_NAME_POSTFIX 12 | when: ( instance_data.openstack.public_v4 != "" and inventory_hostname == "bastion" ) 13 | 14 | - name: Set cloudflare vars 15 | set_fact: 16 | cloudflare_account_email: "{{ lookup('env','CLOUDFLARE_ACCOUNT_EMAIL') }}" 17 | cloudflare_account_api_token: "{{ lookup('env','CLOUDFLARE_ACCOUNT_API_TOKEN') }}" 18 | cloudflare_zone: "{{ lookup('env','CLOUDFLARE_ZONE') }}" 19 | cloudflare_name_postfix: "{{ lookup('env','CLOUDFLARE_NAME_POSTFIX') }}" 20 | when: ( instance_data.openstack.public_v4 != "" and inventory_hostname == "bastion" ) 21 | 22 | - name: Create DNS record bastion{{cloudflare_name_postfix}} => {{ instance_data.openstack.public_v4 }} 23 | cloudflare_dns: 24 | zone: "{{ cloudflare_zone }}" 25 | record: "bastion{{ cloudflare_name_postfix }}" 26 | type: A 27 | value: "{{ instance_data.openstack.public_v4 }}" 28 | account_email: "{{ cloudflare_account_email }}" 29 | account_api_token: "{{ cloudflare_account_api_token }}" 30 | when: ( instance_data.openstack.public_v4 != "" and inventory_hostname == "bastion" ) 31 | 32 | - name: Create DNS record *.apps{{cloudflare_name_postfix}} => {{ instance_data.openstack.public_v4 }} 33 | cloudflare_dns: 34 | zone: "{{ cloudflare_zone }}" 35 | record: "*.apps{{ cloudflare_name_postfix }}" 36 | type: A 37 | value: "{{ instance_data.openstack.public_v4 }}" 38 | account_email: "{{ cloudflare_account_email }}" 39 | account_api_token: "{{ cloudflare_account_api_token }}" 40 | when: ( instance_data.openstack.public_v4 != "" and inventory_hostname == "bastion" ) 41 | 42 | - name: Create DNS record api{{cloudflare_name_postfix}} => {{ instance_data.openstack.public_v4 }} 43 | cloudflare_dns: 44 | zone: "{{ cloudflare_zone }}" 45 | record: "api{{ cloudflare_name_postfix }}" 46 | type: A 47 | value: "{{ instance_data.openstack.public_v4 }}" 48 | account_email: "{{ cloudflare_account_email }}" 49 | account_api_token: "{{ cloudflare_account_api_token }}" 50 | when: ( instance_data.openstack.public_v4 != "" and inventory_hostname == "bastion" ) 51 | 52 | - name: Set public name 53 | set_fact: 54 | openshift_master_cluster_public_hostname: "api{{ cloudflare_name_postfix }}.{{ cloudflare_zone }}" 55 | openshift_master_default_subdomain: "apps{{ cloudflare_name_postfix }}.{{ cloudflare_zone }}" 56 | bastion_public_hostname: "bastion{{ cloudflare_name_postfix }}.{{ cloudflare_zone }}" 57 | when: ( instance_data.openstack.public_v4 != "" and inventory_hostname == "bastion" ) 58 | -------------------------------------------------------------------------------- /playbooks/roles/bb0-openstack/tasks/provisioning-dns-nip.io.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Set public name 3 | set_fact: 4 | openshift_master_cluster_public_hostname: "api.{{ instance_data.openstack.public_v4 }}.nip.io" 5 | openshift_master_default_subdomain: "apps.{{ instance_data.openstack.public_v4 }}.nip.io" 6 | bastion_public_hostname: "bastion.{{ instance_data.openstack.public_v4 }}.nip.io" 
7 | when: ( instance_data.openstack.public_v4 != "" and inventory_hostname == "bastion" ) 8 | -------------------------------------------------------------------------------- /playbooks/roles/bb0-openstack/tasks/provisioning-pre-once.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: "{{ playbook_dir }}" 4 | command: "ssh-keygen -t rsa -f {{ playbook_dir }}/id_rsa -P ''" 5 | args: 6 | creates: "{{ playbook_dir }}/id_rsa" 7 | 8 | - name: Add ssh 9 | os_keypair: 10 | auth: 11 | auth_url: "{{ lookup('env','OS_AUTH_URL') }}" 12 | project_name: "{{ lookup('env','OS_PROJECT_NAME') }}" 13 | username: "{{ lookup('env','OS_USERNAME') }}" 14 | password: "{{ lookup('env','OS_PASSWORD') }}" 15 | state: present 16 | public_key_file: "{{ playbook_dir }}/id_rsa.pub" 17 | name: openshift-stc-key 18 | 19 | - name: "Create security groups" 20 | os_security_group: 21 | name: "{{item.name}}" 22 | description: "secgroup {{ item.name }} - managed by ansible" 23 | auth: 24 | auth_url: "{{ lookup('env','OS_AUTH_URL') }}" 25 | project_name: "{{ lookup('env','OS_PROJECT_NAME') }}" 26 | username: "{{ lookup('env','OS_USERNAME') }}" 27 | password: "{{ lookup('env','OS_PASSWORD') }}" 28 | with_items: 29 | - "{{ os_sec_groups }}" 30 | 31 | - name: "Create security group rules" 32 | os_security_group_rule: 33 | auth: 34 | auth_url: "{{ lookup('env','OS_AUTH_URL') }}" 35 | project_name: "{{ lookup('env','OS_PROJECT_NAME') }}" 36 | username: "{{ lookup('env','OS_USERNAME') }}" 37 | password: "{{ lookup('env','OS_PASSWORD') }}" 38 | security_group: "{{ item.0.name }}" 39 | protocol: "{{ item.1.proto }}" 40 | port_range_min: "{{ item.1.port }}" 41 | port_range_max: "{{ item.1.port }}" 42 | remote_ip_prefix: 0.0.0.0/0 43 | with_subelements: 44 | - "{{ os_sec_groups }}" 45 | - rules 46 | 47 | 48 | # - stat: path=ose_host_key 49 | # register: st 50 | 51 | # - os_keypair: 52 | # auth: 53 | # auth_url: "{{ lookup('env','OS_AUTH_URL') }}" 54 | # project_name: "{{ lookup('env','OS_PROJECT_NAME') }}" 55 | # username: "{{ lookup('env','OS_USERNAME') }}" 56 | # password: "{{ lookup('env','OS_PASSWORD') }}" 57 | # state: absent 58 | # name: ose_host_key 59 | # when: st.stat.exists == False 60 | 61 | # - os_keypair: 62 | # auth: 63 | # auth_url: "{{ lookup('env','OS_AUTH_URL') }}" 64 | # project_name: "{{ lookup('env','OS_PROJECT_NAME') }}" 65 | # username: "{{ lookup('env','OS_USERNAME') }}" 66 | # password: "{{ lookup('env','OS_PASSWORD') }}" 67 | # name: ose_host_key 68 | # register: return 69 | # when: st.stat.exists == False 70 | # - name: Save private key to {{inventory_dir}} 71 | # copy: 72 | # content: "{{return.key.private_key}}" 73 | # dest: "{{inventory_dir}}/{{return.key.name}}" 74 | # mode: 0600 75 | # when: st.stat.exists == False 76 | 77 | # - name: Save public key to {{inventory_dir}} 78 | # copy: 79 | # content: "{{return.key.public_key}}" 80 | # dest: "{{inventory_dir}}/{{return.key.name}}.pub" 81 | # when: st.stat.exists == False 82 | -------------------------------------------------------------------------------- /playbooks/roles/bb0-openstack/tasks/provisioning-prepare-bastion.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "Wait for bastion {{ ansible_host }}:22 (SSH)" 3 | connection: local 4 | wait_for: 5 | port: 22 6 | host: "{{ ansible_host }}" 7 | search_regex: OpenSSH 8 | delay: 10 9 | 10 | - name: Copy ssh key to bastion 11 | connection: ssh 12 | copy: 13 | src: "{{ playbook_dir }}/id_rsa" 14 
| dest: ~/.ssh/id_rsa 15 | mode: 0600 16 | 17 | - name: Copy ssh pub to bastion 18 | connection: ssh 19 | copy: 20 | src: "{{ playbook_dir }}/id_rsa.pub" 21 | dest: ~/.ssh/id_rsa.pub 22 | mode: 0600 23 | 24 | - name: Create ~/stc bastion 25 | connection: ssh 26 | file: 27 | path: ~/stc/ 28 | state: directory 29 | mode: 0755 30 | 31 | - name: Checkout STC on bastion 32 | connection: ssh 33 | unarchive: 34 | src: https://github.com/RedHat-EMEA-SSA-Team/stc/archive/master.tar.gz 35 | dest: ~/stc/ 36 | extra_opts: [--strip-components=1] 37 | remote_src: yes 38 | 39 | # - debug: var=hostvars 40 | 41 | - name: Create STC env.yml 42 | connection: ssh 43 | template: 44 | src: env.yml.j2 45 | dest: ~/stc/env.yml 46 | mode: 0660 47 | 48 | - name: Create STC secrets.yml 49 | connection: ssh 50 | template: 51 | src: secrets.yml.j2 52 | dest: ~/stc/secrets.yml 53 | mode: 0660 54 | 55 | - name: Register and subscribe. 56 | connection: ssh 57 | become: true 58 | redhat_subscription: 59 | state: present 60 | username: "{{ lookup('env','STC_RHN_USERNAME') }}" 61 | password: "{{ lookup('env','STC_RHN_PASSWORD') }}" 62 | pool_ids: 63 | - "{{ lookup('env','STC_SUBSCRIPTION_POOL_ID') }}" 64 | 65 | - name: Disable all RHSM repositories 66 | connection: ssh 67 | become: true 68 | rhsm_repository: 69 | name: '*' 70 | state: disabled 71 | 72 | - name: Disable all repositories except rhel-7-server-rpms 73 | connection: ssh 74 | become: true 75 | rhsm_repository: 76 | name: "{{ item }}" 77 | state: enabled 78 | with_items: 79 | - "rhel-7-server-rpms" 80 | - "rhel-7-server-extras-rpms" 81 | - "rhel-7-server-ose-3.11-rpms" 82 | - "rhel-7-server-ansible-2.6-rpms" 83 | 84 | - name: Install necessary packages 85 | connection: ssh 86 | become: true 87 | yum: 88 | name: 89 | - git 90 | - ansible 91 | - tmux 92 | - nc 93 | - screen 94 | state: present -------------------------------------------------------------------------------- /playbooks/roles/bb0-openstack/tasks/provisioning.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # tasks file for provisioning_openstack 3 | 4 | # - include_tasks: validate-parameters.yml 5 | 6 | # Just for informations: os_loadbalancer, os_listener, os_pool and os_member need Ansible 2.7. 7 | # And addional packages: python2-urllib3.noarch python2-chardet.noarch 8 | # All thoses ansible modules use the openstack octavia lbaas API. 
9 | 10 | 11 | 12 | - name: Create instance 13 | os_server: 14 | api_timeout: 360 # Because my lab is not so fast 15 | state: present 16 | auth: 17 | auth_url: "{{ lookup('env','OS_AUTH_URL') }}" 18 | project_name: "{{ lookup('env','OS_PROJECT_NAME') }}" 19 | username: "{{ lookup('env','OS_USERNAME') }}" 20 | password: "{{ lookup('env','OS_PASSWORD') }}" 21 | name: "{{ inventory_hostname }}" 22 | image: "{{ iaas_image }}" 23 | flavor: "{{ iaas_machine_size }}" 24 | auto_ip: "{{ iaas_public_ip | default(false) }}" 25 | key_name: openshift-stc-key 26 | security_groups: "{{ os_security_groups }}" 27 | network: "{{ iaas_internal_network }}" 28 | terminate_volume: true 29 | boot_from_volume: true 30 | userdata: | 31 | #cloud-config 32 | # set the locale 33 | locale: en_US.UTF-8 34 | # timezone: set the timezone for this instance 35 | timezone: UTC 36 | # hostname: {{ inventory_hostname }} 37 | # fqdn: {{ inventory_hostname }} 38 | register: return 39 | 40 | - name: Set some facts 41 | set_fact: 42 | instance_data: "{{ return }}" 43 | 44 | # - debug: 45 | # var: instance_data 46 | 47 | # - debug: var=instance_data.openstack.private_v4 48 | # - debug: var=instance_data.openstack.public_v4 49 | 50 | 51 | - name: Create container storage disk 52 | os_volume: 53 | state: present 54 | auth: 55 | auth_url: "{{ lookup('env','OS_AUTH_URL') }}" 56 | project_name: "{{ lookup('env','OS_PROJECT_NAME') }}" 57 | username: "{{ lookup('env','OS_USERNAME') }}" 58 | password: "{{ lookup('env','OS_PASSWORD') }}" 59 | size: "{{ iaas_container_storage_disk }}" 60 | display_name: "{{ inventory_hostname }}_container_storage_disk" 61 | when: ( iaas_container_storage_disk | int > 0 ) 62 | 63 | - name: Attach container storage disk to server 64 | os_server_volume: 65 | state: present 66 | auth: 67 | auth_url: "{{ lookup('env','OS_AUTH_URL') }}" 68 | project_name: "{{ lookup('env','OS_PROJECT_NAME') }}" 69 | username: "{{ lookup('env','OS_USERNAME') }}" 70 | password: "{{ lookup('env','OS_PASSWORD') }}" 71 | server: "{{ inventory_hostname }}" 72 | volume: "{{ inventory_hostname }}_container_storage_disk" 73 | when: ( iaas_container_storage_disk | int > 0 ) 74 | register: return 75 | 76 | # If I attach a disk to an server, it is important to reset the facts 77 | # because to get the device name (/dev/vd?) 78 | - name: Set some facts 79 | set_fact: 80 | instance_data: "{{ return }}" 81 | when: ( return.changed == true ) 82 | 83 | - name: Create glusterfs disk 84 | os_volume: 85 | state: present 86 | auth: 87 | auth_url: "{{ lookup('env','OS_AUTH_URL') }}" 88 | project_name: "{{ lookup('env','OS_PROJECT_NAME') }}" 89 | username: "{{ lookup('env','OS_USERNAME') }}" 90 | password: "{{ lookup('env','OS_PASSWORD') }}" 91 | size: "{{ iaas_glusterfs_disk }}" 92 | display_name: "{{ inventory_hostname }}_glusterfs_disk" 93 | when: ( iaas_glusterfs_disk | int > 0 ) 94 | 95 | - name: Attach glusterfs disk to server 96 | os_server_volume: 97 | state: present 98 | auth: 99 | auth_url: "{{ lookup('env','OS_AUTH_URL') }}" 100 | project_name: "{{ lookup('env','OS_PROJECT_NAME') }}" 101 | username: "{{ lookup('env','OS_USERNAME') }}" 102 | password: "{{ lookup('env','OS_PASSWORD') }}" 103 | server: "{{ inventory_hostname }}" 104 | volume: "{{ inventory_hostname }}_glusterfs_disk" 105 | when: ( iaas_glusterfs_disk | int > 0 ) 106 | register: return 107 | 108 | # If I attach a disk to an server, it is important to reset the facts 109 | # because to get the device name (/dev/vd?) 
110 | - name: Set some facts 111 | set_fact: 112 | instance_data: "{{ return }}" 113 | when: ( return.changed == true ) 114 | 115 | # - debug: var=instance_data.openstack.private_v4 116 | # - debug: var=instance_data.openstack.public_v4 117 | # - debug: var=instance_data.openstack.volumes 118 | 119 | - name: Set _container_storage_disk 120 | set_fact: 121 | container_storage_disk: "{{ instance_data.openstack.volumes | selectattr('display_name','equalto', inventory_hostname + '_container_storage_disk' ) | first }}" 122 | when: ( instance_data.openstack.volumes | selectattr('display_name','equalto', inventory_hostname + '_container_storage_disk' )| list | length > 0 ) 123 | 124 | - name: Set glusterfs_disk 125 | set_fact: 126 | glusterfs_disk: "{{ instance_data.openstack.volumes | selectattr('display_name','equalto', inventory_hostname + '_glusterfs_disk' ) | first }}" 127 | when: ( instance_data.openstack.volumes | selectattr('display_name','equalto', inventory_hostname + '_glusterfs_disk' )| list | length > 0 ) 128 | 129 | - name: Set ansible_host if public ip is available 130 | set_fact: 131 | ansible_host: "{{ instance_data.openstack.public_v4 }}" 132 | ansible_ssh_private_key_file: "{{ playbook_dir }}/id_rsa" 133 | ansible_user: "cloud-user" 134 | when: ( instance_data.openstack.public_v4 is defined and instance_data.openstack.public_v4 != "" ) 135 | 136 | - include_tasks: provisioning-dns-{{ dns_provider }}.yml 137 | 138 | - include_tasks: provisioning-prepare-bastion.yml 139 | when: ( inventory_hostname == 'bastion' ) 140 | 141 | - debug: 142 | msg: "Connect to bastion via ssh -i {{ ansible_ssh_private_key_file }} -l {{ ansible_user }} {{ bastion_public_hostname }}" 143 | when: ( inventory_hostname == 'bastion' ) -------------------------------------------------------------------------------- /playbooks/roles/bb0-openstack/tasks/validate-parameters.yml: -------------------------------------------------------------------------------- 1 | 2 | - name: Validate STC_FLAVOR 3 | assert: 4 | that: 5 | - lookup('env','STC_FLAVOR') == 'mini' 6 | msg: "STC_FLAVOR should be 'mini'" 7 | 8 | - name: Validate env variables 9 | assert: 10 | that: 11 | - lookup('env', item ) | length > 0 12 | msg: "Env variable {{ item }} is NOT set!" 
13 | with_items: 14 | - OS_USERNAME 15 | - OS_PASSWORD 16 | - OS_AUTH_URL 17 | - OS_PROJECT_NAME 18 | - OS_USER_DOMAIN_NAME 19 | - OS_PROJECT_DOMAIN_NAME 20 | - OS_IDENTITY_API_VERSION 21 | - STC_RHN_PASSWORD 22 | - STC_RHN_USERNAME 23 | - STC_SUBSCRIPTION_POOL_ID 24 | 25 | - name: Check connection to openstack api 26 | command: "curl -s -o /dev/null --connect-timeout 3 {{ lookup('env', 'OS_AUTH_URL' ) }}" 27 | args: 28 | warn: false # set warn=false to prevent warning 29 | 30 | 31 | # - name: Validate masters 32 | # assert: 33 | # that: 34 | # - (master_count == 1) or (master_count == 3) 35 | # msg: "Master count is currently {{ master_count }} but must be 1 or 3" 36 | 37 | # - name: Validate infras 38 | # assert: 39 | # that: 40 | # - (infra_count >= 1 ) 41 | # - (infra_count <= 3) 42 | # msg: "Infra count s currently {{ infra_count }} but must be between 1 and 3" 43 | 44 | # - name: Validate nodes 45 | # assert: 46 | # that: 47 | # - (node_count >= 1 ) 48 | # msg: "Node count {{ node_count }} must be >= 1" 49 | 50 | # - name: Validate OpenShift HA 51 | # assert: 52 | # that: 53 | # - (master_count == 3) and (infra_count > 1) 54 | # msg: "OpenShift HA requires 3 masters and at least 2 infra nodes" 55 | # when: openshift_ha 56 | 57 | #- name: Validate Registry HA 58 | # assert: 59 | # that: 60 | # - (registry_replicas > 1 ) 61 | # msg: "OpenShift HA requires at least 2 registry replicas" 62 | # when: openshift_ha 63 | -------------------------------------------------------------------------------- /playbooks/roles/bb0-openstack/templates/env.yml.j2: -------------------------------------------------------------------------------- 1 | ocp_version: 3.11 2 | api_dns: {{ openshift_master_cluster_public_hostname }} 3 | apps_dns: {{ openshift_master_default_subdomain }} 4 | bastion: bastion 5 | lb: bastion 6 | masters: 7 | {% for h in groups['masters'] %} 8 | - {{ h }} 9 | {% endfor %} 10 | infranodes: 11 | {% for h in groups['infranodes'] %} 12 | - {{ h }} 13 | {% endfor %} 14 | nodes: 15 | {% for h in groups['nodes'] %} 16 | - {{ h }} 17 | {% endfor %} 18 | cns: 19 | {% for h in groups['cns'] %} 20 | - {{ h }} 21 | {% endfor %} 22 | container_disk: {{ hostvars['master0'].container_storage_disk.device | replace("/dev/","") | default('PLEASE SET') }} 23 | ocs_disk: {{ hostvars['master0'].glusterfs_disk.device | replace("/dev/","") | default('PLEASE SET') }} 24 | ssh_user: {{ ansible_user }} 25 | install_logging: n 26 | install_metrics: n 27 | # ntp_servers: 28 | # - ntp1.hetzner.de 29 | # - ntp2.hetzner.com 30 | # - ntp3.hetzner.net 31 | rhn_username: {{ lookup('env','STC_RHN_USERNAME') | default('PLEASE SET') }} 32 | subscription_pool_id: {{ lookup('env','STC_SUBSCRIPTION_POOL_ID') | default('PLEASE SET') }} 33 | registry_token_user: {{ lookup('env','STC_REGISTRY_TOKEN_USER') | default('PLEASE SET') }} 34 | registry_token: {{ lookup('env','STC_REGISTRY_TOKEN') | default('PLEASE SET') }} 35 | -------------------------------------------------------------------------------- /playbooks/roles/bb0-openstack/templates/secrets.yml.j2: -------------------------------------------------------------------------------- 1 | sudo_password: "{{ lookup('env','STC_SUDO_PASSWORD') }}" 2 | rhn_password: "{{ lookup('env','STC_RHN_PASSWORD') | default('PLEASE SET',true) }}" -------------------------------------------------------------------------------- /playbooks/roles/bb0-openstack/tests/inventory: -------------------------------------------------------------------------------- 1 | localhost 2 | 3 | 
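To make the hand-off to the bastion more concrete: with the 'mini' inventory above and the nip.io DNS provider, the rendered `env.yml` would look roughly like the sketch below. The public IP `203.0.113.10` and the `vdb`/`vdc` device names are made-up examples; the field list and host names come from `env.yml.j2` and `create-inventory-mini.yml` above.

```
# Illustrative rendering of env.yml.j2 (example values, not real output)
ocp_version: 3.11
api_dns: api.203.0.113.10.nip.io
apps_dns: apps.203.0.113.10.nip.io
bastion: bastion
lb: bastion
masters:
- master0
infranodes:
- infra0
nodes:
- node0
cns:
- master0
- infra0
- node0
container_disk: vdb   # example device name of the attached container storage volume
ocs_disk: vdc         # example device name of the attached glusterfs volume
ssh_user: cloud-user
install_logging: n
install_metrics: n
rhn_username: rhn-user@example.com
subscription_pool_id: PLEASE SET
registry_token_user: PLEASE SET
registry_token: PLEASE SET
```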
-------------------------------------------------------------------------------- /playbooks/roles/bb0-openstack/tests/test.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | remote_user: root 4 | roles: 5 | - provisioning_openstack -------------------------------------------------------------------------------- /playbooks/roles/bb0-openstack/vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # vars file for provisioning_openstack 3 | 4 | stack_name: openshift 5 | heat_template: files/openshift_single_lbaas.yaml 6 | bastion_flavor: "m1.small" 7 | master_flavor: "m1.large" 8 | infra_flavor: "m1.large" 9 | node_flavor: "m1.large" 10 | 11 | # In GB 12 | master_container_disk_size: 15 13 | master_glusterfs_disk_size: 100 14 | infra_container_disk_size: 15 15 | infra_glusterfs_disk_size: 100 16 | node_container_disk_size: 15 17 | node_glusterfs_disk_size: 100 18 | 19 | # domain_name: "{{ domain_name }}" 20 | # external_network: "{{ external_network }}" 21 | # service_network: "{{ service_network }}" 22 | # service_subnet: "{{ service_subnet_id }}" 23 | # ssh_key_name: "default" 24 | # image: "{{ image }}" 25 | 26 | 27 | # vars file for init-openstack 28 | # 29 | 30 | os_sec_groups: 31 | - name: ssh_only 32 | rules: 33 | - { proto: 'tcp', port: 22 } 34 | - name: ose3_sdn 35 | rules: 36 | - { proto: 'tcp', port: 4789 } 37 | - { proto: 'udp', port: 4789 } 38 | - name: ose3_node 39 | rules: 40 | - { proto: 'tcp', port: 10250 } 41 | - { proto: 'udp', port: 10250 } 42 | - name: ose3_master 43 | rules: 44 | - { proto: 'tcp', port: 53 } 45 | - { proto: 'tcp', port: 443 } 46 | - { proto: 'tcp', port: 2379 } 47 | - { proto: 'tcp', port: 2380 } 48 | - { proto: 'tcp', port: 4001 } 49 | - { proto: 'tcp', port: 5000 } 50 | - { proto: 'tcp', port: 8443 } 51 | - { proto: 'tcp', port: 24224} 52 | - { proto: 'udp', port: 53 } 53 | - { proto: 'udp', port: 2379 } 54 | - { proto: 'udp', port: 2380 } 55 | - { proto: 'udp', port: 4001 } 56 | - { proto: 'udp', port: 24224 } 57 | - name: ose3_router 58 | rules: 59 | - { proto: 'tcp', port: 80 } 60 | - { proto: 'tcp', port: 443 } 61 | - { proto: 'tcp', port: 8443 } 62 | - name: cloudforms 63 | rules: 64 | - { proto: 'tcp', port: 80 } 65 | - { proto: 'tcp', port: 443 } 66 | - name: nagios 67 | rules: 68 | - { proto: 'tcp', port: 80 } 69 | - { proto: 'tcp', port: 443 } 70 | 71 | -------------------------------------------------------------------------------- /playbooks/roles/check_cleanup/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: Stop nc from listening 2 | shell: "pkill -f 'nc -l'" 3 | ignore_errors: true 4 | - name: Ensure nc absent 5 | yum: 6 | name: nc 7 | state: absent 8 | -------------------------------------------------------------------------------- /playbooks/roles/check_connectivity/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: cURL proxy whitelisted sites 2 | shell: "curl -s http://{{item}} -o /dev/null" 3 | with_items: "{{proxy_whitelist}}" 4 | register: connectivity 5 | changed_when: false 6 | - name: Check download speed 7 | shell: "curl -s https://raw.githubusercontent.com/sivel/speedtest-cli/master/speedtest.py | python - | grep Download | awk '{print $2}'" 8 | register: bandwidth 9 | changed_when: false 10 | - set_fact: 11 | download_speed: "{{bandwidth.stdout}}" 12 | - debug: 13 | msg: "Download speed is 
{{download_speed}}" 14 | failed_when: "(download_speed | int) < bandwidth_limit" 15 | -------------------------------------------------------------------------------- /playbooks/roles/check_disks/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - shell: | 2 | lsblk -r | grep -c disk 3 | register: number_of_disks 4 | - name: check number of disks in nodes 5 | fail: 6 | msg: All nodes and masters need 2+ disks 7 | when: "number_of_disks.stdout|int < 2 and inventory_hostname in groups['nodes']" 8 | - name: check number of disks in OCS nodes 9 | fail: 10 | msg: All nodes running OCS need 3+ disks 11 | when: "number_of_disks.stdout|int < 3 and inventory_hostname in groups['glusterfs']" 12 | -------------------------------------------------------------------------------- /playbooks/roles/check_dns/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: Check DNS resolution for OpenShift API endpoint. If fails if it's not resolved OK. 2 | command: nslookup "{{api_dns}}" 3 | ignore_errors: yes 4 | register: api_ns 5 | changed_when: false 6 | - name: Check DNS resolution for OpenShift apps subdomain. If fails if it's not resolved OK. 7 | command: nslookup apps."{{apps_dns}}" 8 | ignore_errors: yes 9 | register: apps_ns 10 | changed_when: false 11 | - debug: 12 | msg: "{{api_dns}} resolved to {{api_ns.stdout}}" 13 | failed_when: "'server can\\'t find' in api_ns.stdout" 14 | - debug: 15 | msg: "apps.{{apps_dns}} resolved to {{apps_ns.stdout}}" 16 | failed_when: "'server can\\'t find' in apps_ns.stdout" 17 | - name: Figure out DNS server from NetworkManager 18 | shell: "for uuid in `nmcli -g UUID connection show --active`; do dns=`nmcli -g ipv4.dns,IP4.DNS connection show $uuid`; if [ -n $dns ]; then echo $dns; break; fi; done" 19 | register: nm_nameserver 20 | changed_when: false 21 | ignore_errors: true 22 | - debug: 23 | msg: "Your nameserver DNS is '{{nm_nameserver.stdout}}'" 24 | failed_when: nm_nameserver == '' 25 | 26 | -------------------------------------------------------------------------------- /playbooks/roles/check_docker/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Prepare install and validate docker 3 | hosts: nodes 4 | gather_facts: yes 5 | vars_files: 6 | - "{{file_env}}" 7 | roles: 8 | - check_docker_setup 9 | - check_docker_validation 10 | -------------------------------------------------------------------------------- /playbooks/roles/check_docker_setup/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: Ensure docker installed 2 | yum: 3 | name: docker 4 | state: latest 5 | - name: Ensure docker proxy settings 6 | lineinfile: 7 | dest: /etc/sysconfig/docker 8 | state: present 9 | line: "{{item[0]}}={{ item[1] }}" 10 | with_together: 11 | - ['HTTP_PROXY', 'HTTPS_PROXY', 'NO_PROXY'] 12 | - ["{{proxy_http}}", "{{proxy_https}}", "{{proxy_no}}"] 13 | when: proxy_username is not defined and proxy_http is defined 14 | - name: Ensure docker proxy settings with username and password 15 | lineinfile: 16 | dest: /etc/sysconfig/docker 17 | state: present 18 | line: "{{item[0]}}=http://{{ proxy_username }}:{{ proxy_password }}@{{ item[1] }}" 19 | with_together: 20 | - ['HTTP_PROXY', 'HTTPS_PROXY', 'NO_PROXY'] 21 | - ["{{proxy_http}}", "{{proxy_https}}", "{{proxy_no}}"] 22 | when: proxy_username is defined and proxy_http is defined 23 | - name: Detect Docker 
storage configuration status 24 | command: grep -q overlay2 /etc/sysconfig/docker-storage 25 | register: docker_storage_test 26 | changed_when: false 27 | failed_when: false 28 | - name: Create docker storage configuration 29 | template: 30 | src: templates/docker-storage-setup.j2 31 | dest: /etc/sysconfig/docker-storage-setup 32 | when: docker_storage_test.rc != 0 33 | 34 | - name: Apply Docker storage configuration changes 35 | command: docker-storage-setup 36 | when: docker_storage_test.rc != 0 37 | 38 | - name: Fail if Docker version is < {{docker_version}} 39 | fail: 40 | msg: 'docker_version must be >= 1.12, yours is set to {{ docker_version }}.' 41 | when: docker_version is version_compare('1.12', '<') 42 | 43 | 44 | - name: Enable and start docker 45 | service: 46 | name: docker 47 | enabled: yes 48 | state: started 49 | -------------------------------------------------------------------------------- /playbooks/roles/check_docker_setup/templates/docker-storage-setup.j2: -------------------------------------------------------------------------------- 1 | WIPE_SIGNATURES=true 2 | STORAGE_DRIVER=overlay2 3 | {%if container_disk is defined%} 4 | DEVS=/dev/{{container_disk}} 5 | {%endif%} 6 | CONTAINER_ROOT_LV_NAME=dockerlv 7 | CONTAINER_ROOT_LV_SIZE=100%FREE 8 | CONTAINER_ROOT_LV_MOUNT_PATH=/var/lib/docker 9 | VG={{docker_vg}} 10 | -------------------------------------------------------------------------------- /playbooks/roles/check_docker_validation/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: Pull some basic docker images 2 | command: "docker pull {{item}}:{{docker_prepull_tag}}" 3 | with_items: 4 | - "{{docker_prepull}}" 5 | -------------------------------------------------------------------------------- /playbooks/roles/check_firewall/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: Check that all needed ports are open 2 | wait_for: 3 | host: "{{item[0]}}" 4 | port: "{{item[1]}}" 5 | delay: 0 6 | timeout: 1 7 | when: item[0] != nodes[0] 8 | with_nested: 9 | - "{{ nodes }}" 10 | - "{{ firewall_ports }}" 11 | ignore_errors: true 12 | -------------------------------------------------------------------------------- /playbooks/roles/check_firewall_initialize/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: Ensure nc is installed 2 | yum: 3 | name: nc 4 | state: present 5 | - name: Open correct ports from iptables 6 | iptables: 7 | chain: INPUT 8 | protocol: tcp 9 | destination_port: "{{item}}" 10 | jump: ACCEPT 11 | comment: "Accept trafic to {{item}}" 12 | with_items: 13 | - "{{firewall_ports}}" 14 | - name: Start nc -l to all valid ports 15 | shell: "nc -l {{item}} >/dev/null 2>&1 &" 16 | async: -1 17 | poll: -1 18 | with_items: 19 | - "{{firewall_ports}}" 20 | -------------------------------------------------------------------------------- /playbooks/roles/check_glusterfs/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: Check if NTP enabled 2 | shell: | 3 | timedatectl | grep "NTP enabled: yes" 4 | ignore_errors: true 5 | register: ntp_enabled 6 | - name: Check if NTP synchronized 7 | shell: | 8 | timedatectl | grep "NTP synchronized: yes" 9 | ignore_errors: true 10 | register: ntp_synchronized 11 | - name: Signal if NTP is not enable or synchronized 12 | debug: 13 | msg: | 14 | {{ ntp_enabled.stdout }} 15 | when: ntp_enabled.rc != 0 or 
ntp_synchronized.rc != 0 16 | 17 | -------------------------------------------------------------------------------- /playbooks/roles/check_hostname/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Fetch fqdn (hostname -f) 3 | command: hostname -f 4 | register: hostname_fqdn 5 | 6 | - name: Fetch hostname (hostname) 7 | command: hostname 8 | register: hostname 9 | 10 | - name: Check hostnames 11 | assert: 12 | that: 13 | - "inventory_hostname == hostname.stdout" 14 | - "inventory_hostname == hostname_fqdn.stdout" 15 | - "hostname.stdout == hostname_fqdn.stdout" 16 | msg: "Please check hostname: ansible inventory_hostname, $(hostname) and $(hostname -f) must be the same!" 17 | -------------------------------------------------------------------------------- /playbooks/roles/check_networking/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: check if net.ipv4.ip_forward is turned on 2 | command: sysctl -b net.ipv4.ip_forward 3 | register: ip_forward_check 4 | - name: show warning 5 | debug: 6 | msg: "net.ipv4.ip_forward is 0, forcing it to 1" 7 | when: ip_forward_check.stdout == "0" 8 | - name: force net.ipv4.ip_forward to 1 9 | sysctl: 10 | name: net.ipv4.ip_forward 11 | value: 1 12 | sysctl_set: yes 13 | state: present 14 | reload: yes 15 | when: ip_forward_check.stdout == "0" 16 | 17 | - name: Check state of firewalld 18 | systemd: name=firewalld 19 | register: firewalld_check 20 | 21 | - name: Fail when firewalld is active 22 | fail: 23 | msg: "firewalld is active, but iptables is required" 24 | when: "firewalld_check.status.ActiveState == 'active'" 25 | -------------------------------------------------------------------------------- /playbooks/roles/check_nm/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: Ensure that NetworkManager is running 2 | command: systemctl status NetworkManager 3 | ignore_errors: yes 4 | changed_when: false 5 | register: service_NetworkManager_status 6 | - name: Report status of Network Manager 7 | fail: 8 | msg: | 9 | Service NetworkManager is not running. 
10 | Output of `systemctl status NetworkManager`: 11 | {{ service_NetworkManager_status.stdout }} 12 | {{ service_NetworkManager_status.stderr }} 13 | when: service_NetworkManager_status | failed 14 | -------------------------------------------------------------------------------- /playbooks/roles/check_ntp/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Restart chrony 3 | service: 4 | name: chronyd 5 | state: restarted 6 | 7 | - name: Restart ntp 8 | service: 9 | name: ntpd 10 | state: restarted 11 | 12 | 13 | -------------------------------------------------------------------------------- /playbooks/roles/check_ntp/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Determine if chrony is installed 3 | command: rpm -q chrony 4 | failed_when: false 5 | register: chrony_installed 6 | 7 | - name: Determine if ntp is installed 8 | command: rpm -q ntp 9 | failed_when: false 10 | register: ntp_installed 11 | 12 | - name: Install chrony package 13 | package: 14 | name: chrony 15 | state: present 16 | when: chrony_installed.rc != 0 and ntp_installed.rc != 0 17 | 18 | - name: Create chrony conf 19 | template: 20 | src: templates/chrony.j2 21 | dest: /etc/chrony.conf 22 | mode: 0644 23 | notify: Restart chrony 24 | when: ntp_installed.rc != 0 and ntp_servers is defined and (ntp_servers|length > 0) 25 | 26 | - name: Create ntp conf 27 | template: 28 | src: templates/ntp.j2 29 | dest: /etc/ntp.conf 30 | mode: 0644 31 | notify: Restart ntp 32 | when: chrony_installed.rc != 0 and ntp_servers is defined and (ntp_servers|length > 0) 33 | 34 | - name: Start and enable chronyd/ntpd 35 | command: timedatectl set-ntp true 36 | 37 | -------------------------------------------------------------------------------- /playbooks/roles/check_ntp/templates/chrony.j2: -------------------------------------------------------------------------------- 1 | # Use public servers from the pool.ntp.org project. 2 | # Please consider joining the pool (http://www.pool.ntp.org/join.html). 3 | {% for ntp in ntp_servers %} 4 | server {{ntp}} iburst 5 | {% endfor %} 6 | 7 | # Record the rate at which the system clock gains/losses time. 8 | driftfile /var/lib/chrony/drift 9 | 10 | # Allow the system clock to be stepped in the first three updates 11 | # if its offset is larger than 1 second. 12 | makestep 1.0 3 13 | 14 | # Enable kernel synchronization of the real-time clock (RTC). 15 | rtcsync 16 | 17 | # Enable hardware timestamping on all interfaces that support it. 18 | #hwtimestamp * 19 | 20 | # Increase the minimum number of selectable sources required to adjust 21 | # the system clock. 22 | #minsources 2 23 | 24 | # Allow NTP client access from local network. 25 | #allow 192.168.0.0/16 26 | 27 | # Serve time even if not synchronized to a time source. 28 | #local stratum 10 29 | 30 | # Specify file containing keys for NTP authentication. 31 | #keyfile /etc/chrony.keys 32 | 33 | # Specify directory for log files. 34 | logdir /var/log/chrony 35 | 36 | # Select which information is logged. 37 | #log measurements statistics tracking 38 | -------------------------------------------------------------------------------- /playbooks/roles/check_ntp/templates/ntp.j2: -------------------------------------------------------------------------------- 1 | # For more information about this file, see the man pages 2 | # ntp.conf(5), ntp_acc(5), ntp_auth(5), ntp_clock(5), ntp_misc(5), ntp_mon(5). 
3 | 4 | driftfile /var/lib/ntp/drift 5 | 6 | # Permit time synchronization with our time source, but do not 7 | # permit the source to query or modify the service on this system. 8 | restrict default nomodify notrap nopeer noquery 9 | 10 | # Permit all access over the loopback interface. This could 11 | # be tightened as well, but to do so would effect some of 12 | # the administrative functions. 13 | restrict 127.0.0.1 14 | restrict ::1 15 | 16 | # Hosts on local network are less restricted. 17 | #restrict 192.168.1.0 mask 255.255.255.0 nomodify notrap 18 | 19 | # Use public servers from the pool.ntp.org project. 20 | # Please consider joining the pool (http://www.pool.ntp.org/join.html). 21 | {% for ntp in ntp_servers %} 22 | server {{ntp}} iburst 23 | {% endfor %} 24 | 25 | 26 | #broadcast 192.168.1.255 autokey # broadcast server 27 | #broadcastclient # broadcast client 28 | #broadcast 224.0.1.1 autokey # multicast server 29 | #multicastclient 224.0.1.1 # multicast client 30 | #manycastserver 239.255.254.254 # manycast server 31 | #manycastclient 239.255.254.254 autokey # manycast client 32 | 33 | # Enable public key cryptography. 34 | #crypto 35 | 36 | includefile /etc/ntp/crypto/pw 37 | 38 | # Key file containing the keys and key identifiers used when operating 39 | # with symmetric key cryptography. 40 | keys /etc/ntp/keys 41 | 42 | # Specify the key identifiers which are trusted. 43 | #trustedkey 4 8 42 44 | 45 | # Specify the key identifier to use with the ntpdc utility. 46 | #requestkey 8 47 | 48 | # Specify the key identifier to use with the ntpq utility. 49 | #controlkey 8 50 | 51 | # Enable writing of statistics records. 52 | #statistics clockstats cryptostats loopstats peerstats 53 | 54 | # Disable the monitoring facility to prevent amplification attacks using ntpdc 55 | # monlist command when default restrict does not include the noquery flag. See 56 | # CVE-2013-5211 for more details. 57 | # Note: Monitoring will not be disabled with the limited restriction flag. 
58 | disable monitor 59 | 60 | -------------------------------------------------------------------------------- /playbooks/roles/check_os/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - assert: 2 | that: 3 | - (ansible_distribution == 'RedHat' and ansible_distribution_version == '7.4') or 4 | (ansible_distribution == 'RedHat' and ansible_distribution_version == '7.5') or 5 | (ansible_distribution == 'RedHat' and ansible_distribution_version == '7.6') 6 | msg: "The only supported platforms for this release are RHEL 7.4 or RHEL 7.5 or RHEL 7.6" 7 | -------------------------------------------------------------------------------- /playbooks/roles/check_packages_bastion/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: Install packages 2 | yum: 3 | name: "{{item}}" 4 | state: latest 5 | with_items: 6 | - "{{ packages }}" 7 | - "{{ packages_jumphost }}" 8 | - name: update bastion packages 9 | yum: 10 | name: '*' 11 | state: latest 12 | -------------------------------------------------------------------------------- /playbooks/roles/check_packages_nodes/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: Install packages 2 | yum: 3 | name: "{{item}}" 4 | state: latest 5 | with_items: 6 | - "{{packages}}" 7 | - name: Update packages 8 | yum: 9 | name: '*' 10 | state: latest 11 | - name: Shutdown 12 | shell: sleep 2 && shutdown -r now "Reboot triggered by Ansible" 13 | async: 1 14 | poll: 0 15 | ignore_errors: true 16 | - name: Wait for host to come back up 17 | local_action: 18 | module: wait_for 19 | host: "{{ inventory_hostname }}" 20 | port: 22 21 | delay: 5 22 | timeout: 300 23 | - name: Update packages 24 | command: yum update -y 25 | -------------------------------------------------------------------------------- /playbooks/roles/check_proxy/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: ensure proxy settings uppercase with username and password 2 | lineinfile: 3 | dest: /etc/environment 4 | state: present 5 | line: "{{item[0]}}={{ proxy_username }}:{{ proxy_password }}@{{ item[1] }}" 6 | with_together: 7 | - "{{ proxy_uc_envs }}" 8 | - "{{ proxies }}" 9 | when: proxy_username is defined and proxy_http is defined 10 | - name: ensure proxy settings uppercase 11 | lineinfile: 12 | dest: /etc/environment 13 | state: present 14 | line: "{{item[0]}}={{ item[1] }}" 15 | with_together: 16 | - "{{ proxy_uc_envs }}" 17 | - "{{ proxies }}" 18 | when: proxy_username is not defined and proxy_http is defined 19 | - name: ensure no proxy settings uppercase 20 | lineinfile: 21 | dest: /etc/environment 22 | state: present 23 | line: "NO_PROXY=127.0.0.1,localhost,.svc,{{proxy_no | default('')}}" 24 | when: proxy_http is defined 25 | - name: ensure proxy settings lowercase with username and password 26 | lineinfile: 27 | dest: /etc/environment 28 | state: present 29 | line: "{{item[0]}}={{ proxy_username }}:{{ proxy_password }}@{{ item[1] }}" 30 | with_together: 31 | - "{{ proxy_lc_envs }}" 32 | - "{{ proxies }}" 33 | when: proxy_username is defined and proxy_http is defined 34 | - name: ensure proxy settings lowercase 35 | lineinfile: 36 | dest: /etc/environment 37 | state: present 38 | line: "{{item[0]}}={{ item[1] }}" 39 | with_together: 40 | - "{{ proxy_lc_envs }}" 41 | - "{{ proxies }}" 42 | when: proxy_username is not defined and proxy_http is defined 43 | - name: ensure no 
proxy settings lowercase 44 | lineinfile: 45 | dest: /etc/environment 46 | state: present 47 | line: "no_proxy=127.0.0.1,localhost,.svc,{{proxy_no | default('')}}" 48 | when: proxy_http is defined 49 | 50 | 51 | 52 | -------------------------------------------------------------------------------- /playbooks/roles/check_selinux/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - name: check if selinux is running and enforced 2 | command: getenforce 3 | register: sestatus 4 | changed_when: false 5 | 6 | - name: Check SELinux 7 | assert: 8 | that: 9 | - "sestatus.stdout == 'Enforcing'" 10 | msg: "Please set SELinux to Enforcing" 11 | -------------------------------------------------------------------------------- /playbooks/roles/check_sizing/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - shell: echo 'Currently CPU has {{ ansible_processor_vcpus }} vCPUs and memory is {{ (ansible_memory_mb.real.total / 1024) | round | int }} GBs. Node sizing is {{ node_vcpus }} for vCPUs, and {{ (node_mem / 1024) | round | int }} for memory.' 2 | register: status_msg 3 | - debug: msg="{{ status_msg.stdout }}" 4 | - fail: 5 | msg: "Not enough CPU cores or memory." 6 | when: sizing == "fixed" and (((ansible_memory_mb.real.total / 1024) | round | int)<((node_mem / 1024) | round | int) or ansible_processor_vcpus= (disk_free_space_bytes | int) 22 | msg: "There is no enough root space available on this node, required {{disk_free_space}} available {{root_gb_available}}" 23 | -------------------------------------------------------------------------------- /playbooks/roles/check_subscription/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Create /tmp/stc-repos-should-enabled 3 | copy: 4 | content: "{{ repos | sort | join('\n') }}\n" 5 | dest: /tmp/stc-repos-should-enabled 6 | 7 | - name: Create /tmp/stc-repos-enabled 8 | shell: "subscription-manager repos --list-enabled | grep 'Repo ID' | cut -f2 -d':' | tr -d ' ' | sort > /tmp/stc-repos-enabled" 9 | args: 10 | creates: /tmp/stc-repos-enabled 11 | 12 | - name: Compare stc-repos-should-enabled vs stc-repos-enabled 13 | shell: "diff -Nuar /tmp/stc-repos-should-enabled /tmp/stc-repos-enabled" 14 | ignore_errors: true 15 | register: ret 16 | 17 | - name: Check enabled repositories 18 | assert: 19 | that: 20 | - "ret.rc == 0" 21 | msg: 22 | - "Please check enabled repositories" 23 | - "{{ ret.stdout_lines }}" 24 | 25 | - name: Cleanup tmp files 26 | file: 27 | state: absent 28 | path: "{{ item }}" 29 | with_items: 30 | - /tmp/stc-repos-should-enabled 31 | - /tmp/stc-repos-enabled 32 | tags: 33 | - cleanup 34 | -------------------------------------------------------------------------------- /playbooks/roles/print/tasks/main.yml: -------------------------------------------------------------------------------- 1 | - debug: 2 | msg: | 3 | {{name}} 4 | {{value}} 5 | {{file}} 6 | - name: Print var to file 7 | local_action: | 8 | shell echo '{{name}}: "{{value}}"' >> {{file}} 9 | -------------------------------------------------------------------------------- /playbooks/subscription-register.yml: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env ansible-playbook 2 | --- 3 | - hosts: all 4 | gather_facts: false 5 | become: true 6 | serial: 1 7 | tasks: 8 | - fail: 9 | msg: "Please adjust file and add subscription cred" 10 | 11 | # - name: Register and 
subscribe to multiple pools. 12 | # redhat_subscription: 13 | # state: present 14 | # username: joe_user 15 | # password: somepass 16 | # pool_ids: 17 | # - 0123456789abcdef0123456789abcdef 18 | # - 1123456789abcdef0123456789abcdef 19 | # 20 | 21 | # - name: Update the consumed subscriptions from the previous example (remove Red Hat Virtualization subscription) 22 | # redhat_subscription: 23 | # state: present 24 | # activationkey: 1-222333444 25 | # org_id: 222333444 26 | # pool: '^Red Hat Enterprise Server$' 27 | -------------------------------------------------------------------------------- /playbooks/subscription-unregister.yml: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env ansible-playbook 2 | --- 3 | - hosts: all 4 | gather_facts: false 5 | become: true 6 | tasks: 7 | - name: Unregister host 8 | redhat_subscription: 9 | state: absent 10 | 11 | -------------------------------------------------------------------------------- /playbooks/templates/hosts-v3.10.j2: -------------------------------------------------------------------------------- 1 | # Create an OSEv3 group that contains the masters and nodes groups 2 | [OSEv3:children] 3 | masters 4 | nodes 5 | etcd 6 | {% if lb is defined %} 7 | lb 8 | {% endif %} 9 | bastion 10 | {% if cns is defined %} 11 | glusterfs 12 | {% endif %} 13 | 14 | # Set variables common for all OSEv3 hosts 15 | [OSEv3:vars] 16 | ansible_ssh_user={{ssh_user}} 17 | ansible_become={% if ssh_user == "root" %}no{% else %}yes{% endif %} 18 | 19 | # https://github.com/openshift/openshift-ansible/blob/master/DEPLOYMENT_TYPES.md 20 | deployment_type=openshift-enterprise 21 | containerized=false 22 | 23 | # Skip env validation 24 | openshift_disable_check=disk_availability,memory_availability 25 | 26 | # Configure usage of openshift_clock role. 27 | openshift_clock_enabled=true 28 | 29 | # Set upgrade restart mode for full system restarts 30 | openshift_rolling_restart_mode=system 31 | 32 | # Enable cockpit 33 | osm_use_cockpit=false 34 | osm_cockpit_plugins=['cockpit-kubernetes', 'cockpit-pcp', 'setroubleshoot-server'] 35 | 36 | # Docker / Registry Configuration 37 | openshift_docker_disable_push_dockerhub=True 38 | openshift_docker_options="--log-driver=journald --log-level=warn --ipv6=false" 39 | openshift_docker_insecure_registries=docker-registry.default.svc,docker-registry.default.svc.cluster.local 40 | 41 | # Native high availability cluster method with optional load balancer. 42 | 43 | openshift_master_cluster_method=native 44 | openshift_master_cluster_hostname={{api_dns}} 45 | openshift_master_cluster_public_hostname={{api_dns}} 46 | openshift_master_api_port=8443 47 | openshift_master_console_port=8443 48 | 49 | 50 | # Configure nodeIP in the node config 51 | # This is needed in cases where node traffic is desired to go over an 52 | # interface other than the default network interface. 53 | 54 | # Configure the multi-tenant SDN plugin (default is 'redhat/openshift-ovs-subnet') 55 | os_sdn_network_plugin_name=redhat/openshift-ovs-multitenant 56 | 57 | # Configure SDN cluster network and kubernetes service CIDR blocks. These 58 | # network blocks should be private and should not conflict with network blocks 59 | # in your infrastructure that pods may require access to. Can not be changed 60 | # after deployment. 
61 | osm_cluster_network_cidr=10.1.0.0/16 62 | openshift_portal_net=172.30.0.0/16 63 | osm_host_subnet_length=8 64 | 65 | #Proxy 66 | {% if proxy_http is defined %} 67 | openshift_http_proxy={% if proxy_username is defined %}{{proxy_username}}{% if proxy_password is defined %}:{{proxy_password}}{% endif %}@{% endif %}{{proxy_http}} 68 | {% endif %} 69 | {% if proxy_https is defined %} 70 | openshift_https_proxy={% if proxy_username is defined %}{{proxy_username}}{% if proxy_password is defined %}:{{proxy_password}}{% endif %}@{% endif %}{{proxy_https}} 71 | {% endif %} 72 | {% if proxy_no is defined %} 73 | openshift_no_proxy='{{proxy_no}}' 74 | {% endif %} 75 | openshift_generate_no_proxy_hosts=true 76 | 77 | # htpasswd auth 78 | openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider'}] 79 | 80 | # Provide local certificate paths which will be deployed to masters 81 | openshift_master_overwrite_named_certificates=true 82 | 83 | # Install the openshift examples 84 | openshift_install_examples=true 85 | openshift_examples_modify_imagestreams=true 86 | 87 | # default subdomain to use for exposed routes 88 | openshift_master_default_subdomain={{ apps_dns | replace("*.","") }} 89 | 90 | # Openshift Registry Options 91 | openshift_hosted_registry_storage_kind=glusterfs 92 | openshift_hosted_registry_replicas=1 93 | 94 | #OCS 95 | openshift_storage_glusterfs_namespace=ocs 96 | openshift_storage_glusterfs_name=ocs 97 | openshift_storage_glusterfs_wipe=True 98 | openshift_storage_glusterfs_storageclass=true 99 | openshift_storage_glusterfs_storageclass_default=true 100 | openshift_storage_glusterfs_image=registry.access.redhat.com/rhgs3/rhgs-server-rhel7 101 | openshift_storage_glusterfs_heketi_image=registry.access.redhat.com/rhgs3/rhgs-volmanager-rhel7 102 | openshift_storage_glusterfs_block_deploy=True 103 | openshift_storage_glusterfs_block_host_vol_create=true 104 | openshift_storage_glusterfs_block_host_vol_size=50 105 | openshift_storage_glusterfs_block_storageclass=true 106 | 107 | # Metrics deployment 108 | openshift_metrics_install_metrics=true 109 | openshift_metrics_hawkular_hostname=metrics.{{ apps_dns | replace("*.","")}} 110 | openshift_metrics_cassandra_replicas=1 111 | openshift_metrics_cassandra_limits_memory=2Gi 112 | openshift_metrics_hawkular_replicas=1 113 | openshift_metrics_duration=5 114 | openshift_metrics_cassandra_pvc_size=5Gi 115 | openshift_metrics_cassandra_storage_type=dynamic 116 | openshift_metrics_cassandra_pvc_storage_class_name=glusterfs-ocs-block 117 | 118 | # Logging deployment 119 | openshift_logging_install_logging=true 120 | openshift_logging_kibana_hostname=logging.{{ apps_dns | replace("*.","") }} 121 | openshift_logging_use_ops=false 122 | openshift_logging_public_master_url=https://{{api_dns}}:8443 123 | openshift_logging_curator_default_days=7 124 | openshift_logging_es_pvc_size=10Gi 125 | openshift_logging_es_pvc_dynamic=true 126 | openshift_logging_es_pvc_storage_class_name=glusterfs-ocs-block 127 | openshift_logging_es_memory_limit=8Gi 128 | 129 | # Prometheus 130 | openshift_cluster_monitoring_operator_install=true 131 | openshift_cluster_monitoring_operator_prometheus_storage_capacity=5Gi 132 | openshift_cluster_monitoring_operator_node_selector={"node-role.kubernetes.io/infra":"true"} 133 | 134 | # Service brokers 135 | 136 | #openshift_service_catalog_image_version=latest 137 | #ansible_service_broker_local_registry_whitelist=['.*-apb$'] 138 | 
#openshift_template_service_broker_namespaces=['openshift'] 139 | 140 | # Grafana 141 | openshift_grafana_storage_type=pvc 142 | openshift_grafana_sc_name=glusterfs-ocs 143 | openshift_grafana_storage_volume_size=1Gi 144 | openshift_grafana_node_selector={"node-role.kubernetes.io/infra":"true"} 145 | 146 | 147 | [masters] 148 | {% for master in masters %} 149 | {{master}} 150 | {% endfor %} 151 | 152 | [etcd] 153 | {% for master in masters %} 154 | {{master}} 155 | {% endfor %} 156 | 157 | {% if lb is defined %} 158 | [lb] 159 | {{lb}} 160 | {% endif %} 161 | 162 | {% if cns is defined %} 163 | [glusterfs] 164 | {% for cns_node in cns %} 165 | {{cns_node}} glusterfs_ip={{ cns_hosts[loop.index0] }} glusterfs_devices='["/dev/{{ocs_disk}}"]' 166 | {% endfor %} 167 | {% endif %} 168 | 169 | [nodes] 170 | {% for master in masters %} 171 | {{master}} openshift_node_group_name='{% if infranodes is defined %}node-config-master{% endif %}{% if infranodes is not defined %}node-config-master-infra{% endif %}' 172 | {% endfor %} 173 | 174 | {% if infranodes is defined %} 175 | {% for infra_node in infranodes %} 176 | {{infra_node}} openshift_node_group_name='node-config-infra' 177 | {% endfor %} 178 | {% endif %} 179 | 180 | {% for node in nodes %} 181 | {{node}} openshift_node_group_name='node-config-compute' 182 | {% endfor %} 183 | 184 | [bastion] 185 | {% if bastion is defined %} 186 | {{bastion}} 187 | {% endif %} 188 | -------------------------------------------------------------------------------- /playbooks/templates/hosts-v3.11.j2: -------------------------------------------------------------------------------- 1 | # Create an OSEv3 group that contains the masters and nodes groups 2 | [OSEv3:children] 3 | masters 4 | nodes 5 | etcd 6 | {% if lb is defined %} 7 | lb 8 | {% endif %} 9 | {% if bastion is defined %} 10 | bastion 11 | {% endif %} 12 | {% if cns is defined %} 13 | glusterfs 14 | {% endif %} 15 | 16 | # Set variables common for all OSEv3 hosts 17 | [OSEv3:vars] 18 | ansible_ssh_user={{ssh_user}} 19 | ansible_become={% if ssh_user == "root" %}no{% else %}yes{% endif %} 20 | 21 | 22 | # https://github.com/openshift/openshift-ansible/blob/master/DEPLOYMENT_TYPES.md 23 | deployment_type=openshift-enterprise 24 | oreg_url=registry.redhat.io/openshift3/ose-${component}:${version} 25 | {% if registry_token_user is defined and registry_token is defined %} 26 | oreg_auth_user={{registry_token_user}} 27 | oreg_auth_password={{registry_token}} 28 | {% else %} 29 | # Please set: 30 | # oreg_auth_user=... 31 | # oreg_auth_password=..... 32 | {% endif %} 33 | containerized=false 34 | 35 | # Skip env validation 36 | openshift_disable_check=disk_availability,memory_availability 37 | 38 | # Configure usage of openshift_clock role. 39 | openshift_clock_enabled=true 40 | 41 | # Set upgrade restart mode for full system restarts 42 | openshift_rolling_restart_mode=system 43 | 44 | # Enable cockpit 45 | osm_use_cockpit=false 46 | osm_cockpit_plugins=['cockpit-kubernetes', 'cockpit-pcp', 'setroubleshoot-server'] 47 | 48 | # Docker / Registry Configuration 49 | openshift_docker_disable_push_dockerhub=True 50 | openshift_docker_options="--log-driver=journald --log-level=warn --ipv6=false" 51 | openshift_docker_insecure_registries=docker-registry.default.svc,docker-registry.default.svc.cluster.local 52 | 53 | # Native high availability cluster method with optional load balancer. 
54 | 55 | openshift_master_cluster_method=native 56 | openshift_master_cluster_hostname={{api_dns}} 57 | openshift_master_cluster_public_hostname={{api_dns}} 58 | openshift_master_api_port=8443 59 | openshift_master_console_port=8443 60 | 61 | 62 | # Configure nodeIP in the node config 63 | # This is needed in cases where node traffic is desired to go over an 64 | # interface other than the default network interface. 65 | 66 | # Configure the multi-tenant SDN plugin (default is 'redhat/openshift-ovs-subnet') 67 | os_sdn_network_plugin_name=redhat/openshift-ovs-multitenant 68 | 69 | # Configure SDN cluster network and kubernetes service CIDR blocks. These 70 | # network blocks should be private and should not conflict with network blocks 71 | # in your infrastructure that pods may require access to. Can not be changed 72 | # after deployment. 73 | osm_cluster_network_cidr=10.1.0.0/16 74 | openshift_portal_net=172.30.0.0/16 75 | osm_host_subnet_length=8 76 | 77 | #Proxy 78 | {% if proxy_http is defined %} 79 | openshift_http_proxy={% if proxy_username is defined %}{{proxy_username}}{% if proxy_password is defined %}:{{proxy_password}}{% endif %}@{% endif %}{{proxy_http}} 80 | {% endif %} 81 | {% if proxy_https is defined %} 82 | openshift_https_proxy={% if proxy_username is defined %}{{proxy_username}}{% if proxy_password is defined %}:{{proxy_password}}{% endif %}@{% endif %}{{proxy_https}} 83 | {% endif %} 84 | {% if proxy_no is defined %} 85 | openshift_no_proxy='{{proxy_no}}' 86 | {% endif %} 87 | openshift_generate_no_proxy_hosts=true 88 | 89 | # htpasswd auth 90 | openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider'}] 91 | 92 | # Provide local certificate paths which will be deployed to masters 93 | openshift_master_overwrite_named_certificates=true 94 | 95 | # Install the openshift examples 96 | openshift_install_examples=true 97 | openshift_examples_modify_imagestreams=true 98 | 99 | # default subdomain to use for exposed routes 100 | openshift_master_default_subdomain={{ apps_dns | replace("*.","") }} 101 | 102 | {%- if lb is defined and infranodes is defined -%} 103 | {%- set hosts_http=[] -%} 104 | {%- set hosts_https=[] -%} 105 | {% for infra_node in infranodes -%} 106 | {% set str='{"name":"infra' ~ loop.index ~ '","address":"'~ infra_node ~ ':80","opts":"check"}' -%} 107 | {{ hosts_http.append('{"name":"infra' ~ loop.index ~ '","address":"'~ node_ip_map[infra_node] ~ ':80","opts":"check"}') }} 108 | {{ hosts_https.append('{"name":"infra' ~ loop.index ~ '","address":"'~ node_ip_map[infra_node] ~ ':443","opts":"check"}') }} 109 | {%- endfor %} 110 | 111 | openshift_loadbalancer_additional_frontends=[{"name":"atomic-openshift-infra-http","mode":"tcp","options":["tcplog"],"binds":["*:80"],"default_backend":"atomic-openshift-infra-http"},{"name":"atomic-openshift-infra-https","mode":"tcp","options":["tcplog"],"binds":["*:443"],"default_backend":"atomic-openshift-infra-https"}] 112 | 113 | openshift_loadbalancer_additional_backends=[{"name":"atomic-openshift-infra-http","mode":"tcp","option":"tcplog","balance":"roundrobin","servers":[{{ hosts_http | join(',') }}]},{"name":"atomic-openshift-infra-https","mode":"tcp","option":"tcplog","balance":"roundrobin","servers":[{{hosts_https| join(',')}}]}] 114 | 115 | r_openshift_loadbalancer_os_firewall_allow=[{"service":"haproxy stats","port":"9000/tcp"},{"service":"haproxy balance","port":"8443/tcp"},{"service":"haproxy infra http 
balance","port":"80/tcp"},{"service":"haproxy infra https balance","port":"443/tcp"}] 116 | 117 | {% endif %} 118 | 119 | # Openshift Registry Options 120 | openshift_hosted_registry_storage_kind=glusterfs 121 | openshift_hosted_registry_replicas=1 122 | 123 | #OCS 124 | openshift_storage_glusterfs_namespace=ocs 125 | openshift_storage_glusterfs_name=ocs 126 | openshift_storage_glusterfs_wipe=True 127 | openshift_storage_glusterfs_storageclass=true 128 | openshift_storage_glusterfs_storageclass_default=true 129 | 130 | # Fix for: https://access.redhat.com/solutions/3949971 131 | openshift_storage_glusterfs_image=registry.redhat.io/rhgs3/rhgs-server-rhel7:{{ocs_version_tag}} 132 | openshift_storage_glusterfs_block_image=registry.redhat.io/rhgs3/rhgs-gluster-block-prov-rhel7:{{ocs_version_tag}} 133 | openshift_storage_glusterfs_heketi_image=registry.redhat.io/rhgs3/rhgs-volmanager-rhel7:{{ocs_version_tag}} 134 | 135 | 136 | openshift_storage_glusterfs_block_deploy=True 137 | openshift_storage_glusterfs_block_host_vol_create=true 138 | openshift_storage_glusterfs_block_host_vol_size=50 139 | openshift_storage_glusterfs_block_storageclass=true 140 | 141 | # Metrics deployment 142 | openshift_metrics_install_metrics={{ true if install_metrics == 'y' else false }} 143 | openshift_metrics_hawkular_hostname=metrics.{{ apps_dns | replace("*.","")}} 144 | openshift_metrics_cassandra_replicas=1 145 | openshift_metrics_cassandra_limits_memory=2Gi 146 | openshift_metrics_hawkular_replicas=1 147 | openshift_metrics_duration=5 148 | openshift_metrics_cassandra_pvc_size=5Gi 149 | openshift_metrics_cassandra_storage_type=pv 150 | openshift_metrics_cassandra_pvc_storage_class_name=glusterfs-ocs-block 151 | 152 | # Logging deployment 153 | openshift_logging_install_logging={{ true if install_logging == 'y' else false }} 154 | openshift_logging_kibana_hostname=logging.{{ apps_dns | replace("*.","") }} 155 | openshift_logging_use_ops=false 156 | openshift_logging_public_master_url=https://{{api_dns}}:8443 157 | openshift_logging_curator_default_days=5 158 | openshift_logging_es_pvc_size=5Gi 159 | openshift_logging_es_pvc_dynamic=true 160 | openshift_logging_es_pvc_storage_class_name=glusterfs-ocs-block 161 | openshift_logging_es_memory_limit=8Gi 162 | openshift_logging_kibana_nodeselector={"node-role.kubernetes.io/infra": "true"} 163 | openshift_logging_curator_nodeselector={"node-role.kubernetes.io/infra": "true"} 164 | openshift_logging_es_nodeselector={"node-role.kubernetes.io/infra": "true"} 165 | 166 | # Prometheus 167 | openshift_cluster_monitoring_operator_install=true 168 | openshift_cluster_monitoring_operator_prometheus_storage_enabled=true 169 | openshift_cluster_monitoring_operator_prometheus_storage_class_name=glusterfs-ocs-block 170 | openshift_cluster_monitoring_operator_alertmanager_storage_enabled=true 171 | openshift_cluster_monitoring_operator_alertmanager_storage_class_name=glusterfs-ocs-block 172 | openshift_cluster_monitoring_operator_prometheus_storage_capacity=5Gi 173 | openshift_cluster_monitoring_operator_node_selector={"node-role.kubernetes.io/infra":"true"} 174 | 175 | # Service brokers 176 | 177 | #openshift_service_catalog_image_version=latest 178 | #ansible_service_broker_local_registry_whitelist=['.*-apb$'] 179 | #openshift_template_service_broker_namespaces=['openshift'] 180 | 181 | # Operator Lifecycle Manager 182 | # openshift_enable_olm=true 183 | # 
openshift_additional_registry_credentials=[{'host':'registry.connect.redhat.com','user':'your_user','password':'your_pwd','test_image':'mongodb/enterprise-operator:0.3.2'}] 184 | 185 | 186 | 187 | 188 | [masters] 189 | {% for master in masters %} 190 | {{master}} 191 | {% endfor %} 192 | 193 | [etcd] 194 | {% for master in masters %} 195 | {{master}} 196 | {% endfor %} 197 | 198 | {% if lb is defined %} 199 | [lb] 200 | {{lb}} 201 | {% endif %} 202 | 203 | {% if cns is defined %} 204 | [glusterfs] 205 | {% for cns_node in cns %} 206 | {{cns_node}} glusterfs_ip={{ cns_hosts[loop.index0] }} glusterfs_devices='["/dev/{{ocs_disk}}"]' 207 | {% endfor %} 208 | {% endif %} 209 | 210 | 211 | [nodes] 212 | {% for master in masters %} 213 | {{master}} openshift_node_group_name='{% if infranodes is defined %}node-config-master{% endif %}{% if infranodes is not defined %}node-config-master-infra{% endif %}' 214 | {% endfor %} 215 | 216 | {% if infranodes is defined %} 217 | {% for infra_node in infranodes %} 218 | {{infra_node}} openshift_node_group_name='node-config-infra' 219 | {% endfor %} 220 | {% endif %} 221 | 222 | {% for node in nodes %} 223 | {{node}} openshift_node_group_name='node-config-compute' 224 | {% endfor %} 225 | 226 | {% if bastion is defined %} 227 | [bastion] 228 | {{bastion}} 229 | {% endif %} 230 | -------------------------------------------------------------------------------- /playbooks/validate.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Validate proxy 3 | hosts: all 4 | gather_facts: yes 5 | tags: proxy 6 | vars: 7 | proxy_uc_envs: 8 | - HTTP_PROXY 9 | - HTTPS_PROXY 10 | proxy_lc_envs: 11 | - http_proxy 12 | - https_proxy 13 | proxies: 14 | - "{{ proxy_http }}" 15 | - "{{ proxy_https }}" 16 | vars_files: 17 | - "{{file_env}}" 18 | - "{{file_secrets}}" 19 | roles: 20 | - check_proxy 21 | 22 | - name: ensure bastion packages. 23 | hosts: bastion 24 | tags: packages 25 | gather_facts: yes 26 | vars_files: 27 | - "{{file_env}}" 28 | roles: 29 | - check_packages_bastion 30 | 31 | - name: ensure nodes packages. 32 | hosts: nodes 33 | tags: packages 34 | gather_facts: no 35 | vars_files: 36 | - "{{file_env}}" 37 | roles: 38 | - check_subscription 39 | - check_packages_nodes 40 | 41 | - name: ensure nodes networking. 
42 | hosts: all 43 | tags: networking 44 | gather_facts: no 45 | vars_files: 46 | - "{{file_env}}" 47 | roles: 48 | - check_networking 49 | 50 | 51 | - name: Validate environment 52 | gather_facts: no 53 | hosts: all 54 | tags: validate 55 | vars_files: 56 | - "{{file_env}}" 57 | - "{{file_secrets}}" 58 | tasks: 59 | - import_role: 60 | name: check_hostname 61 | - import_role: 62 | name: check_disks 63 | - import_role: 64 | name: check_os 65 | - import_role: 66 | name: check_connectivity 67 | - import_role: 68 | name: check_sizing 69 | when: inventory_hostname in groups['nodes'] 70 | - import_role: 71 | name: check_dns 72 | - import_role: 73 | name: check_selinux 74 | - import_role: 75 | name: check_ntp 76 | when: ntp_servers is defined 77 | - import_role: 78 | name: check_storage 79 | when: inventory_hostname in groups['nodes'] 80 | - import_role: 81 | name: check_nm 82 | - import_role: 83 | name: check_glusterfs 84 | when: inventory_hostname in groups['glusterfs'] | default([]) 85 | 86 | 87 | - name: Initialize firewall check 88 | hosts: nodes 89 | tags: firewall 90 | gather_facts: no 91 | vars_files: 92 | - "{{file_env}}" 93 | roles: 94 | - check_firewall_initialize 95 | 96 | - name: Execute firewall check 97 | hosts: localhost 98 | tags: firewall 99 | gather_facts: yes 100 | vars_files: 101 | - "{{file_env}}" 102 | roles: 103 | - check_firewall 104 | 105 | - name: Validate docker 106 | hosts: nodes 107 | tags: docker 108 | gather_facts: yes 109 | vars_files: 110 | - "{{file_env}}" 111 | roles: 112 | - check_docker_setup 113 | - check_docker_validation 114 | 115 | - name: Cleanup validation trash 116 | hosts: all 117 | tags: cleanup 118 | gather_facts: yes 119 | vars_files: 120 | - "{{file_env}}" 121 | roles: 122 | - check_cleanup 123 | -------------------------------------------------------------------------------- /playbooks/validate_config.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Collect host information 3 | hosts: localhost 4 | connection: local 5 | gather_facts: no 6 | vars_files: 7 | - "{{file_env}}" 8 | vars_prompt: 9 | - name: sudo_password 10 | prompt: Password for sudo, leave blank for passwordless sudo (will be encrypted) 11 | private: yes 12 | tasks: 13 | - name: Cleanup 14 | file: 15 | path: "{{item}}" 16 | state: absent 17 | with_items: 18 | - "{{file_ip_data}}" 19 | - "{{file_inventory}}" 20 | - "{{file_secrets}}" 21 | - name: Ensure secrets file 22 | copy: 23 | dest: "{{file_secrets}}" 24 | content: | 25 | sudo_password: "{{sudo_password}}" 26 | - name: Verify Masters IP addresses 27 | local_action: | 28 | shell ping -c 1 {{item}} | grep "PING"| sed -e 's/[^a-zA-Z0-9.-]/ /g' | awk '{print $3 ": {{item }}" }' >> {{file_ip_data}} 29 | with_items: "{{masters}}" 30 | - name: Verify Infranodes IP addresses 31 | local_action: | 32 | shell ping -c 1 {{item}} | grep "PING"| sed -e 's/[^a-zA-Z0-9.-]/ /g' | awk '{print $3 ": {{item }}" }' >> {{file_ip_data}} 33 | with_items: "{{infranodes | default([])}}" 34 | - name: Verify Nodes IP addresses 35 | local_action: | 36 | shell ping -c 1 {{item}} | grep "PING"| sed -e 's/[^a-zA-Z0-9.-]/ /g' | awk '{print $3 ": {{item }}" }' >> {{file_ip_data}} 37 | with_items: "{{nodes}}" 38 | - name: Verify CNS IP addresses 39 | local_action: | 40 | shell ping -c 1 {{item}} | grep "PING"| sed -e 's/[^a-zA-Z0-9.-]/ /g' | awk '{print $3 }' 41 | with_items: "{{cns | default([])}}" 42 | register: cns_data 43 | - local_action: | 44 | shell cat "{{file_ip_data}}" 45 | register: ip_data 46 | - 
name: Build node_ip_map 47 | set_fact: 48 | node_ip_map: >- 49 | {% set splited = item.split(':') | map('trim') | list -%} 50 | {{ node_ip_map | default({}) | combine({splited[1]: splited[0]}) }} 51 | with_items: "{{ ip_data.stdout_lines }}" 52 | - name: Print IP addresses 53 | debug: 54 | msg: | 55 | Managed to find the following IP addresses: 56 | "{{ip_data.stdout }}" 57 | 58 | - name: register cns hosts 59 | set_fact: 60 | cns_hosts: [] 61 | when: cns is defined 62 | 63 | - name: register cns ip 64 | set_fact: 65 | cns_hosts: "{{cns_hosts}} + ['{{ item.stdout }}']" 66 | with_items: "{{cns_data.results}}" 67 | when: cns is defined 68 | 69 | - name: debug cns hosts 70 | debug: var=cns_hosts 71 | when: cns is defined 72 | 73 | - name: Create inventory file based on dynamically collected information. 74 | template: 75 | src: "templates/hosts-v{{'%0.2f'| format(ocp_version|float)}}.j2" 76 | dest: "{{file_inventory}}" 77 | -------------------------------------------------------------------------------- /playbooks/vars/bb4.yml: -------------------------------------------------------------------------------- 1 | registry: bastion:5000 2 | registry_packages: 3 | - docker-distribution 4 | - skopeo 5 | - openssl 6 | registry_path: /etc/docker-distribution/registry 7 | registry_conf: config.yml 8 | registry_secret: changeme 9 | proxy_packages: 10 | - squid 11 | proxy_cidr: 192.168.0.0/24 12 | 13 | -------------------------------------------------------------------------------- /playbooks/vars/bb6.yml: -------------------------------------------------------------------------------- 1 | dev_project: dev-project 2 | test_project: test-project 3 | prod_project: prod-project 4 | tag_prod: toprod 5 | tag_test: totest 6 | builder_image: redhat-openjdk18-openshift:1.1 7 | service_name: hello-ocp 8 | source: https://github.com/tahonen/hello-springboot.git 9 | source_branch: master 10 | -------------------------------------------------------------------------------- /setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | OCP_VERSION=3.11 4 | CNS_NODES=3 5 | ANSIBLE_VERSION=2.6 6 | 7 | cat << EOF 8 | ____ _____ ____ 9 | / ___|_ _/ ___| 10 | \___ \ | || | 11 | ___) || || |___ 12 | |____/ |_| \____| 13 | 14 | 15 | EOF 16 | 17 | 18 | echo 19 | echo "Welcome to STC OpenShift Installation Validator" 20 | echo "You can find help for these questions here:" 21 | echo " https://github.com/RedHat-EMEA-SSA-Team/stc/blob/master/docs/bb0.adoc#prepare-configuration-file" 22 | 23 | if [ ! 
-f env.yml ]; then 24 | 25 | echo "Default values are shown in []" 26 | echo 27 | 28 | 29 | echo "Please select OCP Version to install: 3.11, 3.10" 30 | echo "[3.11] 3.10" 31 | read ocp_version 32 | 33 | case "$ocp_version" in 34 | 3.11|3.10) OCP_VERSION=$ocp_version 35 | ;; 36 | esac 37 | 38 | 39 | sed -Ei "s/ocp_version: (.*)/ocp_version: \"$OCP_VERSION\"/" playbooks/group_vars/all 40 | 41 | echo "ocp_version: $OCP_VERSION" > env.yml 42 | 43 | echo "*** selected $OCP_VERSION " 44 | echo 45 | 46 | 47 | 48 | while [ -z $api_dns ] 49 | do 50 | echo "Please insert Cluster hostname (API DNS):" 51 | read -r api_dns 52 | done 53 | 54 | echo "api_dns: $api_dns" >> env.yml 55 | 56 | while [ -z $apps_dns ] 57 | do 58 | echo "Please insert Wildcard DNS for Apps:" 59 | read -r apps_dns 60 | done 61 | 62 | echo "apps_dns: $apps_dns" >> env.yml 63 | 64 | 65 | echo 66 | echo "Cluster Topology Setup" 67 | echo 68 | 69 | 70 | 71 | while [ "$flavor" != "standard" -a "$flavor" != "mini" -a "$flavor" != "full" ] 72 | do 73 | echo "Please select STC Flavor" 74 | echo "[standard] mini full" 75 | read -r flavor 76 | if [ -z $flavor ]; then 77 | flavor="standard" 78 | fi 79 | done 80 | 81 | echo 82 | echo "Selected $flavor Flavor" 83 | echo 84 | 85 | while [ -z $bastion ] 86 | do 87 | echo "Please insert Bastion Node hostname:" 88 | read -r bastion 89 | done 90 | 91 | echo "bastion: $bastion" >> env.yml 92 | echo "lb: $bastion" >> env.yml 93 | 94 | case "$flavor" in 95 | standard) 96 | n_masters=3 97 | n_nodes=3 98 | ;; 99 | mini) 100 | n_masters=1 101 | n_infranodes=1 102 | n_nodes=1 103 | ;; 104 | full) 105 | n_masters=3 106 | n_infranodes=3 107 | n_nodes=3 108 | ;; 109 | esac 110 | 111 | echo "masters:" >> env.yml 112 | 113 | for (( c=1; c<=$n_masters; c++ )) 114 | do 115 | while [ -z $master_i ] 116 | do 117 | echo "Please insert Master $c hostname:" 118 | read -r master_i 119 | done 120 | echo "- $master_i" >> env.yml 121 | [[ "$flavor" == "mini" ]] && cns_hosts+=($master_i) 122 | master_i="" 123 | 124 | done 125 | 126 | 127 | 128 | 129 | 130 | if [ -n "$n_infranodes" ]; then 131 | echo "infranodes:" >> env.yml 132 | 133 | for (( c=1; c<=$n_infranodes; c++ )) 134 | do 135 | while [ -z $infranode_i ] 136 | do 137 | echo "Please insert Infranode $c hostname:" 138 | read -r infranode_i 139 | done 140 | echo "- $infranode_i" >> env.yml 141 | cns_hosts+=($infranode_i) 142 | infranode_i="" 143 | done 144 | 145 | fi 146 | 147 | 148 | echo "nodes:" >> env.yml 149 | 150 | 151 | for (( c=1; c<=$n_nodes; c++ )) 152 | do 153 | while [ -z $node_i ] 154 | do 155 | echo "Please insert Node $c hostname:" 156 | read -r node_i 157 | done 158 | echo "- $node_i" >> env.yml 159 | [[ "$flavor" != "full" ]] && cns_hosts+=($node_i) 160 | node_i="" 161 | 162 | done 163 | 164 | echo "cns:" >> env.yml 165 | 166 | for (( c=0; c<$CNS_NODES; c++ )) 167 | do 168 | echo "- ${cns_hosts[$c]}" >> env.yml 169 | done 170 | 171 | 172 | echo "Is there any Proxy to use for OpenShift and Container Runtime?" 
173 | echo "y [n]" 174 | read has_proxy 175 | 176 | if [[ $has_proxy == "y" ]]; then 177 | while [ -z $proxy_http ] 178 | do 179 | echo "Please insert HTTP Proxy:" 180 | read -r proxy_http 181 | done 182 | 183 | echo "proxy_http: $proxy_http" >> env.yml 184 | 185 | 186 | while [ -z $proxy_https ] 187 | do 188 | echo "Please insert HTTPS Proxy:" 189 | read -r proxy_https 190 | done 191 | 192 | echo "proxy_https: $proxy_https" >> env.yml 193 | 194 | echo "Please insert No Proxy (leave blank if none, automatically adding localhost,127.0.0.1,.svc)" 195 | read -r proxy_no 196 | 197 | if [ -n "$proxy_no" ]; then 198 | echo "proxy_no: $proxy_no" >> env.yml 199 | fi 200 | 201 | echo "Please insert Proxy Username (leave blank if none)" 202 | read -r proxy_username 203 | 204 | if [ -n "$proxy_username" ]; then 205 | echo "proxy_username: $proxy_username" >> env.yml 206 | fi 207 | 208 | echo "Please insert Proxy Password (leave blank if none)" 209 | read -r proxy_password 210 | 211 | if [ -n "$proxy_password" ]; then 212 | echo "proxy_password: $proxy_password" >> env.yml 213 | fi 214 | 215 | fi 216 | 217 | while [ -z $container_disk ] 218 | do 219 | echo "Please insert the host device to be used for container storage (sdb, vdb...). Use lsblk to get this information." 220 | read -r container_disk 221 | done 222 | 223 | echo "container_disk: $container_disk" >> env.yml 224 | 225 | 226 | 227 | while [ -z $ocs_disk ] 228 | do 229 | echo "Please insert the host device to be used for OCS (sdc, vdc...). Use lsblk to get this information." 230 | read -r ocs_disk 231 | done 232 | 233 | echo "ocs_disk: $ocs_disk" >> env.yml 234 | 235 | while [ -z $ssh_user ] 236 | do 237 | echo "Please insert SSH username to be used by Ansible:" 238 | read -r ssh_user 239 | done 240 | 241 | echo "ssh_user: $ssh_user" >> env.yml 242 | 243 | while [ "$install_logging" != "y" -a "$install_logging" != "n" ] 244 | do 245 | echo "Do you want to install log aggregation (EFK stack)?" 246 | echo "[y] n" 247 | read -r install_logging 248 | [[ -z $install_logging ]] && install_logging="y" 249 | done 250 | 251 | echo "install_logging: $install_logging" >> env.yml 252 | 253 | while [ "$install_metrics" != "y" -a "$install_metrics" != "n" ] 254 | do 255 | echo "Do you want to install metrics (Cassandra-Hawkular stack)?" 256 | echo "[y] n" 257 | read -r install_metrics 258 | [[ -z $install_metrics ]] && install_metrics="y" 259 | done 260 | 261 | echo "install_metrics: $install_metrics" >> env.yml 262 | 263 | echo "Do you want to configure NTP servers? (NTP will be installed anyway if not present)" 264 | echo "y [n]" 265 | read ntp 266 | 267 | if [[ $ntp == "y" ]]; then 268 | echo "ntp_servers:" >> env.yml 269 | echo "Please insert the number of NTP servers to configure, default 1" 270 | read ntp_servers 271 | NTP=1 272 | if [ -n "$ntp_servers" -a "$ntp_servers" -gt 1 -a "$ntp_servers" -lt 6 ]; then 273 | NTP=$ntp_servers 274 | echo "*** configuring $NTP NTP servers" 275 | fi 276 | 277 | for (( c=1; c<=$NTP; c++ )) 278 | do 279 | while [ -z $ntp_i ] 280 | do 281 | echo "Please insert NTP server $c:" 282 | read -r ntp_i 283 | done 284 | echo "- $ntp_i" >> env.yml 285 | ntp_i="" 286 | done 287 | fi 288 | 289 | if [ "$OCP_VERSION" == "3.11" ]; then 290 | 291 | echo "Do you have any Authentication Token for the Red Hat Registry? 
(this avoids a plain text password in the inventory)" 292 | echo 293 | echo "Please refer to the official documentation on how to create one:" 294 | echo "https://docs.openshift.com/container-platform/3.11/install_config/configuring_red_hat_registry.html#install-config-configuring-red-hat-registry" 295 | echo 296 | echo "[y] n" 297 | read has_registry_token 298 | 299 | if [ -z "$has_registry_token" ] || [ "$has_registry_token" == "y" ]; then 300 | echo "Please insert Registry Service Accounts Token Username" 301 | read -r oreg_token_user 302 | echo 303 | echo "registry_token_user: $oreg_token_user" >> env.yml 304 | 305 | echo "Please insert Registry Service Accounts Token" 306 | read -r oreg_token 307 | echo "registry_token: $oreg_token" >> env.yml 308 | fi 309 | fi 310 | 311 | echo 312 | echo "Generated configuration:" 313 | echo 314 | echo '********************* STC Conf file *********************' 315 | cat env.yml 316 | echo '****************** End STC Conf file ********************' 317 | echo 318 | 319 | else 320 | echo 321 | echo "An env.yml file is already present" 322 | echo "These values will be used:" 323 | echo 324 | echo '********************* STC Conf file *********************' 325 | cat env.yml 326 | echo '****************** End STC Conf file ********************' 327 | echo 328 | fi 329 | 330 | while [ "$install" != "y" -a "$install" != "n" ]; 331 | do 332 | echo "Do you want to proceed?" 333 | echo "y n" 334 | read -r install 335 | done 336 | 337 | if [ "$install" == "n" ]; then 338 | 339 | echo "Aborting installation, please restart setup.sh" 340 | exit 1 341 | fi 342 | 343 | OCP_VERSION=`grep ocp_version env.yml | awk '{print $2;}';` 344 | echo '*** Check enabled repos' 345 | REPOS="Repo ID: rhel-7-server-ansible-2.6-rpms 346 | Repo ID: rhel-7-server-extras-rpms 347 | Repo ID: rhel-7-server-ose-${OCP_VERSION}-rpms 348 | Repo ID: rhel-7-server-rpms" 349 | 350 | if [ "$OCP_VERSION" == "3.10" ]; then 351 | REPOS="Repo ID: rhel-7-server-ansible-2.4-rpms 352 | Repo ID: rhel-7-server-extras-rpms 353 | Repo ID: rhel-7-server-ose-${OCP_VERSION}-rpms 354 | Repo ID: rhel-7-server-rpms" 355 | fi 356 | 357 | echo -e "$REPOS" | sort > /tmp/stc-repos-should-enabled 358 | sudo subscription-manager repos --list-enabled | grep 'Repo ID' | sort > /tmp/stc-repos-enabled 359 | diff -Nuar /tmp/stc-repos-should-enabled /tmp/stc-repos-enabled > /tmp/stc-repo-diff 360 | RC_DIFF=$? 361 | 362 | if [ $RC_DIFF -ne 0 ]; then 363 | echo "Please check repos! Diff:" 364 | cat /tmp/stc-repo-diff 365 | exit $RC_DIFF; 366 | fi 367 | 368 | echo '*** install git and ansible' 369 | sudo yum install -y git ansible tmux nc screen 370 | echo '*** validate given configuration (env.yml)' 371 | ansible-playbook playbooks/validate_config.yml 372 | echo '*** encrypt secrets file' 373 | ansible-vault encrypt secrets.yml 374 | echo '*** enable SSH authentication between hosts' 375 | export ANSIBLE_HOST_KEY_CHECKING=False 376 | ansible-playbook -i inventory -k --ask-vault-pass playbooks/prepare_ssh.yml 377 | echo '*** copy created inventory as default inventory' 378 | sudo cp inventory /etc/ansible/hosts 379 | --------------------------------------------------------------------------------
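A minimal sketch of possible next steps once setup.sh has finished, assuming the generated env.yml, the vault-encrypted secrets.yml and the rendered inventory remain in the repository root and that the file_env/file_secrets/file_inventory defaults come from playbooks/group_vars/all (which setup.sh relies on when it invokes the playbooks without extra vars); the tag selection and exact invocations below are illustrative, not a documented workflow of this repository:

# Run the full pre-installation validation against the generated inventory
ansible-playbook -i inventory playbooks/validate.yml --ask-vault-pass

# Re-run only selected checks, e.g. the networking and firewall plays
ansible-playbook -i inventory playbooks/validate.yml --tags networking,firewall --ask-vault-pass

# After editing env.yml, regenerate the inventory before validating again
ansible-playbook playbooks/validate_config.yml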