├── .gitignore ├── Dockerfile ├── LICENSE ├── README.md ├── ansible.cfg ├── inventory └── aws │ └── hosts │ ├── ec2.ini │ └── ec2.py ├── playbooks ├── certificate_repos.yml ├── cleanup.yml ├── cloudformation_setup.yml ├── files │ ├── check_pod_complete.sh │ ├── check_registry_running.sh │ ├── cloudformation.json │ ├── docker-storage-setup.sh │ ├── get_token.sh │ ├── image_stream_version_check.sh │ ├── libdss.sh │ ├── ose3.repo │ ├── router_scale.sh │ └── smoke_project.sh ├── filter_plugins │ ├── oo_filters.py │ └── training_filters.py ├── group_setup.yml ├── host_repos.yml ├── library │ └── redhat_subscription.py ├── lookup_plugins │ └── ec2_zones_by_region.py ├── openshift_setup.yml ├── post_setup.yml ├── projects_setup.yml ├── register_hosts.yml ├── roles ├── subscriptions_and_repos.yml ├── tasks │ ├── cloudformation.yml │ ├── group_setup.yml │ ├── registry_wait.yml │ ├── smoke_projects.yml │ └── validator.yml ├── templates │ ├── aos.repo.j2 │ ├── hexboard_template.json.j2 │ ├── pv.yaml.j2 │ ├── pvc.yaml.j2 │ ├── registry.sh.j2 │ ├── user_data_master.j2 │ └── user_data_node.j2 ├── test.yml ├── test2.yml ├── uninstall.yml └── vars.yml └── run.py /.gitignore: -------------------------------------------------------------------------------- 1 | *.pyc 2 | *.swp 3 | *.swo 4 | .ansible 5 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM fedora:23 2 | MAINTAINER RyanJ 3 | 4 | ENV DEMO_ANSIBLE_VERSION=demo-ansible-2.3.0 \ 5 | DEMO_ANSIBLE_REPO=https://github.com/2015-Middleware-Keynote/demo-ansible \ 6 | OPENSHIFT_ANSIBLE_VERSION=openshift-ansible-3.0.94-1-hotfix \ 7 | OPENSHIFT_ANSIBLE_REPO=https://github.com/thoraxe/openshift-ansible.git \ 8 | # ANSIBLE_RPM_URL=https://dl.fedoraproject.org/pub/epel/7/x86_64/a/ansible1.9-1.9.6-2.el7.noarch.rpm \ 9 | ANSIBLE_RPM_URL=https://kojipkgs.fedoraproject.org/packages/ansible/1.9.4/1.el7/noarch/ansible-1.9.4-1.el7.noarch.rpm \ 10 | ANSIBLE_RPM_NAME=ansible \ 11 | HOME=/opt/src 12 | 13 | VOLUME /opt/src/keys 14 | 15 | RUN set -ex && \ 16 | dnf update -y && \ 17 | INSTALL_PKGS="git bzip2 python python-boto python-click pyOpenSSL" && \ 18 | dnf install -y --setopt=tsflags=nodocs $INSTALL_PKGS $ANSIBLE_RPM_URL && \ 19 | rpm -V $INSTALL_PKGS $ANSIBLE_RPM_NAME && \ 20 | git clone $DEMO_ANSIBLE_REPO -b $DEMO_ANSIBLE_VERSION ${HOME}/demo-ansible && \ 21 | git clone $OPENSHIFT_ANSIBLE_REPO -b $OPENSHIFT_ANSIBLE_VERSION ${HOME}/openshift-ansible && \ 22 | dnf clean all -y && \ 23 | rm -rf /usr/share/man /tmp/* 24 | 25 | WORKDIR ${HOME}/demo-ansible 26 | 27 | ENTRYPOINT [ "./run.py" ] 28 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Apache License 2 | Version 2.0, January 2004 3 | http://www.apache.org/licenses/ 4 | 5 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 6 | 7 | 1. Definitions. 8 | 9 | "License" shall mean the terms and conditions for use, reproduction, 10 | and distribution as defined by Sections 1 through 9 of this document. 11 | 12 | "Licensor" shall mean the copyright owner or entity authorized by 13 | the copyright owner that is granting the License. 14 | 15 | "Legal Entity" shall mean the union of the acting entity and all 16 | other entities that control, are controlled by, or are under common 17 | control with that entity. 
For the purposes of this definition, 18 | "control" means (i) the power, direct or indirect, to cause the 19 | direction or management of such entity, whether by contract or 20 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 21 | outstanding shares, or (iii) beneficial ownership of such entity. 22 | 23 | "You" (or "Your") shall mean an individual or Legal Entity 24 | exercising permissions granted by this License. 25 | 26 | "Source" form shall mean the preferred form for making modifications, 27 | including but not limited to software source code, documentation 28 | source, and configuration files. 29 | 30 | "Object" form shall mean any form resulting from mechanical 31 | transformation or translation of a Source form, including but 32 | not limited to compiled object code, generated documentation, 33 | and conversions to other media types. 34 | 35 | "Work" shall mean the work of authorship, whether in Source or 36 | Object form, made available under the License, as indicated by a 37 | copyright notice that is included in or attached to the work 38 | (an example is provided in the Appendix below). 39 | 40 | "Derivative Works" shall mean any work, whether in Source or Object 41 | form, that is based on (or derived from) the Work and for which the 42 | editorial revisions, annotations, elaborations, or other modifications 43 | represent, as a whole, an original work of authorship. For the purposes 44 | of this License, Derivative Works shall not include works that remain 45 | separable from, or merely link (or bind by name) to the interfaces of, 46 | the Work and Derivative Works thereof. 47 | 48 | "Contribution" shall mean any work of authorship, including 49 | the original version of the Work and any modifications or additions 50 | to that Work or Derivative Works thereof, that is intentionally 51 | submitted to Licensor for inclusion in the Work by the copyright owner 52 | or by an individual or Legal Entity authorized to submit on behalf of 53 | the copyright owner. For the purposes of this definition, "submitted" 54 | means any form of electronic, verbal, or written communication sent 55 | to the Licensor or its representatives, including but not limited to 56 | communication on electronic mailing lists, source code control systems, 57 | and issue tracking systems that are managed by, or on behalf of, the 58 | Licensor for the purpose of discussing and improving the Work, but 59 | excluding communication that is conspicuously marked or otherwise 60 | designated in writing by the copyright owner as "Not a Contribution." 61 | 62 | "Contributor" shall mean Licensor and any individual or Legal Entity 63 | on behalf of whom a Contribution has been received by Licensor and 64 | subsequently incorporated within the Work. 65 | 66 | 2. Grant of Copyright License. Subject to the terms and conditions of 67 | this License, each Contributor hereby grants to You a perpetual, 68 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 69 | copyright license to reproduce, prepare Derivative Works of, 70 | publicly display, publicly perform, sublicense, and distribute the 71 | Work and such Derivative Works in Source or Object form. 72 | 73 | 3. Grant of Patent License. 
Subject to the terms and conditions of 74 | this License, each Contributor hereby grants to You a perpetual, 75 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 76 | (except as stated in this section) patent license to make, have made, 77 | use, offer to sell, sell, import, and otherwise transfer the Work, 78 | where such license applies only to those patent claims licensable 79 | by such Contributor that are necessarily infringed by their 80 | Contribution(s) alone or by combination of their Contribution(s) 81 | with the Work to which such Contribution(s) was submitted. If You 82 | institute patent litigation against any entity (including a 83 | cross-claim or counterclaim in a lawsuit) alleging that the Work 84 | or a Contribution incorporated within the Work constitutes direct 85 | or contributory patent infringement, then any patent licenses 86 | granted to You under this License for that Work shall terminate 87 | as of the date such litigation is filed. 88 | 89 | 4. Redistribution. You may reproduce and distribute copies of the 90 | Work or Derivative Works thereof in any medium, with or without 91 | modifications, and in Source or Object form, provided that You 92 | meet the following conditions: 93 | 94 | (a) You must give any other recipients of the Work or 95 | Derivative Works a copy of this License; and 96 | 97 | (b) You must cause any modified files to carry prominent notices 98 | stating that You changed the files; and 99 | 100 | (c) You must retain, in the Source form of any Derivative Works 101 | that You distribute, all copyright, patent, trademark, and 102 | attribution notices from the Source form of the Work, 103 | excluding those notices that do not pertain to any part of 104 | the Derivative Works; and 105 | 106 | (d) If the Work includes a "NOTICE" text file as part of its 107 | distribution, then any Derivative Works that You distribute must 108 | include a readable copy of the attribution notices contained 109 | within such NOTICE file, excluding those notices that do not 110 | pertain to any part of the Derivative Works, in at least one 111 | of the following places: within a NOTICE text file distributed 112 | as part of the Derivative Works; within the Source form or 113 | documentation, if provided along with the Derivative Works; or, 114 | within a display generated by the Derivative Works, if and 115 | wherever such third-party notices normally appear. The contents 116 | of the NOTICE file are for informational purposes only and 117 | do not modify the License. You may add Your own attribution 118 | notices within Derivative Works that You distribute, alongside 119 | or as an addendum to the NOTICE text from the Work, provided 120 | that such additional attribution notices cannot be construed 121 | as modifying the License. 122 | 123 | You may add Your own copyright statement to Your modifications and 124 | may provide additional or different license terms and conditions 125 | for use, reproduction, or distribution of Your modifications, or 126 | for any such Derivative Works as a whole, provided Your use, 127 | reproduction, and distribution of the Work otherwise complies with 128 | the conditions stated in this License. 129 | 130 | 5. Submission of Contributions. Unless You explicitly state otherwise, 131 | any Contribution intentionally submitted for inclusion in the Work 132 | by You to the Licensor shall be under the terms and conditions of 133 | this License, without any additional terms or conditions. 
134 | Notwithstanding the above, nothing herein shall supersede or modify 135 | the terms of any separate license agreement you may have executed 136 | with Licensor regarding such Contributions. 137 | 138 | 6. Trademarks. This License does not grant permission to use the trade 139 | names, trademarks, service marks, or product names of the Licensor, 140 | except as required for reasonable and customary use in describing the 141 | origin of the Work and reproducing the content of the NOTICE file. 142 | 143 | 7. Disclaimer of Warranty. Unless required by applicable law or 144 | agreed to in writing, Licensor provides the Work (and each 145 | Contributor provides its Contributions) on an "AS IS" BASIS, 146 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 147 | implied, including, without limitation, any warranties or conditions 148 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 149 | PARTICULAR PURPOSE. You are solely responsible for determining the 150 | appropriateness of using or redistributing the Work and assume any 151 | risks associated with Your exercise of permissions under this License. 152 | 153 | 8. Limitation of Liability. In no event and under no legal theory, 154 | whether in tort (including negligence), contract, or otherwise, 155 | unless required by applicable law (such as deliberate and grossly 156 | negligent acts) or agreed to in writing, shall any Contributor be 157 | liable to You for damages, including any direct, indirect, special, 158 | incidental, or consequential damages of any character arising as a 159 | result of this License or out of the use or inability to use the 160 | Work (including but not limited to damages for loss of goodwill, 161 | work stoppage, computer failure or malfunction, or any and all 162 | other commercial damages or losses), even if such Contributor 163 | has been advised of the possibility of such damages. 164 | 165 | 9. Accepting Warranty or Additional Liability. While redistributing 166 | the Work or Derivative Works thereof, You may choose to offer, 167 | and charge a fee for, acceptance of support, warranty, indemnity, 168 | or other liability obligations and/or rights consistent with this 169 | License. However, in accepting such obligations, You may act only 170 | on Your own behalf and on Your sole responsibility, not on behalf 171 | of any other Contributor, and only if You agree to indemnify, 172 | defend, and hold each Contributor harmless for any liability 173 | incurred by, or claims asserted against, such Contributor by reason 174 | of your accepting any such warranty or additional liability. 175 | 176 | END OF TERMS AND CONDITIONS 177 | 178 | APPENDIX: How to apply the Apache License to your work. 179 | 180 | To apply the Apache License to your work, attach the following 181 | boilerplate notice, with the fields enclosed by brackets "{}" 182 | replaced with your own identifying information. (Don't include 183 | the brackets!) The text should be enclosed in the appropriate 184 | comment syntax for the file format. We also recommend that a 185 | file or class name and description of purpose be included on the 186 | same "printed page" as the copyright notice for easier 187 | identification within third-party archives. 188 | 189 | Copyright {yyyy} {name of copyright owner} 190 | 191 | Licensed under the Apache License, Version 2.0 (the "License"); 192 | you may not use this file except in compliance with the License. 
193 | You may obtain a copy of the License at 194 | 195 | http://www.apache.org/licenses/LICENSE-2.0 196 | 197 | Unless required by applicable law or agreed to in writing, software 198 | distributed under the License is distributed on an "AS IS" BASIS, 199 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 200 | See the License for the specific language governing permissions and 201 | limitations under the License. 202 | 203 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # DEPRECATION NOTICE 2 | This repository is no longer maintained. 3 | 4 | # OpenShift 3 Demo Environment Provisioner 5 | These Ansible scripts were originally created to stand up an environment running the demo from the 6 | [2015 JBoss Middleware Keynote](https://www.youtube.com/watch?v=wWNVpFibayA) at Red Hat Summit. 7 | 8 | At this point, this script repository serves two purposes: 9 | 10 | 1. Creating an environment that will pre-build and pre-configure the Hexboard 11 | application for you to be able to conduct a scale-out demo 12 | 13 | 2. Creating an environment suitable for running a workshop with many users 14 | 15 | ## Overview 16 | These scripts stand up an environment running on [Amazon Web 17 | Services](https://aws.amazon.com). They use CloudFormations, EC2, VPC, Route 53, 18 | and IAM services within AWS. They provision several RHEL7-based servers that are 19 | participating in an HA [OpenShift 3](https://openshift.com/enterprise) 20 | environment that has persistent storage for its infrastructure components. 21 | 22 | Additionally, the scripts set up OpenShift's metrics and logging aggregation 23 | services. 24 | 25 | The scripts can create workshop users, too. 26 | 27 | ## Prerequisites 28 | In order to use these scripts, you will need to set a few things up. 29 | 30 | - An AWS IAM account with the following permissions: 31 | - Policies can be defined for Users, Groups or Roles 32 | - Navigate to: AWS Dashboard -> Identity & Access Management -> Select Users or Groups or Roles -> Permissions -> Inline Policies -> Create Policy -> Custom Policy 33 | - Policy Name: openshift (your preference) 34 | - Policy Document: 35 | ``` 36 | { 37 | "Version": "2012-10-17", 38 | "Statement": [ 39 | { 40 | "Sid": "Stmt1459269951000", 41 | "Effect": "Allow", 42 | "Action": [ 43 | "cloudformation:*", 44 | "iam:*", 45 | "route53:*", 46 | "elasticloadbalancing:*", 47 | "ec2:*", 48 | "cloudwatch:*", 49 | "autoscaling:*" 50 | ], 51 | "Resource": [ 52 | "*" 53 | ] 54 | } 55 | ] 56 | } 57 | ``` 58 | Finer-grained permissions are possible, and pull requests are welcome. 59 | 60 | - AWS credentials for the account above must be exported through the 61 | `AWS_ACCESS_KEY_ID` and `AWS_SECRET_ACCESS_KEY` environment variables in 62 | your shell. 63 | - A route53 [public hosted 64 | zone](http://docs.aws.amazon.com/Route53/latest/DeveloperGuide/CreatingHostedZone.html) 65 | is required for the scripts to create the various DNS entries for the 66 | resources it creates. 67 | - An EC2 SSH keypair should be created in advance and you should save the key 68 | file to your system. 69 | - A Red Hat Customer Portal account that has appropriate OpenShift subscriptions 70 | - Red Hat employee subscriptions can be used 71 | - Instead of RHCP and RHSM, you may use certificate-based software repositories. 72 | This feature is designed for Red Hat internal use only. 
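A minimal sketch of creating the EC2 keypair and checking for the Route 53 hosted zone mentioned in the list above, using the AWS CLI. The AWS CLI is not otherwise required by these scripts and is only an assumption here; the keypair name `default` matches what `run.py` expects by default:
```
# Create the EC2 keypair the scripts will reference and save the private key locally
aws ec2 create-key-pair --key-name default \
    --query 'KeyMaterial' --output text > ~/.ssh/default.pem
chmod 400 ~/.ssh/default.pem

# Confirm that a Route 53 public hosted zone exists for your domain
aws route53 list-hosted-zones --query 'HostedZones[].Name'
```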
73 | 74 | ## Software Requirements 75 | ### Packaged Software 76 | - [Python](https://www.python.org) version 2.7.x (3.x untested and may not work) 77 | - [Python Click](https://github.com/mitsuhiko/click) version 4.0 or greater 78 | - [Python Boto](http://docs.pythonboto.org) version 2.38.0 or greater 79 | - [pyOpenSSL](https://github.com/pyca/pyopenssl) version 0.15.1 or greater 80 | - [Ansible](https://github.com/ansible/ansible) **version 1.9.6** 81 | 82 | Ansible [1.9.6 is available for RHEL7/CentOS7/Fedora/RPM via EPEL](https://dl.fedoraproject.org/pub/epel/7/x86_64/a/ansible1.9-1.9.6-2.el7.noarch.rpm) 83 | 84 | Python and the Python dependencies may be installed via your OS' package manager 85 | (eg: python-click on Fedora/CentOS/RHEL) or via 86 | [pip](https://pypi.python.org/pypi/pip). [Python 87 | virtualenv](https://pypi.python.org/pypi/virtualenv) can also work. 88 | 89 | ### GitHub Repositories 90 | While the demo-ansible scripts are contained in a GitHub repository, the rest of 91 | the OpenShift installer lives in a separate repository. You will need both of 92 | them, and very specific versions of each. 93 | 94 | - `demo-ansible` 95 | - [2015-Middleware-Keynote/demo-ansible](https://github.com/2015-Middleware-Keynote/demo-ansible) 96 | - You will want to use `master` until we implement tags on this repository 97 | - You will want to check out tag `demo-ansible-2.4.0` 98 | - `openshift-ansible` 99 | - [thoraxe/openshift-ansible](https://github.com/thoraxe/openshift-ansible) 100 | - You will want to check out tag `openshift-ansible-3.2.13-1-hotfix` 101 | 102 | The folders for these repositories are expected to live in the same 103 | subdirectory. An example tree structure is below: 104 | ``` 105 | /home/user/ansible-scripts 106 | |-- demo-ansible 107 | |-- openshift-ansible 108 | ``` 109 | 110 | In this case, you could do something like the following: 111 | ``` 112 | cd /home/user/ansible-scripts 113 | git clone https://github.com/2015-Middleware-Keynote/demo-ansible.git 114 | cd demo-ansible 115 | git checkout demo-ansible-2.4.0 116 | cd .. 117 | git clone https://github.com/thoraxe/openshift-ansible.git 118 | cd openshift-ansible 119 | git fetch origin :remotes/origin/openshift-ansible-3.2.13-1-hotfix 120 | git checkout openshift-ansible-3.2.13-1-hotfix 121 | ``` 122 | 123 | ## Usage 124 | ### Export the EC2 Credentials 125 | You will need to export your EC2 credentials before attempting to use the 126 | scripts: 127 | ``` 128 | export AWS_ACCESS_KEY_ID=foo 129 | export AWS_SECRET_ACCESS_KEY=bar 130 | ``` 131 | 132 | ### Add the SSH Key to the SSH Agent 133 | If your operating system has an SSH agent and you are not using your default 134 | configured SSH key, you will need to add the private key to your SSH agent: 135 | ``` 136 | ssh-add 137 | ``` 138 | 139 | Note that if you use an SSH config that specifies what keys to use for what 140 | hosts this step may not be necessary. 141 | 142 | ### `run.py` 143 | There is a Python script, run.py, that takes options and calls Ansible to run 144 | the various playbooks. 145 | 146 | #### Defaults 147 | List the options for run.py: 148 | ``` 149 | cd /path/to/demo-ansible 150 | ./run.py --help 151 | ``` 152 | 153 | The options will show you the various defaults. Of special note is the Amazon 154 | EC2 AMI ID as well as the region. 
Here is a list of the AMIs and region IDs that 155 | should be used: 156 | 157 | | AMI | Amazon Region | 158 | | --- | --- | 159 | | ami-2051294a* | us-east-1* | 160 | | ami-d1315fb1 | us-west-1 | 161 | | ami-775e4f16 | us-west-2 | 162 | | ami-8b8c57f8 | eu-west-1 | 163 | | ami-875042eb | eu-central-1 | 164 | | ami-0dd8f963 | ap-northeast-1 | 165 | | ami-44db152a | ap-northeast-2 | 166 | | ami-3f03c55c | ap-southeast-1 | 167 | | ami-e0c19f83 | ap-southeast-2 | 168 | | ami-27b3094b | sa-east-1 | 169 | * is default 170 | 171 | Most of the defaults are sensible for a small environment. To use them, simply 172 | execute `run.py` by itself: 173 | ``` 174 | ./run.py 175 | ``` 176 | 177 | You will be prompted for a Route 53 zone to place DNS entries into, and a Red 178 | Hat customer portal login and password. It is expected that your SSH keypair 179 | name is "default". 180 | 181 | #### Other Examples 182 | Stand up an environment without being prompted for confirmation, overriding 183 | the cluster ID, keypair, Route 53 zone, and RHSM credentials: 184 | ``` 185 | ./run.py --no-confirm --cluster-id my_cluster --keypair my_keypair \ 186 | --r53-zone my.hosted.domain --rhsm-user my_redhat_user --rhsm-pass my_redhat_pass 187 | ``` 188 | 189 | ## Access the Environment 190 | If the installation and configuration complete successfully, you will see 191 | something like the following: 192 | ``` 193 | Your cluster provisioned successfully. The console is available at https://openshift..:443 194 | You can SSH into a master using the same SSH key with: ssh -i /path/to/key.pem openshift@openshift-master.. 195 | **After logging into the OpenShift console** you will need to visit https://metrics.. and accept the Hawkular SSL certificate 196 | You can access Kibana at https://kibana.. 197 | ``` 198 | 199 | Be sure to visit the metrics URL to accept the SSL certificate **AFTER** logging 200 | into the OpenShift console. 201 | 202 | ## Cleanup 203 | `run.py` has a `--cleanup` option that can be used to delete all of the 204 | resources it created. You will need to specify all of the same options that you 205 | used to create your environment when you use `--cleanup`. 206 | 207 | ## Troubleshooting 208 | You may see various errors from Ansible during the installation. These are 209 | normal. Unless you see the word `FATAL` or `aborting`, there is nothing to worry 210 | about. 211 | 212 | ### Failed Installation and Configuration 213 | #### CloudFormation 214 | On occasion, the CloudFormation template will fail to 215 | provision correctly, and there is no way to recover from this. Go into the AWS 216 | console, find the CloudFormation stack, delete it, and start over. 217 | 218 | #### Docker 219 | There is a known issue where sometimes EC2's underlying storage subsystem is 220 | unstable and `docker-storage-setup` will fail to run correctly because the 221 | underlying LVM setup fails. This will manifest as a `FATAL` Ansible error where 222 | the Docker daemon fails to start. 223 | 224 | While re-running `run.py` with the same options can work, the resulting 225 | environment has some nodes with a non-optimal Docker configuration. 226 | 227 | If you encounter this particular error, it is advisable to delete everything 228 | and try again. 229 | 230 | #### Other Errors 231 | Generally, Ansible is pretty forgiving about other errors. If you have another 232 | unlisted error, simply execute `run.py` again with the same exact options.
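If repeated runs still do not produce a healthy environment, tearing everything down and provisioning again is usually the fastest path. A hedged sketch, reusing the example options shown earlier (all values are placeholders for the options you actually used):
```
# Delete everything the previous run created...
./run.py --cleanup --no-confirm --cluster-id my_cluster --keypair my_keypair \
  --r53-zone my.hosted.domain --rhsm-user my_redhat_user --rhsm-pass my_redhat_pass

# ...then provision again with the identical options
./run.py --no-confirm --cluster-id my_cluster --keypair my_keypair \
  --r53-zone my.hosted.domain --rhsm-user my_redhat_user --rhsm-pass my_redhat_pass
```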
233 | 234 | ### Failed Cleanup 235 | #### Cloudformation 236 | On occasion the Cloudformation stack will fail to delete properly. Simply go 237 | into the AWS console and find the Cloudformation stack and delete it. 238 | -------------------------------------------------------------------------------- /ansible.cfg: -------------------------------------------------------------------------------- 1 | # config file for ansible -- http://ansible.com/ 2 | # ============================================== 3 | [defaults] 4 | #callback_plugins = ../openshift-ansible/ansible-profile/callback_plugins 5 | forks = 50 6 | host_key_checking = False 7 | hostfile = inventory/aws/hosts/ec2.py 8 | remote_user = openshift 9 | gathering = smart 10 | fact_caching = jsonfile 11 | fact_caching_connection = .ansible/cached_facts 12 | fact_caching_timeout = 900 13 | #log_path = /tmp/ansible.log 14 | 15 | [privilege_escalation] 16 | become = True 17 | 18 | [ssh_connection] 19 | ssh_args = -o ControlMaster=auto -o ControlPersist=900s -o GSSAPIAuthentication=no 20 | control_path = /var/tmp/%%h-%%r 21 | #pipelining = True 22 | -------------------------------------------------------------------------------- /inventory/aws/hosts/ec2.ini: -------------------------------------------------------------------------------- 1 | # Ansible EC2 external inventory script settings 2 | # 3 | 4 | [ec2] 5 | 6 | # to talk to a private eucalyptus instance uncomment these lines 7 | # and edit edit eucalyptus_host to be the host name of your cloud controller 8 | #eucalyptus = True 9 | #eucalyptus_host = clc.cloud.domain.org 10 | 11 | # AWS regions to make calls to. Set this to 'all' to make request to all regions 12 | # in AWS and merge the results together. Alternatively, set this to a comma 13 | # separated list of regions. E.g. 'us-east-1,us-west-1,us-west-2' 14 | regions = us-east-1,us-west-1,us-west-2,sa-east-1,eu-west-1,eu-central-1,ap-northeast-1,ap-southeast-1,ap-southeast-1,ap-southeast-2 15 | regions_exclude = us-gov-west-1,cn-north-1 16 | 17 | # When generating inventory, Ansible needs to know how to address a server. 18 | # Each EC2 instance has a lot of variables associated with it. Here is the list: 19 | # http://docs.pythonboto.org/en/latest/ref/ec2.html#module-boto.ec2.instance 20 | # Below are 2 variables that are used as the address of a server: 21 | # - destination_variable 22 | # - vpc_destination_variable 23 | 24 | # This is the normal destination variable to use. If you are running Ansible 25 | # from outside EC2, then 'public_dns_name' makes the most sense. If you are 26 | # running Ansible from within EC2, then perhaps you want to use the internal 27 | # address, and should set this to 'private_dns_name'. 28 | destination_variable = public_dns_name 29 | 30 | # For server inside a VPC, using DNS names may not make sense. When an instance 31 | # has 'subnet_id' set, this variable is used. If the subnet is public, setting 32 | # this to 'ip_address' will return the public IP address. For instances in a 33 | # private subnet, this should be set to 'private_ip_address', and Ansible must 34 | # be run from with EC2. 35 | #vpc_destination_variable = ip_address 36 | vpc_destination_variable = public_dns_name 37 | 38 | # To tag instances on EC2 with the resource records that point to them from 39 | # Route53, uncomment and set 'route53' to True. 40 | route53 = True 41 | 42 | # Additionally, you can specify the list of zones to exclude looking up in 43 | # 'route53_excluded_zones' as a comma-separated list. 
44 | # route53_excluded_zones = samplezone1.com, samplezone2.com 45 | 46 | # API calls to EC2 are slow. For this reason, we cache the results of an API 47 | # call. Set this to the path you want cache files to be written to. Two files 48 | # will be written to this directory: 49 | # - ansible-ec2.cache 50 | # - ansible-ec2.index 51 | cache_path = ~/.ansible/tmp 52 | 53 | # The number of seconds a cache file is considered valid. After this many 54 | # seconds, a new API call will be made, and the cache file will be updated. 55 | # To disable the cache, set this value to 0 56 | cache_max_age = 900 57 | 58 | # These two settings allow flexible ansible host naming based on a format 59 | # string and a comma-separated list of ec2 tags. The tags used must be 60 | # present for all instances, or the code will fail. This overrides both 61 | # destination_variable and vpc_destination_variable. 62 | # destination_format = {0}.{1}.rhcloud.com 63 | # destination_format_tags = Name,environment 64 | -------------------------------------------------------------------------------- /inventory/aws/hosts/ec2.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python2 2 | 3 | ''' 4 | EC2 external inventory script 5 | ================================= 6 | 7 | Generates inventory that Ansible can understand by making API request to 8 | AWS EC2 using the Boto library. 9 | 10 | NOTE: This script assumes Ansible is being executed where the environment 11 | variables needed for Boto have already been set: 12 | export AWS_ACCESS_KEY_ID='AK123' 13 | export AWS_SECRET_ACCESS_KEY='abc123' 14 | 15 | This script also assumes there is an ec2.ini file alongside it. To specify a 16 | different path to ec2.ini, define the EC2_INI_PATH environment variable: 17 | 18 | export EC2_INI_PATH=/path/to/my_ec2.ini 19 | 20 | If you're using eucalyptus you need to set the above variables and 21 | you need to define: 22 | 23 | export EC2_URL=http://hostname_of_your_cc:port/services/Eucalyptus 24 | 25 | For more details, see: http://docs.pythonboto.org/en/latest/boto_config_tut.html 26 | 27 | When run against a specific host, this script returns the following variables: 28 | - ec2_ami_launch_index 29 | - ec2_architecture 30 | - ec2_association 31 | - ec2_attachTime 32 | - ec2_attachment 33 | - ec2_attachmentId 34 | - ec2_client_token 35 | - ec2_deleteOnTermination 36 | - ec2_description 37 | - ec2_deviceIndex 38 | - ec2_dns_name 39 | - ec2_eventsSet 40 | - ec2_group_name 41 | - ec2_hypervisor 42 | - ec2_id 43 | - ec2_image_id 44 | - ec2_instanceState 45 | - ec2_instance_type 46 | - ec2_ipOwnerId 47 | - ec2_ip_address 48 | - ec2_item 49 | - ec2_kernel 50 | - ec2_key_name 51 | - ec2_launch_time 52 | - ec2_monitored 53 | - ec2_monitoring 54 | - ec2_networkInterfaceId 55 | - ec2_ownerId 56 | - ec2_persistent 57 | - ec2_placement 58 | - ec2_platform 59 | - ec2_previous_state 60 | - ec2_private_dns_name 61 | - ec2_private_ip_address 62 | - ec2_publicIp 63 | - ec2_public_dns_name 64 | - ec2_ramdisk 65 | - ec2_reason 66 | - ec2_region 67 | - ec2_requester_id 68 | - ec2_root_device_name 69 | - ec2_root_device_type 70 | - ec2_security_group_ids 71 | - ec2_security_group_names 72 | - ec2_shutdown_state 73 | - ec2_sourceDestCheck 74 | - ec2_spot_instance_request_id 75 | - ec2_state 76 | - ec2_state_code 77 | - ec2_state_reason 78 | - ec2_status 79 | - ec2_subnet_id 80 | - ec2_tenancy 81 | - ec2_virtualization_type 82 | - ec2_vpc_id 83 | 84 | These variables are pulled out of a boto.ec2.instance 
object. There is a lack of 85 | consistency with variable spellings (camelCase and underscores) since this 86 | just loops through all variables the object exposes. It is preferred to use the 87 | ones with underscores when multiple exist. 88 | 89 | In addition, if an instance has AWS Tags associated with it, each tag is a new 90 | variable named: 91 | - ec2_tag_[Key] = [Value] 92 | 93 | Security groups are comma-separated in 'ec2_security_group_ids' and 94 | 'ec2_security_group_names'. 95 | ''' 96 | 97 | # (c) 2012, Peter Sankauskas 98 | # 99 | # This file is part of Ansible, 100 | # 101 | # Ansible is free software: you can redistribute it and/or modify 102 | # it under the terms of the GNU General Public License as published by 103 | # the Free Software Foundation, either version 3 of the License, or 104 | # (at your option) any later version. 105 | # 106 | # Ansible is distributed in the hope that it will be useful, 107 | # but WITHOUT ANY WARRANTY; without even the implied warranty of 108 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 109 | # GNU General Public License for more details. 110 | # 111 | # You should have received a copy of the GNU General Public License 112 | # along with Ansible. If not, see . 113 | 114 | ###################################################################### 115 | 116 | import sys 117 | import os 118 | import argparse 119 | import re 120 | from time import time 121 | import boto 122 | from boto import ec2 123 | from boto import rds 124 | from boto import route53 125 | import ConfigParser 126 | from collections import defaultdict 127 | 128 | try: 129 | import json 130 | except ImportError: 131 | import simplejson as json 132 | 133 | 134 | class Ec2Inventory(object): 135 | def _empty_inventory(self): 136 | return {"_meta" : {"hostvars" : {}}} 137 | 138 | def __init__(self): 139 | ''' Main execution path ''' 140 | 141 | # Inventory grouped by instance IDs, tags, security groups, regions, 142 | # and availability zones 143 | self.inventory = self._empty_inventory() 144 | 145 | # Index of hostname (address) to instance ID 146 | self.index = {} 147 | 148 | # Read settings and parse CLI arguments 149 | self.read_settings() 150 | self.parse_cli_args() 151 | 152 | # Cache 153 | if self.args.refresh_cache: 154 | self.do_api_calls_update_cache() 155 | elif not self.is_cache_valid(): 156 | self.do_api_calls_update_cache() 157 | 158 | # Data to print 159 | if self.args.host: 160 | data_to_print = self.get_host_info() 161 | 162 | elif self.args.list: 163 | # Display list of instances for inventory 164 | if self.inventory == self._empty_inventory(): 165 | data_to_print = self.get_inventory_from_cache() 166 | else: 167 | data_to_print = self.json_format_dict(self.inventory, True) 168 | 169 | print data_to_print 170 | 171 | 172 | def is_cache_valid(self): 173 | ''' Determines if the cache files have expired, or if it is still valid ''' 174 | 175 | if os.path.isfile(self.cache_path_cache): 176 | mod_time = os.path.getmtime(self.cache_path_cache) 177 | current_time = time() 178 | if (mod_time + self.cache_max_age) > current_time: 179 | if os.path.isfile(self.cache_path_index): 180 | return True 181 | 182 | return False 183 | 184 | 185 | def read_settings(self): 186 | ''' Reads the settings from the ec2.ini file ''' 187 | 188 | config = ConfigParser.SafeConfigParser() 189 | ec2_default_ini_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'ec2.ini') 190 | ec2_ini_path = os.environ.get('EC2_INI_PATH', ec2_default_ini_path) 191 | 
config.read(ec2_ini_path) 192 | 193 | # is eucalyptus? 194 | self.eucalyptus_host = None 195 | self.eucalyptus = False 196 | if config.has_option('ec2', 'eucalyptus'): 197 | self.eucalyptus = config.getboolean('ec2', 'eucalyptus') 198 | if self.eucalyptus and config.has_option('ec2', 'eucalyptus_host'): 199 | self.eucalyptus_host = config.get('ec2', 'eucalyptus_host') 200 | 201 | # Regions 202 | self.regions = [] 203 | configRegions = config.get('ec2', 'regions') 204 | configRegions_exclude = config.get('ec2', 'regions_exclude') 205 | if (configRegions == 'all'): 206 | if self.eucalyptus_host: 207 | self.regions.append(boto.connect_euca(host=self.eucalyptus_host).region.name) 208 | else: 209 | for regionInfo in ec2.regions(): 210 | if regionInfo.name not in configRegions_exclude: 211 | self.regions.append(regionInfo.name) 212 | else: 213 | self.regions = configRegions.split(",") 214 | 215 | # Destination addresses 216 | self.destination_variable = config.get('ec2', 'destination_variable') 217 | self.vpc_destination_variable = config.get('ec2', 'vpc_destination_variable') 218 | 219 | if config.has_option('ec2', 'destination_format') and \ 220 | config.has_option('ec2', 'destination_format_tags'): 221 | self.destination_format = config.get('ec2', 'destination_format') 222 | self.destination_format_tags = config.get('ec2', 'destination_format_tags').split(',') 223 | else: 224 | self.destination_format = None 225 | self.destination_format_tags = None 226 | 227 | # Route53 228 | self.route53_enabled = config.getboolean('ec2', 'route53') 229 | self.route53_excluded_zones = [] 230 | if config.has_option('ec2', 'route53_excluded_zones'): 231 | self.route53_excluded_zones.extend( 232 | config.get('ec2', 'route53_excluded_zones', '').split(',')) 233 | 234 | # Include RDS instances? 235 | self.rds_enabled = True 236 | if config.has_option('ec2', 'rds'): 237 | self.rds_enabled = config.getboolean('ec2', 'rds') 238 | 239 | # Return all EC2 and RDS instances (if RDS is enabled) 240 | if config.has_option('ec2', 'all_instances'): 241 | self.all_instances = config.getboolean('ec2', 'all_instances') 242 | else: 243 | self.all_instances = False 244 | if config.has_option('ec2', 'all_rds_instances') and self.rds_enabled: 245 | self.all_rds_instances = config.getboolean('ec2', 'all_rds_instances') 246 | else: 247 | self.all_rds_instances = False 248 | 249 | # Cache related 250 | cache_dir = os.path.expanduser(config.get('ec2', 'cache_path')) 251 | if not os.path.exists(cache_dir): 252 | os.makedirs(cache_dir) 253 | 254 | self.cache_path_cache = cache_dir + "/ansible-ec2.cache" 255 | self.cache_path_index = cache_dir + "/ansible-ec2.index" 256 | self.cache_max_age = config.getint('ec2', 'cache_max_age') 257 | 258 | # Configure nested groups instead of flat namespace. 259 | if config.has_option('ec2', 'nested_groups'): 260 | self.nested_groups = config.getboolean('ec2', 'nested_groups') 261 | else: 262 | self.nested_groups = False 263 | 264 | # Configure which groups should be created. 
265 | group_by_options = [ 266 | 'group_by_instance_id', 267 | 'group_by_region', 268 | 'group_by_availability_zone', 269 | 'group_by_ami_id', 270 | 'group_by_instance_type', 271 | 'group_by_key_pair', 272 | 'group_by_vpc_id', 273 | 'group_by_security_group', 274 | 'group_by_tag_keys', 275 | 'group_by_tag_none', 276 | 'group_by_route53_names', 277 | 'group_by_rds_engine', 278 | 'group_by_rds_parameter_group', 279 | ] 280 | for option in group_by_options: 281 | if config.has_option('ec2', option): 282 | setattr(self, option, config.getboolean('ec2', option)) 283 | else: 284 | setattr(self, option, True) 285 | 286 | # Do we need to just include hosts that match a pattern? 287 | try: 288 | pattern_include = config.get('ec2', 'pattern_include') 289 | if pattern_include and len(pattern_include) > 0: 290 | self.pattern_include = re.compile(pattern_include) 291 | else: 292 | self.pattern_include = None 293 | except ConfigParser.NoOptionError, e: 294 | self.pattern_include = None 295 | 296 | # Do we need to exclude hosts that match a pattern? 297 | try: 298 | pattern_exclude = config.get('ec2', 'pattern_exclude'); 299 | if pattern_exclude and len(pattern_exclude) > 0: 300 | self.pattern_exclude = re.compile(pattern_exclude) 301 | else: 302 | self.pattern_exclude = None 303 | except ConfigParser.NoOptionError, e: 304 | self.pattern_exclude = None 305 | 306 | # Instance filters (see boto and EC2 API docs). Ignore invalid filters. 307 | self.ec2_instance_filters = defaultdict(list) 308 | if config.has_option('ec2', 'instance_filters'): 309 | for instance_filter in config.get('ec2', 'instance_filters', '').split(','): 310 | instance_filter = instance_filter.strip() 311 | if not instance_filter or '=' not in instance_filter: 312 | continue 313 | filter_key, filter_value = [x.strip() for x in instance_filter.split('=', 1)] 314 | if not filter_key: 315 | continue 316 | self.ec2_instance_filters[filter_key].append(filter_value) 317 | 318 | def parse_cli_args(self): 319 | ''' Command line argument processing ''' 320 | 321 | parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on EC2') 322 | parser.add_argument('--list', action='store_true', default=True, 323 | help='List instances (default: True)') 324 | parser.add_argument('--host', action='store', 325 | help='Get all the variables about a specific instance') 326 | parser.add_argument('--refresh-cache', action='store_true', default=False, 327 | help='Force refresh of cache by making API requests to EC2 (default: False - use cache files)') 328 | self.args = parser.parse_args() 329 | 330 | 331 | def do_api_calls_update_cache(self): 332 | ''' Do API calls to each region, and save data in cache files ''' 333 | 334 | if self.route53_enabled: 335 | self.get_route53_records() 336 | 337 | for region in self.regions: 338 | self.get_instances_by_region(region) 339 | if self.rds_enabled: 340 | self.get_rds_instances_by_region(region) 341 | 342 | self.write_to_cache(self.inventory, self.cache_path_cache) 343 | self.write_to_cache(self.index, self.cache_path_index) 344 | 345 | 346 | def get_instances_by_region(self, region): 347 | ''' Makes an AWS EC2 API call to the list of instances in a particular 348 | region ''' 349 | 350 | try: 351 | if self.eucalyptus: 352 | conn = boto.connect_euca(host=self.eucalyptus_host) 353 | conn.APIVersion = '2010-08-31' 354 | else: 355 | conn = ec2.connect_to_region(region) 356 | 357 | # connect_to_region will fail "silently" by returning None if the region name is wrong or not supported 358 | if conn 
is None: 359 | print("region name: %s likely not supported, or AWS is down. connection to region failed." % region) 360 | sys.exit(1) 361 | 362 | reservations = [] 363 | if self.ec2_instance_filters: 364 | for filter_key, filter_values in self.ec2_instance_filters.iteritems(): 365 | reservations.extend(conn.get_all_instances(filters = { filter_key : filter_values })) 366 | else: 367 | reservations = conn.get_all_instances() 368 | 369 | for reservation in reservations: 370 | for instance in reservation.instances: 371 | self.add_instance(instance, region) 372 | 373 | except boto.exception.BotoServerError, e: 374 | if not self.eucalyptus: 375 | print "Looks like AWS is down again:" 376 | print e 377 | sys.exit(1) 378 | 379 | def get_rds_instances_by_region(self, region): 380 | ''' Makes an AWS API call to the list of RDS instances in a particular 381 | region ''' 382 | 383 | try: 384 | conn = rds.connect_to_region(region) 385 | if conn: 386 | instances = conn.get_all_dbinstances() 387 | for instance in instances: 388 | self.add_rds_instance(instance, region) 389 | except boto.exception.BotoServerError, e: 390 | if not e.reason == "Forbidden": 391 | print "Looks like AWS RDS is down: " 392 | print e 393 | sys.exit(1) 394 | 395 | def get_instance(self, region, instance_id): 396 | ''' Gets details about a specific instance ''' 397 | if self.eucalyptus: 398 | conn = boto.connect_euca(self.eucalyptus_host) 399 | conn.APIVersion = '2010-08-31' 400 | else: 401 | conn = ec2.connect_to_region(region) 402 | 403 | # connect_to_region will fail "silently" by returning None if the region name is wrong or not supported 404 | if conn is None: 405 | print("region name: %s likely not supported, or AWS is down. connection to region failed." % region) 406 | sys.exit(1) 407 | 408 | reservations = conn.get_all_instances([instance_id]) 409 | for reservation in reservations: 410 | for instance in reservation.instances: 411 | return instance 412 | 413 | def add_instance(self, instance, region): 414 | ''' Adds an instance to the inventory and index, as long as it is 415 | addressable ''' 416 | 417 | # Only want running instances unless all_instances is True 418 | if not self.all_instances and instance.state != 'running': 419 | return 420 | 421 | # Select the best destination address 422 | if self.destination_format and self.destination_format_tags: 423 | dest = self.destination_format.format(*[ getattr(instance, 'tags').get(tag, 'nil') for tag in self.destination_format_tags ]) 424 | elif instance.subnet_id: 425 | dest = getattr(instance, self.vpc_destination_variable, None) 426 | if dest is None: 427 | dest = getattr(instance, 'tags').get(self.vpc_destination_variable, None) 428 | else: 429 | dest = getattr(instance, self.destination_variable, None) 430 | if dest is None: 431 | dest = getattr(instance, 'tags').get(self.destination_variable, None) 432 | 433 | if not dest: 434 | # Skip instances we cannot address (e.g. 
private VPC subnet) 435 | return 436 | 437 | # if we only want to include hosts that match a pattern, skip those that don't 438 | if self.pattern_include and not self.pattern_include.match(dest): 439 | return 440 | 441 | # if we need to exclude hosts that match a pattern, skip those 442 | if self.pattern_exclude and self.pattern_exclude.match(dest): 443 | return 444 | 445 | # Add to index 446 | self.index[dest] = [region, instance.id] 447 | 448 | # Inventory: Group by instance ID (always a group of 1) 449 | if self.group_by_instance_id: 450 | self.inventory[instance.id] = [dest] 451 | if self.nested_groups: 452 | self.push_group(self.inventory, 'instances', instance.id) 453 | 454 | # Inventory: Group by region 455 | if self.group_by_region: 456 | self.push(self.inventory, region, dest) 457 | if self.nested_groups: 458 | self.push_group(self.inventory, 'regions', region) 459 | 460 | # Inventory: Group by availability zone 461 | if self.group_by_availability_zone: 462 | self.push(self.inventory, instance.placement, dest) 463 | if self.nested_groups: 464 | if self.group_by_region: 465 | self.push_group(self.inventory, region, instance.placement) 466 | self.push_group(self.inventory, 'zones', instance.placement) 467 | 468 | # Inventory: Group by Amazon Machine Image (AMI) ID 469 | if self.group_by_ami_id: 470 | ami_id = self.to_safe(instance.image_id) 471 | self.push(self.inventory, ami_id, dest) 472 | if self.nested_groups: 473 | self.push_group(self.inventory, 'images', ami_id) 474 | 475 | # Inventory: Group by instance type 476 | if self.group_by_instance_type: 477 | type_name = self.to_safe('type_' + instance.instance_type) 478 | self.push(self.inventory, type_name, dest) 479 | if self.nested_groups: 480 | self.push_group(self.inventory, 'types', type_name) 481 | 482 | # Inventory: Group by key pair 483 | if self.group_by_key_pair and instance.key_name: 484 | key_name = self.to_safe('key_' + instance.key_name) 485 | self.push(self.inventory, key_name, dest) 486 | if self.nested_groups: 487 | self.push_group(self.inventory, 'keys', key_name) 488 | 489 | # Inventory: Group by VPC 490 | if self.group_by_vpc_id and instance.vpc_id: 491 | vpc_id_name = self.to_safe('vpc_id_' + instance.vpc_id) 492 | self.push(self.inventory, vpc_id_name, dest) 493 | if self.nested_groups: 494 | self.push_group(self.inventory, 'vpcs', vpc_id_name) 495 | 496 | # Inventory: Group by security group 497 | if self.group_by_security_group: 498 | try: 499 | for group in instance.groups: 500 | key = self.to_safe("security_group_" + group.name) 501 | self.push(self.inventory, key, dest) 502 | if self.nested_groups: 503 | self.push_group(self.inventory, 'security_groups', key) 504 | except AttributeError: 505 | print 'Package boto seems a bit older.' 506 | print 'Please upgrade boto >= 2.3.0.' 
507 | sys.exit(1) 508 | 509 | # Inventory: Group by tag keys 510 | if self.group_by_tag_keys: 511 | for k, v in instance.tags.iteritems(): 512 | key = self.to_safe("tag_" + k + "=" + v) 513 | self.push(self.inventory, key, dest) 514 | if self.nested_groups: 515 | self.push_group(self.inventory, 'tags', self.to_safe("tag_" + k)) 516 | self.push_group(self.inventory, self.to_safe("tag_" + k), key) 517 | 518 | # Inventory: Group by Route53 domain names if enabled 519 | if self.route53_enabled and self.group_by_route53_names: 520 | route53_names = self.get_instance_route53_names(instance) 521 | for name in route53_names: 522 | self.push(self.inventory, name, dest) 523 | if self.nested_groups: 524 | self.push_group(self.inventory, 'route53', name) 525 | 526 | # Global Tag: instances without tags 527 | if self.group_by_tag_none and len(instance.tags) == 0: 528 | self.push(self.inventory, 'tag_none', dest) 529 | if self.nested_groups: 530 | self.push_group(self.inventory, 'tags', 'tag_none') 531 | 532 | # Global Tag: tag all EC2 instances 533 | self.push(self.inventory, 'ec2', dest) 534 | 535 | self.inventory["_meta"]["hostvars"][dest] = self.get_host_info_dict_from_instance(instance) 536 | 537 | 538 | def add_rds_instance(self, instance, region): 539 | ''' Adds an RDS instance to the inventory and index, as long as it is 540 | addressable ''' 541 | 542 | # Only want available instances unless all_rds_instances is True 543 | if not self.all_rds_instances and instance.status != 'available': 544 | return 545 | 546 | # Select the best destination address 547 | dest = instance.endpoint[0] 548 | 549 | if not dest: 550 | # Skip instances we cannot address (e.g. private VPC subnet) 551 | return 552 | 553 | # Add to index 554 | self.index[dest] = [region, instance.id] 555 | 556 | # Inventory: Group by instance ID (always a group of 1) 557 | if self.group_by_instance_id: 558 | self.inventory[instance.id] = [dest] 559 | if self.nested_groups: 560 | self.push_group(self.inventory, 'instances', instance.id) 561 | 562 | # Inventory: Group by region 563 | if self.group_by_region: 564 | self.push(self.inventory, region, dest) 565 | if self.nested_groups: 566 | self.push_group(self.inventory, 'regions', region) 567 | 568 | # Inventory: Group by availability zone 569 | if self.group_by_availability_zone: 570 | self.push(self.inventory, instance.availability_zone, dest) 571 | if self.nested_groups: 572 | if self.group_by_region: 573 | self.push_group(self.inventory, region, instance.availability_zone) 574 | self.push_group(self.inventory, 'zones', instance.availability_zone) 575 | 576 | # Inventory: Group by instance type 577 | if self.group_by_instance_type: 578 | type_name = self.to_safe('type_' + instance.instance_class) 579 | self.push(self.inventory, type_name, dest) 580 | if self.nested_groups: 581 | self.push_group(self.inventory, 'types', type_name) 582 | 583 | # Inventory: Group by VPC 584 | if self.group_by_vpc_id and instance.subnet_group and instance.subnet_group.vpc_id: 585 | vpc_id_name = self.to_safe('vpc_id_' + instance.subnet_group.vpc_id) 586 | self.push(self.inventory, vpc_id_name, dest) 587 | if self.nested_groups: 588 | self.push_group(self.inventory, 'vpcs', vpc_id_name) 589 | 590 | # Inventory: Group by security group 591 | if self.group_by_security_group: 592 | try: 593 | if instance.security_group: 594 | key = self.to_safe("security_group_" + instance.security_group.name) 595 | self.push(self.inventory, key, dest) 596 | if self.nested_groups: 597 | self.push_group(self.inventory, 
'security_groups', key) 598 | 599 | except AttributeError: 600 | print 'Package boto seems a bit older.' 601 | print 'Please upgrade boto >= 2.3.0.' 602 | sys.exit(1) 603 | 604 | # Inventory: Group by engine 605 | if self.group_by_rds_engine: 606 | self.push(self.inventory, self.to_safe("rds_" + instance.engine), dest) 607 | if self.nested_groups: 608 | self.push_group(self.inventory, 'rds_engines', self.to_safe("rds_" + instance.engine)) 609 | 610 | # Inventory: Group by parameter group 611 | if self.group_by_rds_parameter_group: 612 | self.push(self.inventory, self.to_safe("rds_parameter_group_" + instance.parameter_group.name), dest) 613 | if self.nested_groups: 614 | self.push_group(self.inventory, 'rds_parameter_groups', self.to_safe("rds_parameter_group_" + instance.parameter_group.name)) 615 | 616 | # Global Tag: all RDS instances 617 | self.push(self.inventory, 'rds', dest) 618 | 619 | self.inventory["_meta"]["hostvars"][dest] = self.get_host_info_dict_from_instance(instance) 620 | 621 | 622 | def get_route53_records(self): 623 | ''' Get and store the map of resource records to domain names that 624 | point to them. ''' 625 | 626 | r53_conn = route53.Route53Connection() 627 | all_zones = r53_conn.get_zones() 628 | 629 | route53_zones = [ zone for zone in all_zones if zone.name[:-1] 630 | not in self.route53_excluded_zones ] 631 | 632 | self.route53_records = {} 633 | 634 | for zone in route53_zones: 635 | rrsets = r53_conn.get_all_rrsets(zone.id) 636 | 637 | for record_set in rrsets: 638 | record_name = record_set.name 639 | 640 | if record_name.endswith('.'): 641 | record_name = record_name[:-1] 642 | 643 | for resource in record_set.resource_records: 644 | self.route53_records.setdefault(resource, set()) 645 | self.route53_records[resource].add(record_name) 646 | 647 | 648 | def get_instance_route53_names(self, instance): 649 | ''' Check if an instance is referenced in the records we have from 650 | Route53. If it is, return the list of domain names pointing to said 651 | instance. If nothing points to it, return an empty list. 
''' 652 | 653 | instance_attributes = [ 'public_dns_name', 'private_dns_name', 654 | 'ip_address', 'private_ip_address' ] 655 | 656 | name_list = set() 657 | 658 | for attrib in instance_attributes: 659 | try: 660 | value = getattr(instance, attrib) 661 | except AttributeError: 662 | continue 663 | 664 | if value in self.route53_records: 665 | name_list.update(self.route53_records[value]) 666 | 667 | return list(name_list) 668 | 669 | 670 | def get_host_info_dict_from_instance(self, instance): 671 | instance_vars = {} 672 | for key in vars(instance): 673 | value = getattr(instance, key) 674 | key = self.to_safe('ec2_' + key) 675 | 676 | # Handle complex types 677 | # state/previous_state changed to properties in boto in https://github.com/boto/boto/commit/a23c379837f698212252720d2af8dec0325c9518 678 | if key == 'ec2__state': 679 | instance_vars['ec2_state'] = instance.state or '' 680 | instance_vars['ec2_state_code'] = instance.state_code 681 | elif key == 'ec2__previous_state': 682 | instance_vars['ec2_previous_state'] = instance.previous_state or '' 683 | instance_vars['ec2_previous_state_code'] = instance.previous_state_code 684 | elif type(value) in [int, bool]: 685 | instance_vars[key] = value 686 | elif type(value) in [str, unicode]: 687 | instance_vars[key] = value.strip() 688 | elif type(value) == type(None): 689 | instance_vars[key] = '' 690 | elif key == 'ec2_region': 691 | instance_vars[key] = value.name 692 | elif key == 'ec2__placement': 693 | instance_vars['ec2_placement'] = value.zone 694 | elif key == 'ec2_tags': 695 | for k, v in value.iteritems(): 696 | key = self.to_safe('ec2_tag_' + k) 697 | instance_vars[key] = v 698 | elif key == 'ec2_groups': 699 | group_ids = [] 700 | group_names = [] 701 | for group in value: 702 | group_ids.append(group.id) 703 | group_names.append(group.name) 704 | instance_vars["ec2_security_group_ids"] = ','.join([str(i) for i in group_ids]) 705 | instance_vars["ec2_security_group_names"] = ','.join([str(i) for i in group_names]) 706 | else: 707 | pass 708 | # TODO Product codes if someone finds them useful 709 | #print key 710 | #print type(value) 711 | #print value 712 | 713 | if self.route53_enabled: 714 | instance_vars["ec2_route53_names"] = self.get_instance_route53_names(instance) 715 | 716 | return instance_vars 717 | 718 | def get_host_info(self): 719 | ''' Get variables about a specific host ''' 720 | 721 | if len(self.index) == 0: 722 | # Need to load index from cache 723 | self.load_index_from_cache() 724 | 725 | if not self.args.host in self.index: 726 | # try updating the cache 727 | self.do_api_calls_update_cache() 728 | if not self.args.host in self.index: 729 | # host might not exist anymore 730 | return self.json_format_dict({}, True) 731 | 732 | (region, instance_id) = self.index[self.args.host] 733 | 734 | instance = self.get_instance(region, instance_id) 735 | return self.json_format_dict(self.get_host_info_dict_from_instance(instance), True) 736 | 737 | def push(self, my_dict, key, element): 738 | ''' Push an element onto an array that may not have been defined in 739 | the dict ''' 740 | group_info = my_dict.setdefault(key, []) 741 | if isinstance(group_info, dict): 742 | host_list = group_info.setdefault('hosts', []) 743 | host_list.append(element) 744 | else: 745 | group_info.append(element) 746 | 747 | def push_group(self, my_dict, key, element): 748 | ''' Push a group as a child of another group. 
''' 749 | parent_group = my_dict.setdefault(key, {}) 750 | if not isinstance(parent_group, dict): 751 | parent_group = my_dict[key] = {'hosts': parent_group} 752 | child_groups = parent_group.setdefault('children', []) 753 | if element not in child_groups: 754 | child_groups.append(element) 755 | 756 | def get_inventory_from_cache(self): 757 | ''' Reads the inventory from the cache file and returns it as a JSON 758 | object ''' 759 | 760 | cache = open(self.cache_path_cache, 'r') 761 | json_inventory = cache.read() 762 | return json_inventory 763 | 764 | 765 | def load_index_from_cache(self): 766 | ''' Reads the index from the cache file sets self.index ''' 767 | 768 | cache = open(self.cache_path_index, 'r') 769 | json_index = cache.read() 770 | self.index = json.loads(json_index) 771 | 772 | 773 | def write_to_cache(self, data, filename): 774 | ''' Writes data in JSON format to a file ''' 775 | 776 | json_data = self.json_format_dict(data, True) 777 | cache = open(filename, 'w') 778 | cache.write(json_data) 779 | cache.close() 780 | 781 | 782 | def to_safe(self, word): 783 | ''' Converts 'bad' characters in a string to underscores so they can be 784 | used as Ansible groups ''' 785 | 786 | return re.sub("[^A-Za-z0-9\-]", "_", word) 787 | 788 | 789 | def json_format_dict(self, data, pretty=False): 790 | ''' Converts a dict to a JSON object and dumps it as a formatted 791 | string ''' 792 | 793 | if pretty: 794 | return json.dumps(data, sort_keys=True, indent=2) 795 | else: 796 | return json.dumps(data) 797 | 798 | 799 | # Run the script 800 | Ec2Inventory() 801 | 802 | -------------------------------------------------------------------------------- /playbooks/certificate_repos.yml: -------------------------------------------------------------------------------- 1 | # vim: set ft=ansible: 2 | --- 3 | - include: cloudformation_setup.yml 4 | 5 | - include: group_setup.yml 6 | 7 | - name: Certificate repository configuration 8 | hosts: cluster_hosts 9 | gather_facts: yes 10 | tasks: 11 | - include: ../../aos-ansible/playbooks/roles/ops_mirror_bootstrap/tasks/main.yml 12 | vars: 13 | omb_aos_repo: "{{ aos_repo }}" 14 | when: use_certificate_repos | bool 15 | 16 | - include: ../../aos-ansible/playbooks/roles/qe_registry_bootstrap/tasks/main.yml 17 | vars: 18 | omb_aos_repo: "{{ aos_repo }}" 19 | qe_openshift_kerberos_user: "{{ kerberos_user }}" 20 | qe_openshift_token: "{{ kerberos_token }}" 21 | when: use_certificate_repos | bool and prerelease | bool 22 | 23 | 24 | 25 | -------------------------------------------------------------------------------- /playbooks/cleanup.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # to get the storage volume info and set up groups in case this playbook is run by itself 3 | - include: cloudformation_setup.yml 4 | - include: group_setup.yml 5 | 6 | - name: Unregister host(s) 7 | hosts: cluster_hosts 8 | serial: 1 9 | tasks: 10 | - name: Unregister host 11 | redhat_subscription: 12 | username: "{{ rhsm_user }}" 13 | password: "{{ rhsm_pass }}" 14 | state: absent 15 | when: not (skip_subscription_management | bool) 16 | 17 | # We have to clean out the things using volumes before we can 18 | # delete the volumes 19 | - name: Clean the OpenShift environment 20 | hosts: project_master 21 | gather_facts: no 22 | vars_files: 23 | - vars.yml 24 | vars: 25 | default_context: 'default/openshift-internal-{{ r53_host_zone | regex_replace("\.", "-") }}:{{ api_port }}/system:admin' 26 | tasks: 27 | - name: Change the oc 
context 28 | command: "oc config use-context {{ default_context }}" 29 | 30 | - name: Switch to default project 31 | command: oc project default 32 | 33 | # delete the docker registry and then the PV/PVC 34 | - name: Remove docker registry 35 | command: oc delete all -l docker-registry=default 36 | ignore_errors: true 37 | 38 | - name: Remove registry pv and pvc 39 | command: oc delete pvc/registry-pvc pv/registry-pv 40 | ignore_errors: true 41 | 42 | - name: Remove the logging project 43 | command: oc delete project logging 44 | ignore_errors: true 45 | 46 | - name: Remove the logging pv 47 | command: oc delete pv/logging-pv 48 | ignore_errors: true 49 | 50 | - name: Remove everything in the openshift-infra project 51 | command: oc delete all --all -n openshift-infra 52 | ignore_errors: true 53 | 54 | # I don't know of a good way to determine when the volumes are 55 | # un-mounted, so instead we just sleep for 60 seconds 56 | - name: Pause to wait for volumes to be unmounted 57 | pause: 58 | seconds: 90 59 | 60 | - name: Delete the EC2 items 61 | hosts: localhost 62 | connection: local 63 | become: no 64 | gather_facts: no 65 | vars_files: 66 | - vars.yml 67 | tasks: 68 | - name: Delete the Route53 entry for master 69 | route53: 70 | command: delete 71 | zone: "{{ r53_zone }}" 72 | record: "openshift-master.{{ r53_host_zone }}" 73 | ttl: 60 74 | type: A 75 | value: "{{ hostvars[groups['tag_openshift-demo-' ~ cluster_id ~ '-host-type_master'].0]['ec2_ip_address'] }}" 76 | overwrite: yes 77 | ignore_errors: true 78 | 79 | - name: Detach the registry, metrics, and logging volumes 80 | ec2_vol: 81 | id: "{{ item }}" 82 | instance: None 83 | region: "{{ ec2_region }}" 84 | with_items: 85 | - "{{ hostvars['localhost']['registry_volume'] }}" 86 | - "{{ hostvars['localhost']['metrics_volume'] }}" 87 | - "{{ hostvars['localhost']['logging_volume'] }}" 88 | ignore_errors: true 89 | 90 | - name: Delete the registry, metrics, and logging volumes 91 | ec2_vol: 92 | id: "{{ item }}" 93 | state: absent 94 | region: "{{ ec2_region }}" 95 | with_items: 96 | - "{{ hostvars['localhost']['registry_volume'] }}" 97 | - "{{ hostvars['localhost']['metrics_volume'] }}" 98 | - "{{ hostvars['localhost']['logging_volume'] }}" 99 | ignore_errors: true 100 | 101 | - name: Destroy the CloudFormation Stack 102 | cloudformation: 103 | region: "{{ ec2_region }}" 104 | stack_name: openshift-demo-{{ cluster_id }} 105 | state: absent 106 | template: files/cloudformation.json 107 | -------------------------------------------------------------------------------- /playbooks/cloudformation_setup.yml: -------------------------------------------------------------------------------- 1 | # vim: set ft=ansible: 2 | --- 3 | - name: 'Bootstrapping or Refreshing Environment' 4 | hosts: localhost 5 | connection: local 6 | sudo: no 7 | gather_facts: no 8 | vars_files: 9 | - vars.yml 10 | vars: 11 | vpc_subnet_azs: "{{ lookup('ec2_zones_by_region', ec2_region) }}" 12 | #vpc_subnet_count: "{{ vpc_subnet_azs | oo_split | length }}" 13 | vpc_subnet_count: 1 14 | tasks: 15 | - include: tasks/validator.yml 16 | - include: tasks/cloudformation.yml 17 | 18 | -------------------------------------------------------------------------------- /playbooks/files/check_pod_complete.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # exit on any error 4 | set -e 5 | 6 | oc get pod -o go-template='{{ range .items }}{{ .metadata.name }} {{ range .status.conditions }}{{ .reason }}{{ end}} {{ "\n" }}{{ 
end }}' | grep $1 | grep "Complete" 7 | -------------------------------------------------------------------------------- /playbooks/files/check_registry_running.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | # check that registry deployment is nonzero 6 | DEPLOYMENT_VERSION=$(oc get dc/docker-registry -o go-template='{{.status.latestVersion}}') 7 | 8 | if [ $DEPLOYMENT_VERSION -ne "0" ] 9 | then 10 | # find docker registry pod 11 | POD_NAME=$(oc get pod -o go-template='{{ range .items }}{{ .metadata.name }}{{"\n"}}{{ end }}' | grep docker-registry-$DEPLOYMENT_VERSION) 12 | 13 | # check if running 14 | STATUS=$(oc get pod $POD_NAME -o go-template='{{ .status.phase }}') 15 | 16 | [ $STATUS == "Running" ] 17 | fi 18 | -------------------------------------------------------------------------------- /playbooks/files/docker-storage-setup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | #-- 4 | # Copyright 2014-2015 Red Hat, Inc. 5 | # 6 | # Licensed under the Apache License, Version 2.0 (the "License"); 7 | # you may not use this file except in compliance with the License. 8 | # You may obtain a copy of the License at 9 | # 10 | # http://www.apache.org/licenses/LICENSE-2.0 11 | # 12 | # Unless required by applicable law or agreed to in writing, software 13 | # distributed under the License is distributed on an "AS IS" BASIS, 14 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 | # See the License for the specific language governing permissions and 16 | # limitations under the License. 17 | #++ 18 | 19 | # Purpose: This script grows the root filesystem and sets up LVM volumes 20 | # for docker metadata and data. 21 | # Author: Andy Grimm 22 | 23 | set -e 24 | 25 | # This section reads the config file /etc/sysconfig/docker-storage-setup 26 | # Currently supported options: 27 | # DEVS: A quoted, space-separated list of devices to be used. This currently 28 | # expects the devices to be unpartitioned drives. If "VG" is not 29 | # specified, then use of the root disk's extra space is implied. 30 | # 31 | # VG: The volume group to use for docker storage. Defaults to the volume 32 | # group where the root filesystem resides. If VG is specified and the 33 | # volume group does not exist, it will be created (which requires that 34 | # "DEVS" be nonempty, since we don't currently support putting a second 35 | # partition on the root disk). 36 | # 37 | # The options below should be specified as values acceptable to 'lvextend -L': 38 | # 39 | # ROOT_SIZE: The size to which the root filesystem should be grown. 40 | # 41 | # DATA_SIZE: The desired size for the docker data LV. Defaults to using all 42 | # free space in the VG after the root LV and docker metadata LV 43 | # have been allocated/grown. 44 | # 45 | # Other possibilities: 46 | # * Support lvm raid setups for docker data? This would not be very difficult 47 | # if given multiple PVs and another variable; options could be just a simple 48 | # "mirror" or "stripe", or something more detailed. 49 | 50 | # In lvm thin pool, effectively data LV is named as pool LV. lvconvert 51 | # takes the data lv name and uses it as pool lv name. And later even to 52 | # resize the data lv, one has to use pool lv name. So name data lv 53 | # appropriately. 54 | # 55 | # Note: lvm2 version should be same or higher than lvm2-2.02.112 for lvm 56 | # thin pool functionality to work properly. 
57 | POOL_LV_NAME="docker-pool" 58 | DATA_LV_NAME=$POOL_LV_NAME 59 | META_LV_NAME="${POOL_LV_NAME}meta" 60 | 61 | DOCKER_STORAGE="/etc/sysconfig/docker-storage" 62 | STORAGE_DRIVERS="devicemapper overlay" 63 | 64 | DOCKER_METADATA_DIR="/var/lib/docker" 65 | 66 | # Will have currently configured storage options in $DOCKER_STORAGE 67 | CURRENT_STORAGE_OPTIONS="" 68 | 69 | get_docker_version() { 70 | local version 71 | 72 | # docker version command exits with error as daemon is not running at this 73 | # point of time. So continue despite the error. 74 | version=`docker version --format='{{.Client.Version}}' 2>/dev/null` || true 75 | echo $version 76 | } 77 | 78 | get_deferred_removal_string() { 79 | local version major minor 80 | 81 | if ! version=$(get_docker_version);then 82 | return 0 83 | fi 84 | [ -z "$version" ] && return 0 85 | 86 | major=$(echo $version | cut -d "." -f1) 87 | minor=$(echo $version | cut -d "." -f2) 88 | [ -z "$major" ] && return 0 89 | [ -z "$minor" ] && return 0 90 | 91 | # docker 1.7 onwards supports deferred device removal. Enable it. 92 | if [ $major -gt 1 ] || ([ $major -eq 1 ] && [ $minor -ge 7 ]);then 93 | echo "--storage-opt dm.use_deferred_removal=true" 94 | fi 95 | } 96 | 97 | get_deferred_deletion_string() { 98 | local version major minor 99 | 100 | if ! version=$(get_docker_version);then 101 | return 0 102 | fi 103 | [ -z "$version" ] && return 0 104 | 105 | major=$(echo $version | cut -d "." -f1) 106 | minor=$(echo $version | cut -d "." -f2) 107 | [ -z "$major" ] && return 0 108 | [ -z "$minor" ] && return 0 109 | 110 | # docker 1.9 onwards supports deferred device removal. Enable it. 111 | if [ $major -gt 1 ] || ([ $major -eq 1 ] && [ $minor -ge 9 ]);then 112 | echo "--storage-opt dm.use_deferred_deletion=true" 113 | fi 114 | } 115 | 116 | extra_options_has_dm_fs() { 117 | local option 118 | for option in ${EXTRA_DOCKER_STORAGE_OPTIONS}; do 119 | if grep -q "dm.fs=" <<< $option; then 120 | return 0 121 | fi 122 | done 123 | return 1 124 | } 125 | 126 | # Wait for a device for certain time interval. If device is found 0 is 127 | # returned otherwise 1. 128 | wait_for_dev() { 129 | local devpath=$1 130 | local timeout=$DEVICE_WAIT_TIMEOUT 131 | 132 | if [ -z "$DEVICE_WAIT_TIMEOUT" ] || [ "$DEVICE_WAIT_TIMEOUT" == "0" ];then 133 | return 0 134 | fi 135 | 136 | [ -b "$devpath" ] && return 0 137 | 138 | while [ $timeout -gt 0 ]; do 139 | Info "Waiting for device $devpath to be available. Wait time remaining is $timeout seconds" 140 | if [ $timeout -le 5 ];then 141 | sleep $timeout 142 | else 143 | sleep 5 144 | fi 145 | timeout=$((timeout-5)) 146 | [ -b "$devpath" ] && return 0 147 | done 148 | 149 | Info "Timed out waiting for device $devpath" 150 | return 1 151 | } 152 | 153 | get_devicemapper_config_options() { 154 | local storage_options 155 | local dm_fs="--storage-opt dm.fs=xfs" 156 | 157 | # docker expects device mapper device and not lvm device. Do the conversion. 
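# For example (names are illustrative): a thin pool LV "docker-pool" in VG
# "docker-vg" is /dev/docker-vg/docker-pool to LVM, but docker needs the
# device-mapper path /dev/mapper/docker--vg-docker--pool (dashes in VG/LV
# names are doubled). The /sys/dev/block/<major>:<minor>/dm/name lookup below
# resolves that name without re-implementing the escaping rules.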
158 | eval $( lvs --nameprefixes --noheadings -o lv_name,kernel_major,kernel_minor $VG | while read line; do 159 | eval $line 160 | if [ "$LVM2_LV_NAME" = "$DATA_LV_NAME" ]; then 161 | echo POOL_DEVICE_PATH=/dev/mapper/$( cat /sys/dev/block/${LVM2_LV_KERNEL_MAJOR}:${LVM2_LV_KERNEL_MINOR}/dm/name ) 162 | fi 163 | done ) 164 | 165 | if extra_options_has_dm_fs; then 166 | # dm.fs option defined in ${EXTRA_DOCKER_STORAGE_OPTIONS} 167 | dm_fs="" 168 | fi 169 | 170 | storage_options="DOCKER_STORAGE_OPTIONS=\"--storage-driver devicemapper ${dm_fs} --storage-opt dm.thinpooldev=$POOL_DEVICE_PATH $(get_deferred_removal_string) $(get_deferred_deletion_string) ${EXTRA_DOCKER_STORAGE_OPTIONS}\"" 171 | echo $storage_options 172 | } 173 | 174 | get_overlay_config_options() { 175 | echo "DOCKER_STORAGE_OPTIONS=\"--storage-driver overlay ${EXTRA_DOCKER_STORAGE_OPTIONS}\"" 176 | } 177 | 178 | write_storage_config_file () { 179 | local storage_options 180 | 181 | if [ "$STORAGE_DRIVER" == "devicemapper" ]; then 182 | if ! storage_options=$(get_devicemapper_config_options); then 183 | return 1 184 | fi 185 | elif [ "$STORAGE_DRIVER" == "overlay" ];then 186 | if ! storage_options=$(get_overlay_config_options); then 187 | return 1 188 | fi 189 | fi 190 | 191 | cat < $DOCKER_STORAGE.tmp 192 | $storage_options 193 | EOF 194 | 195 | mv $DOCKER_STORAGE.tmp $DOCKER_STORAGE 196 | } 197 | 198 | create_metadata_lv() { 199 | # If metadata lvm already exists (failures from previous run), then 200 | # don't create it. 201 | # TODO: Modify script to cleanup meta and data lvs if failure happened 202 | # later. Don't exit with error leaving partially created lvs behind. 203 | 204 | if lvs -a $VG/${META_LV_NAME} --noheadings &>/dev/null; then 205 | Info "Metadata volume $META_LV_NAME already exists. Not creating a new one." 206 | return 0 207 | fi 208 | 209 | # Reserve 0.1% of the free space in the VG for docker metadata. 210 | # Calculating the based on actual data size might be better, but is 211 | # more difficult do to the range of possible inputs. 212 | VG_SIZE=$( vgs --noheadings --nosuffix --units s -o vg_size $VG ) 213 | META_SIZE=$(( $VG_SIZE / 1000 + 1 )) 214 | if [ ! -n "$META_LV_SIZE" ]; then 215 | lvcreate -y -L ${META_SIZE}s -n $META_LV_NAME $VG 216 | fi 217 | } 218 | 219 | convert_size_in_bytes() { 220 | local size=$1 prefix suffix 221 | 222 | # if it is all numeric, it is valid as by default it will be MiB. 223 | if [[ $size =~ ^[[:digit:]]+$ ]]; then 224 | echo $(($size*1024*1024)) 225 | return 0 226 | fi 227 | 228 | # supprt G, G[bB] or Gi[bB] inputs. 229 | prefix=${size%[bBsSkKmMgGtTpPeE]i[bB]} 230 | prefix=${prefix%[bBsSkKmMgGtTpPeE][bB]} 231 | prefix=${prefix%[bBsSkKmMgGtTpPeE]} 232 | 233 | # if prefix is not all numeric now, it is an error. 234 | if ! [[ $prefix =~ ^[[:digit:]]+$ ]]; then 235 | return 1 236 | fi 237 | 238 | suffix=${data_size#$prefix} 239 | 240 | case $suffix in 241 | b*|B*) echo $prefix;; 242 | s*|S*) echo $(($prefix*512));; 243 | k*|K*) echo $(($prefix*2**10));; 244 | m*|M*) echo $(($prefix*2**20));; 245 | g*|G*) echo $(($prefix*2**30));; 246 | t*|T*) echo $(($prefix*2**40));; 247 | p*|P*) echo $(($prefix*2**50));; 248 | e*|E*) echo $(($prefix*2**60));; 249 | *) return 1;; 250 | esac 251 | } 252 | 253 | data_size_in_bytes() { 254 | local data_size=$1 255 | local bytes vg_size free_space percent 256 | 257 | # -L compatible syntax 258 | if [[ $DATA_SIZE != *%* ]]; then 259 | bytes=`convert_size_in_bytes $data_size` 260 | [ $? 
-ne 0 ] && return 1 261 | # If integer overflow took place, value is too large to handle. 262 | if [ $bytes -lt 0 ];then 263 | Error "DATA_SIZE=$data_size is too large to handle." 264 | return 1 265 | fi 266 | echo $bytes 267 | return 0 268 | fi 269 | 270 | if [[ $DATA_SIZE == *%FREE ]];then 271 | free_space=$(vgs --noheadings --nosuffix --units b -o vg_free $VG) 272 | percent=${DATA_SIZE%\%FREE} 273 | echo $((percent*free_space/100)) 274 | return 0 275 | fi 276 | 277 | if [[ $DATA_SIZE == *%VG ]];then 278 | vg_size=$(vgs --noheadings --nosuffix --units b -o vg_size $VG) 279 | percent=${DATA_SIZE%\%VG} 280 | echo $((percent*vg_size/100)) 281 | fi 282 | return 0 283 | } 284 | 285 | check_min_data_size_condition() { 286 | local min_data_size_bytes data_size_bytes free_space 287 | 288 | [ -z $MIN_DATA_SIZE ] && return 0 289 | 290 | if ! check_numeric_size_syntax $MIN_DATA_SIZE; then 291 | Fatal "MIN_DATA_SIZE value $MIN_DATA_SIZE is invalid." 292 | fi 293 | 294 | if ! min_data_size_bytes=$(convert_size_in_bytes $MIN_DATA_SIZE);then 295 | Fatal "Failed to convert MIN_DATA_SIZE to bytes" 296 | fi 297 | 298 | # If integer overflow took place, value is too large to handle. 299 | if [ $min_data_size_bytes -lt 0 ];then 300 | Fatal "MIN_DATA_SIZE=$MIN_DATA_SIZE is too large to handle." 301 | fi 302 | 303 | free_space=$(vgs --noheadings --nosuffix --units b -o vg_free $VG) 304 | 305 | if [ $free_space -lt $min_data_size_bytes ];then 306 | Fatal "There is not enough free space in volume group $VG to create data volume of size MIN_DATA_SIZE=${MIN_DATA_SIZE}." 307 | fi 308 | 309 | if ! data_size_bytes=$(data_size_in_bytes $DATA_SIZE);then 310 | Fatal "Failed to convert desired data size to bytes" 311 | fi 312 | 313 | if [ $data_size_bytes -lt $min_data_size_bytes ]; then 314 | # Increasing DATA_SIZE to meet minimum data size requirements. 315 | Info "DATA_SIZE=${DATA_SIZE} is smaller than MIN_DATA_SIZE=${MIN_DATA_SIZE}. Will create data volume of size specified by MIN_DATA_SIZE." 316 | DATA_SIZE=$MIN_DATA_SIZE 317 | fi 318 | } 319 | 320 | create_data_lv() { 321 | if [ ! -n "$DATA_SIZE" ]; then 322 | Fatal "Data volume creation failed. No DATA_SIZE specified" 323 | fi 324 | 325 | if ! check_data_size_syntax $DATA_SIZE; then 326 | Fatal "DATA_SIZE value $DATA_SIZE is invalid." 327 | fi 328 | 329 | check_min_data_size_condition 330 | 331 | # TODO: Error handling when DATA_SIZE > available space. 332 | if [[ $DATA_SIZE == *%* ]]; then 333 | lvcreate -y -l $DATA_SIZE -n $DATA_LV_NAME $VG 334 | else 335 | lvcreate -y -L $DATA_SIZE -n $DATA_LV_NAME $VG 336 | fi 337 | } 338 | 339 | create_lvm_thin_pool () { 340 | if [ -z "$DEVS" ] && [ -z "$VG_EXISTS" ]; then 341 | Fatal "Specified volume group $VG does not exist, and no devices were specified" 342 | fi 343 | 344 | # First create metadata lv. Down the line let lvm2 create it automatically. 345 | create_metadata_lv 346 | create_data_lv 347 | 348 | if [ -n "$CHUNK_SIZE" ]; then 349 | CHUNK_SIZE_ARG="-c $CHUNK_SIZE" 350 | fi 351 | lvconvert -y --zero n $CHUNK_SIZE_ARG --thinpool $VG/$DATA_LV_NAME --poolmetadata $VG/$META_LV_NAME 352 | } 353 | 354 | get_configured_thin_pool() { 355 | local options tpool opt 356 | 357 | options=$CURRENT_STORAGE_OPTIONS 358 | [ -z "$options" ] && return 0 359 | 360 | # This assumes that thin pool is specified as dm.thinpooldev=foo. There 361 | # are no spaces in between. 
362 | for opt in $options; do 363 | if [[ $opt =~ dm.thinpooldev* ]];then 364 | tpool=${opt#*=} 365 | echo "$tpool" 366 | return 0 367 | fi 368 | done 369 | } 370 | 371 | check_docker_storage_metadata() { 372 | local docker_devmapper_meta_dir="$DOCKER_METADATA_DIR/devicemapper/metadata/" 373 | 374 | [ ! -d "$docker_devmapper_meta_dir" ] && return 0 375 | 376 | # Docker seems to be already using devicemapper storage driver. Error out. 377 | Error "Docker has been previously configured for use with devicemapper graph driver. Not creating a new thin pool as existing docker metadata will fail to work with it. Manual cleanup is required before this will succeed." 378 | Info "Docker state can be reset by stopping docker and by removing ${DOCKER_METADATA_DIR} directory. This will destroy existing docker images and containers and all the docker metadata." 379 | exit 1 380 | } 381 | 382 | setup_lvm_thin_pool () { 383 | local tpool 384 | 385 | # Check if a thin pool is already configured in /etc/sysconfig/docker-storage. 386 | # If yes, wait for that thin pool to come up. 387 | tpool=`get_configured_thin_pool` 388 | 389 | if [ -n "$tpool" ]; then 390 | Info "Found an already configured thin pool $tpool in ${DOCKER_STORAGE}" 391 | if ! wait_for_dev "$tpool"; then 392 | Fatal "Already configured thin pool $tpool is not available. If thin pool exists and is taking longer to activate, set DEVICE_WAIT_TIMEOUT to a higher value and retry. If thin pool does not exist any more, remove ${DOCKER_STORAGE} and retry" 393 | fi 394 | fi 395 | 396 | # At this point of time, a volume group should exist for lvm thin pool 397 | # operations to succeed. Make that check and fail if that's not the case. 398 | if ! vg_exists "$VG";then 399 | Fatal "No valid volume group found. Exiting." 400 | else 401 | VG_EXISTS=1 402 | fi 403 | 404 | if ! lvm_pool_exists; then 405 | check_docker_storage_metadata 406 | create_lvm_thin_pool 407 | write_storage_config_file 408 | else 409 | # At this point /etc/sysconfig/docker-storage file should exist. If user 410 | # deleted this file accidently without deleting thin pool, recreate it. 411 | if [ ! -f "$DOCKER_STORAGE" ];then 412 | Info "$DOCKER_STORAGE file is missing. Recreating it." 413 | write_storage_config_file 414 | fi 415 | fi 416 | 417 | # Enable or disable automatic pool extension 418 | if [ "$AUTO_EXTEND_POOL" == "yes" ];then 419 | enable_auto_pool_extension ${VG} ${POOL_LV_NAME} 420 | else 421 | disable_auto_pool_extension ${VG} ${POOL_LV_NAME} 422 | fi 423 | } 424 | 425 | setup_overlay () { 426 | write_storage_config_file 427 | } 428 | 429 | lvm_pool_exists() { 430 | local lv_data 431 | local lvname lv lvsize 432 | 433 | lv_data=$( lvs --noheadings -o lv_name,lv_attr --separator , $VG | sed -e 's/^ *//') 434 | SAVEDIFS=$IFS 435 | for lv in $lv_data; do 436 | IFS=, 437 | read lvname lvattr <<< "$lv" 438 | # pool logical volume has "t" as first character in its attributes 439 | if [ "$lvname" == "$POOL_LV_NAME" ] && [[ $lvattr == t* ]]; then 440 | IFS=$SAVEDIFS 441 | return 0 442 | fi 443 | done 444 | IFS=$SAVEDIFS 445 | 446 | return 1 447 | } 448 | 449 | # If a /etc/sysconfig/docker-storage file is present and if it contains 450 | # dm.datadev or dm.metadatadev entries, that means we have used old mode 451 | # in the past. 452 | is_old_data_meta_mode() { 453 | if [ ! -f "$DOCKER_STORAGE" ];then 454 | return 1 455 | fi 456 | 457 | if ! 
grep -e "^DOCKER_STORAGE_OPTIONS=.*dm\.datadev" -e "^DOCKER_STORAGE_OPTIONS=.*dm\.metadatadev" $DOCKER_STORAGE > /dev/null 2>&1;then 458 | return 1 459 | fi 460 | 461 | return 0 462 | } 463 | 464 | grow_root_pvs() { 465 | # If root is not in a volume group, then there are no root pvs and nothing 466 | # to do. 467 | [ -z "$ROOT_PVS" ] && return 0 468 | 469 | # Grow root pvs only if user asked for it through config file. 470 | [ "$GROWPART" != "true" ] && return 471 | 472 | if [ ! -x "/usr/bin/growpart" ];then 473 | Error "GROWPART=true is specified and /usr/bin/growpart executable is not available. Install /usr/bin/growpart and try again." 474 | return 1 475 | fi 476 | 477 | # Note that growpart is only variable here because we may someday support 478 | # using separate partitions on the same disk. Today we fail early in that 479 | # case. Also note that the way we are doing this, it should support LVM 480 | # RAID for the root device. In the mirrored or striped case, we are growing 481 | # partitions on all disks, so as long as they match, growing the LV should 482 | # also work. 483 | for pv in $ROOT_PVS; do 484 | # Split device & partition. Ick. 485 | growpart $( echo $pv | sed -r 's/([^0-9]*)([0-9]+)/\1 \2/' ) || true 486 | pvresize $pv 487 | done 488 | } 489 | 490 | grow_root_lv_fs() { 491 | if [ -n "$ROOT_SIZE" ]; then 492 | # TODO: Error checking if specified size is <= current size 493 | lvextend -r -L $ROOT_SIZE $ROOT_DEV || true 494 | fi 495 | } 496 | 497 | # Determines if a device is already added in a volume group as pv. Returns 498 | # 0 on success. 499 | is_dev_part_of_vg() { 500 | local dev=$1 501 | local vg=$2 502 | 503 | if ! pv_name=$(pvs --noheadings -o pv_name -S pv_name=$dev,vg_name=$vg); then 504 | Fatal "Error running command pvs. Exiting." 505 | fi 506 | 507 | [ -z "$pv_name" ] && return 1 508 | pv_name=`echo $pv_name | tr -d '[ ]'` 509 | [ "$pv_name" == "$dev" ] && return 0 510 | return 1 511 | } 512 | 513 | # Check if passed in vg exists. Returns 0 if volume group exists. 514 | vg_exists() { 515 | local check_vg=$1 516 | 517 | for vg_name in $(vgs --noheadings -o vg_name); do 518 | if [ "$vg_name" == "$VG" ]; then 519 | return 0 520 | fi 521 | done 522 | return 1 523 | } 524 | 525 | is_block_dev_partition() { 526 | local bdev=$1 527 | 528 | [ ! -b "$bdev" ] && bdev="/dev/${bdev}" 529 | if ! disktype=$(lsblk -n --nodeps --output type ${bdev}); then 530 | Fatal "Failed to run lsblk on device $bdev" 531 | fi 532 | 533 | if [ "$disktype" == "part" ];then 534 | return 0 535 | fi 536 | 537 | return 1 538 | } 539 | 540 | check_wipe_block_dev_sig() { 541 | local bdev=$1 542 | local sig 543 | 544 | [ ! -b "$bdev" ] && bdev="/dev/${bdev}" 545 | 546 | if ! sig=$(wipefs -p $bdev); then 547 | Fatal "Failed to check signatures on device $bdev" 548 | fi 549 | 550 | [ "$sig" == "" ] && return 0 551 | 552 | if [ "$WIPE_SIGNATURES" == "true" ];then 553 | Info "Wipe Signatures is set to true. Any signatures on $bdev will be wiped." 554 | if ! wipefs -a $bdev; then 555 | Fatal "Failed to wipe signatures on device $bdev" 556 | fi 557 | return 0 558 | fi 559 | 560 | while IFS=, read offset uuid label type; do 561 | [ "$offset" == "# offset" ] && continue 562 | Fatal "Found $type signature on device ${bdev} at offset ${offset}. Wipe signatures using wipefs or use WIPE_SIGNATURES=true and retry." 563 | done <<< "$sig" 564 | } 565 | 566 | # Make sure passed in devices are valid block devies. Also make sure they 567 | # are not partitions. 
568 | check_block_devs() { 569 | local devs=$1 570 | 571 | for dev in ${devs}; do 572 | # Looks like we allowed just device name (sda) as valid input. In 573 | # such cases /dev/$dev should be a valid block device. 574 | if [ ! -b "$dev" ] && [ ! -b "/dev/$dev" ];then 575 | Fatal "$dev is not a valid block device." 576 | fi 577 | 578 | if is_block_dev_partition ${dev}; then 579 | Fatal "Partition specification unsupported at this time." 580 | fi 581 | done 582 | } 583 | 584 | # Scans all the disks listed in DEVS= and returns the disks which are not 585 | # already part of volume group and are new and require further processing. 586 | scan_disks() { 587 | local new_disks="" 588 | 589 | for dev in $DEVS; do 590 | local basename=$(basename $dev) 591 | local p 592 | 593 | if is_dev_part_of_vg ${dev}1 $VG; then 594 | Info "Device ${dev} is already partitioned and is part of volume group $VG" 595 | continue 596 | fi 597 | 598 | # If signatures are being overridden, then simply return the disk as new 599 | # disk. Even if it is partitioned, partition signatures will be wiped. 600 | if [ "$WIPE_SIGNATURES" == "true" ];then 601 | new_disks="$new_disks $dev" 602 | continue 603 | fi 604 | 605 | # If device does not have partitions, it is a new disk requiring processing. 606 | p=$(awk "\$4 ~ /${basename}./ {print \$4}" /proc/partitions) 607 | if [[ -z "$p" ]]; then 608 | new_disks="$dev $new_disks" 609 | continue 610 | fi 611 | 612 | Fatal "Device $dev is already partitioned and cannot be added to volume group $VG" 613 | done 614 | 615 | echo $new_disks 616 | } 617 | 618 | create_partition() { 619 | local dev="$1" size 620 | 621 | # Use a single partition of a whole device 622 | # TODO: 623 | # * Consider gpt, or unpartitioned volumes 624 | # * Error handling when partition(s) already exist 625 | # * Deal with loop/nbd device names. See growpart code 626 | size=$(( $( awk "\$4 ~ /"$( basename $dev )"/ { print \$3 }" /proc/partitions ) * 2 - 2048 )) 627 | cat < $tmpFile 697 | activation { 698 | thin_pool_autoextend_threshold=${POOL_AUTOEXTEND_THRESHOLD} 699 | thin_pool_autoextend_percent=${POOL_AUTOEXTEND_PERCENT} 700 | 701 | } 702 | EOF 703 | mv $tmpFile ${profileDir}/${profileFile} 704 | lvchange --metadataprofile ${profileName} ${volume_group}/${pool_volume} 705 | } 706 | 707 | disable_auto_pool_extension() { 708 | local volume_group=$1 709 | local pool_volume=$2 710 | local profileName="${volume_group}--${pool_volume}-extend" 711 | local profileFile="${profileName}.profile" 712 | local profileDir 713 | 714 | profileDir=$(lvm dumpconfig | grep "profile_dir" | cut -d "=" -f2 | sed 's/"//g') 715 | [ -n "$profileDir" ] || return 1 716 | 717 | lvchange --detachprofile ${volume_group}/${pool_volume} 718 | rm -f ${profileDir}/${profileFile} 719 | } 720 | 721 | 722 | # Gets the current DOCKER_STORAGE_OPTIONS= string. 723 | get_docker_storage_options() { 724 | local options 725 | 726 | if [ ! 
-f "$DOCKER_STORAGE" ];then 727 | return 0 728 | fi 729 | 730 | if options=$(grep -e "^DOCKER_STORAGE_OPTIONS=" $DOCKER_STORAGE | sed 's/DOCKER_STORAGE_OPTIONS=//' | sed 's/^ *//' | sed 's/^"//' | sed 's/"$//');then 731 | echo $options 732 | return 0 733 | fi 734 | 735 | return 1 736 | } 737 | 738 | is_valid_storage_driver() { 739 | local driver=$1 d 740 | 741 | for d in $STORAGE_DRIVERS;do 742 | [ "$driver" == "$d" ] && return 0 743 | done 744 | 745 | return 1 746 | } 747 | 748 | # Gets the existing storage driver configured in /etc/sysconfig/docker-storage 749 | get_existing_storage_driver() { 750 | local options driver 751 | 752 | options=$CURRENT_STORAGE_OPTIONS 753 | 754 | [ -z "$options" ] && return 0 755 | 756 | # Check if -storage-driver is there. 757 | if ! driver=$(echo $options | sed -n 's/.*\(--storage-driver [ ]*[a-z]*\).*/\1/p' | sed 's/--storage-driver *//');then 758 | return 1 759 | fi 760 | 761 | # If pattern does not match then driver == options. 762 | if [ -n "$driver" ] && [ ! "$driver" == "$options" ];then 763 | echo $driver 764 | return 0 765 | fi 766 | 767 | # Check if -s is there. 768 | if ! driver=$(echo $options | sed -n 's/.*\(-s [ ]*[a-z]*\).*/\1/p' | sed 's/-s *//');then 769 | return 1 770 | fi 771 | 772 | # If pattern does not match then driver == options. 773 | if [ -n "$driver" ] && [ ! "$driver" == "$options" ];then 774 | echo $driver 775 | return 0 776 | fi 777 | 778 | # We shipped some versions where we did not specify -s devicemapper. 779 | # If dm.thinpooldev= is present driver is devicemapper. 780 | if echo $options | grep -q -e "--storage-opt dm.thinpooldev=";then 781 | echo "devicemapper" 782 | return 0 783 | fi 784 | 785 | #Failed to determine existing storage driver. 786 | return 1 787 | } 788 | 789 | setup_storage() { 790 | local current_driver 791 | 792 | if [ "$STORAGE_DRIVER" == "" ];then 793 | Info "No storage driver specified. Specify one using STORAGE_DRIVER option." 794 | exit 0 795 | fi 796 | 797 | if ! is_valid_storage_driver $STORAGE_DRIVER;then 798 | Fatal "Invalid storage driver: ${STORAGE_DRIVER}." 799 | fi 800 | 801 | # Query and save current storage options 802 | if ! CURRENT_STORAGE_OPTIONS=$(get_docker_storage_options); then 803 | return 1 804 | fi 805 | 806 | if ! current_driver=$(get_existing_storage_driver);then 807 | Fatal "Failed to determine existing storage driver." 808 | fi 809 | 810 | # If storage is configured and new driver should match old one. 811 | if [ -n "$current_driver" ] && [ "$current_driver" != "$STORAGE_DRIVER" ];then 812 | Fatal "Storage is already configured with ${current_driver} driver. Can't configure it with ${STORAGE_DRIVER} driver. To override, remove $DOCKER_STORAGE and retry." 813 | fi 814 | 815 | # Set up lvm thin pool LV 816 | if [ "$STORAGE_DRIVER" == "devicemapper" ]; then 817 | setup_lvm_thin_pool 818 | elif [ "$STORAGE_DRIVER" == "overlay" ];then 819 | setup_overlay 820 | fi 821 | } 822 | 823 | usage() { 824 | cat >&2 <<-FOE 825 | Usage: $1 [OPTIONS] 826 | 827 | Grows the root filesystem and sets up storage for docker 828 | 829 | Options: 830 | -h, --help Print help message 831 | FOE 832 | } 833 | 834 | # Main Script 835 | 836 | if [ $# -gt 0 ]; then 837 | usage $(basename $0) 838 | exit 0 839 | fi 840 | 841 | # Source library. If there is a library present in same dir as d-s-s, source 842 | # that otherwise fall back to standard library. This is useful when modifyin 843 | # libdss.sh in git tree and testing d-s-s. 
844 | SRCDIR=`dirname $0` 845 | 846 | if [ -e $SRCDIR/libdss.sh ]; then 847 | source $SRCDIR/libdss.sh 848 | elif [ -e /usr/lib/docker-storage-setup/libdss.sh ]; then 849 | source /usr/lib/docker-storage-setup/libdss.sh 850 | fi 851 | 852 | if [ -e /usr/lib/docker-storage-setup/docker-storage-setup ]; then 853 | source /usr/lib/docker-storage-setup/docker-storage-setup 854 | fi 855 | 856 | # If user has overridden any settings in /etc/sysconfig/docker-storage-setup 857 | # take that into account. 858 | if [ -e /etc/sysconfig/docker-storage-setup ]; then 859 | source /etc/sysconfig/docker-storage-setup 860 | fi 861 | 862 | # Read mounts 863 | ROOT_DEV=$( awk '$2 ~ /^\/$/ && $1 !~ /rootfs/ { print $1 }' /proc/mounts ) 864 | if ! ROOT_VG=$(lvs --noheadings -o vg_name $ROOT_DEV 2>/dev/null);then 865 | Info "Volume group backing root filesystem could not be determined" 866 | ROOT_VG= 867 | else 868 | ROOT_VG=$(echo $ROOT_VG | sed -e 's/^ *//' -e 's/ *$//') 869 | fi 870 | 871 | ROOT_PVS= 872 | if [ -n "$ROOT_VG" ];then 873 | ROOT_PVS=$( pvs --noheadings -o pv_name,vg_name | awk "\$2 ~ /^$ROOT_VG\$/ { print \$1 }" ) 874 | fi 875 | 876 | VG_EXISTS= 877 | if [ -z "$VG" ]; then 878 | if [ -n "$ROOT_VG" ]; then 879 | VG=$ROOT_VG 880 | VG_EXISTS=1 881 | fi 882 | else 883 | if vg_exists "$VG";then 884 | VG_EXISTS=1 885 | fi 886 | fi 887 | 888 | # If there is no volume group specified or no root volume group, there is 889 | # nothing to do in terms of dealing with disks. 890 | if [[ -n "$DEVS" && -n "$VG" ]]; then 891 | check_block_devs ${DEVS} 892 | 893 | # If all the disks have already been correctly partitioned, there is 894 | # nothing more to do 895 | P=$(scan_disks) 896 | if [[ -n "$P" ]]; then 897 | for dev in $P; do 898 | check_wipe_block_dev_sig $dev 899 | done 900 | create_disk_partitions "$P" 901 | create_extend_volume_group 902 | fi 903 | fi 904 | 905 | grow_root_pvs 906 | 907 | # NB: We are growing root here first, because when root and docker share a 908 | # disk, we'll default to giving some portion of remaining space to docker. 909 | # Do this operation only if root is on a logical volume. 910 | [ -n "$ROOT_VG" ] && grow_root_lv_fs 911 | 912 | if is_old_data_meta_mode; then 913 | Fatal "Old mode of passing data and metadata logical volumes to docker is not supported. Exiting." 
914 | fi 915 | 916 | setup_storage 917 | -------------------------------------------------------------------------------- /playbooks/files/get_token.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -eu 4 | oc config view --flatten -o template --template '{{with index .users 0}}{{.user.token}}{{end}}' 5 | -------------------------------------------------------------------------------- /playbooks/files/image_stream_version_check.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | # determine the image stream version tag 6 | oc get template $1 -n $2 -o go-template='{{range .parameters}}{{if eq .name "IMAGE_VERSION"}}{{println .value}}{{end}}{{end}}' 7 | -------------------------------------------------------------------------------- /playbooks/files/libdss.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Library for common functions 3 | 4 | # echo info messages on stdout 5 | Info() { 6 | # stdout is used to pass back output from bash functions 7 | # so we use stderr 8 | echo "INFO: ${1}" >&2 9 | } 10 | 11 | # echo error messages on stderr 12 | Error() { 13 | echo "ERROR: ${1}" >&2 14 | } 15 | 16 | # echo error on stderr and exit with error code 1 17 | Fatal() { 18 | Error "${1}" 19 | exit 1 20 | } 21 | 22 | # checks the size specifications acceptable to -L 23 | check_numeric_size_syntax() { 24 | data_size=$1 25 | 26 | # if it is all numeric, it is valid as by default it will be MB. 27 | [[ $data_size =~ ^[[:digit:]]+$ ]] && return 0 28 | 29 | # Numeric digits followed by b or B. (byte specification) 30 | [[ $data_size =~ ^[[:digit:]]+[bB]$ ]] && return 0 31 | 32 | # Numeric digits followed by valid suffix. Will support both G and GB. 33 | [[ $data_size =~ ^[[:digit:]]+[sSkKmMgGtTpPeE][bB]?$ ]] && return 0 34 | 35 | # Numeric digits followed by valid suffix and ib. Ex. Gib or GiB. 36 | [[ $data_size =~ ^[[:digit:]]+[sSkKmMgGtTpPeE]i[bB]$ ]] && return 0 37 | 38 | return 1 39 | } 40 | 41 | check_data_size_syntax() { 42 | local data_size=$1 43 | 44 | # For -l style options, we only support %FREE and %VG option. %PVS and 45 | # %ORIGIN does not seem to make much sense for this use case. 
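# For example, DATA_SIZE=40%FREE or DATA_SIZE=100%VG is accepted here, while
# 50%PVS is not; fixed sizes such as 8G, 8GB or 8GiB are handled by the
# check_numeric_size_syntax call below. (The values shown are only examples.)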
46 | if [[ $data_size == *%FREE ]] || [[ $data_size == *%VG ]];then 47 | return 0 48 | fi 49 | 50 | # -L compatible syntax 51 | check_numeric_size_syntax $data_size && return 0 52 | return 1 53 | } 54 | -------------------------------------------------------------------------------- /playbooks/files/ose3.repo: -------------------------------------------------------------------------------- 1 | [ose3] 2 | name=OpenShift Enterprise 3 3 | baseurl=http://repo.openshift3roadshow.com/rhel-7-server-ose-3.0-rpms 4 | enabled=1 5 | gpgcheck=0 6 | -------------------------------------------------------------------------------- /playbooks/files/router_scale.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # exit on any error 4 | set -e 5 | 6 | # determine router scale 7 | ROUTER_SCALE=$(oc get dc/router -o go-template='{{.spec.replicas}}') 8 | 9 | # find number of running routers 10 | NUM_ROUTERS=$(oc get pod -o go-template='{{range .items}}{{ .metadata.name }} {{.status.phase}}{{"\n"}}{{ end }}' | grep router | grep Running | wc -l) 11 | 12 | [ $NUM_ROUTERS -eq $ROUTER_SCALE ] 13 | -------------------------------------------------------------------------------- /playbooks/files/smoke_project.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | SMOKEUSER=$1 4 | 5 | # determine if app has been created 6 | oc get bc/smoke -n $SMOKEUSER-smoke 7 | 8 | # create if not 9 | if [ $? -ne 0 ] 10 | then 11 | oc new-app openshift/php~https://github.com/gshipley/smoke.git -n $SMOKEUSER-smoke 12 | fi 13 | 14 | # determine latest build for the project 15 | BUILDNUM=$(oc get bc/smoke -n $SMOKEUSER-smoke -o template --template '{{.status.lastVersion}}') 16 | 17 | # check status of build 18 | BUILDSTATUS=$(oc get build smoke-$BUILDNUM -n $SMOKEUSER-smoke -o template --template '{{ .status.phase }}') 19 | 20 | # if failed or error, exit immediately 21 | if [[ ($BUILDSTATUS == "Failed" || $BUILDSTATUS == "Error") ]] 22 | then 23 | exit 255 24 | fi 25 | 26 | # if complete, exit now 27 | if [[ $BUILDSTATUS == "Complete" ]] 28 | then 29 | exit 0 30 | fi 31 | 32 | # if not complete, wait up to 5 minutes for build to complete 33 | # 5 minutes = 300 seconds 34 | # 300 seconds = 30 ten second loops 35 | 36 | LOOP=0 37 | while [ $LOOP -lt 30 ] 38 | do 39 | BUILDSTATUS=$(oc get build smoke-$BUILDNUM -n $SMOKEUSER-smoke -o template --template '{{ .status.phase }}') 40 | if [[ $BUILDSTATUS == "Complete" ]] 41 | then 42 | exit 0 43 | fi 44 | LOOP=$((LOOP+1)) 45 | sleep 10 46 | done 47 | 48 | exit 255 49 | -------------------------------------------------------------------------------- /playbooks/filter_plugins/oo_filters.py: -------------------------------------------------------------------------------- 1 | ../../../openshift-ansible/filter_plugins/oo_filters.py -------------------------------------------------------------------------------- /playbooks/filter_plugins/training_filters.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | # -*- coding: utf-8 -*- 3 | # vim: expandtab:tabstop=4:shiftwidth=4 4 | # pylint: disable=missing-docstring, no-self-use, too-few-public-methods 5 | 6 | from ansible import errors 7 | import copy 8 | 9 | def oo_subnets_from_zones(zones, network_prefix, cluster_id): 10 | ''' This filter plugin will create a ec2_vpc subnets list from a list of 11 | zones 12 | ''' 13 | if not issubclass(type(zones), list): 14 | raise 
errors.AnsibleFilterError("|failed expects to filter on a list") 15 | result = [] 16 | for i, zone in enumerate(zones): 17 | z_info = dict( 18 | cidr = "{0}{1}.0/24".format(network_prefix, i), 19 | az = zone, 20 | resource_tags = dict( 21 | env = cluster_id, 22 | Name = "{0}-subnet-{1}".format(cluster_id, i) 23 | ) 24 | ) 25 | result.append(z_info) 26 | 27 | return result 28 | 29 | 30 | def oo_dict_merge(data, dict_to_merge): 31 | ''' This filter plugin will merge two dicts. 32 | ''' 33 | if not issubclass(type(data), dict): 34 | raise errors.AnsibleFilterError("|failed expects to filter on a dict") 35 | if not issubclass(type(dict_to_merge), dict): 36 | raise errors.AnsibleFilterError("|failed expects dict_to_merge to be a dict") 37 | 38 | new_dict = copy.deepcopy(data) 39 | 40 | for key, value in dict_to_merge.iteritems(): 41 | new_dict[key] = copy.deepcopy(value) 42 | 43 | return new_dict 44 | 45 | 46 | class FilterModule(object): 47 | def filters(self): 48 | return { 49 | "oo_dict_merge": oo_dict_merge, 50 | "oo_subnets_from_zones": oo_subnets_from_zones 51 | } 52 | -------------------------------------------------------------------------------- /playbooks/group_setup.yml: -------------------------------------------------------------------------------- 1 | - name: 'Setup groups' 2 | hosts: localhost 3 | connection: local 4 | become: no 5 | tasks: 6 | - name: wait for ssh 7 | wait_for: "port=22 host={{ item }}" 8 | with_items: groups['tag_openshift-demo_' ~ cluster_id] 9 | 10 | - name: Add masters to requisite groups 11 | add_host: 12 | name: "{{ item }}" 13 | groups: masters, etcd, nodes, cluster_hosts, OSEv3 14 | openshift_node_labels: 15 | region: "{{ cluster_id }}" 16 | env: master 17 | zone: "{{ hostvars[item].ec2_placement }}" 18 | with_items: groups['tag_openshift-demo-' ~ cluster_id ~ '-host-type_master'] 19 | 20 | - name: Create group for first master 21 | add_host: 22 | name: "{{ item }}" 23 | groups: project_master 24 | with_items: groups['tag_openshift-demo-' ~ cluster_id ~ '-host-type_master'].0 25 | 26 | - name: Add nodes to requisite groups 27 | add_host: 28 | name: "{{ item }}" 29 | groups: nodes, cluster_hosts, OSEv3 30 | openshift_node_labels: 31 | region: "{{ cluster_id }}" 32 | env: "{{ 'infra' if hostvars[item]['ec2_tag_openshift-demo-' ~ cluster_id ~ '-node-type'] == 'infrastructure' else 'demo' }}" 33 | zone: "{{ hostvars[item].ec2_placement }}" 34 | with_items: groups['tag_openshift-demo-' ~ cluster_id ~ '-host-type_node'] 35 | 36 | 37 | -------------------------------------------------------------------------------- /playbooks/host_repos.yml: -------------------------------------------------------------------------------- 1 | # vim: set ft=ansible: 2 | - name: Repository configuration 3 | hosts: cluster_hosts 4 | gather_facts: no 5 | vars: 6 | aos_repo: 'https://mirror.ops.rhcloud.com/libra/rhui-rhel-server-7-ose/' 7 | tasks: 8 | - name: Enable rhui extras channel 9 | command: yum-config-manager --enable rhui-REGION-rhel-server-extras 10 | when: use_certificate_repos 11 | 12 | - name: Create AOS yum repository configuration 13 | template: 14 | src: templates/aos.repo.j2 15 | dest: /etc/yum.repos.d/aos.repo 16 | mode: 644 17 | when: use_certificate_repos 18 | 19 | - name: Copy Atomic OpenShift yum repository certificate 20 | copy: 21 | src: '{{ certificate_file }}' 22 | dest: /var/lib/yum/client-cert.pem 23 | when: use_certificate_repos 24 | 25 | - name: Copy Atomic OpenShift yum repository key 26 | copy: 27 | src: '{{ certificate_key }}' 28 | dest: 
/var/lib/yum/client-key.pem 29 | when: use_certificate_repos 30 | 31 | -------------------------------------------------------------------------------- /playbooks/library/redhat_subscription.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python 2 | 3 | DOCUMENTATION = ''' 4 | --- 5 | module: redhat_subscription 6 | short_description: Manage Red Hat Network registration and subscriptions using the C(subscription-manager) command 7 | description: 8 | - Manage registration and subscription to the Red Hat Network entitlement platform. 9 | version_added: "1.2" 10 | author: "Barnaby Court (@barnabycourt)" 11 | notes: 12 | - In order to register a system, subscription-manager requires either a username and password, or an activationkey. 13 | requirements: 14 | - subscription-manager 15 | options: 16 | state: 17 | description: 18 | - whether to register and subscribe (C(present)), or unregister (C(absent)) a system 19 | required: false 20 | choices: [ "present", "absent" ] 21 | default: "present" 22 | username: 23 | description: 24 | - Red Hat Network username 25 | required: False 26 | default: null 27 | password: 28 | description: 29 | - Red Hat Network password 30 | required: False 31 | default: null 32 | server_hostname: 33 | description: 34 | - Specify an alternative Red Hat Network server 35 | required: False 36 | default: Current value from C(/etc/rhsm/rhsm.conf) is the default 37 | server_insecure: 38 | description: 39 | - Allow traffic over insecure http 40 | required: False 41 | default: Current value from C(/etc/rhsm/rhsm.conf) is the default 42 | rhsm_baseurl: 43 | description: 44 | - Specify CDN baseurl 45 | required: False 46 | default: Current value from C(/etc/rhsm/rhsm.conf) is the default 47 | autosubscribe: 48 | description: 49 | - Upon successful registration, auto-consume available subscriptions 50 | required: False 51 | default: False 52 | activationkey: 53 | description: 54 | - supply an activation key for use with registration 55 | required: False 56 | default: null 57 | org_id: 58 | description: 59 | - Organisation ID to use in conjunction with activationkey 60 | required: False 61 | default: null 62 | version_added: "2.0" 63 | pool: 64 | description: 65 | - Specify a subscription pool name to consume. Regular expressions accepted. 66 | required: False 67 | default: '^$' 68 | ''' 69 | 70 | EXAMPLES = ''' 71 | # Register as user (joe_user) with password (somepass) and auto-subscribe to available content. 
72 | - redhat_subscription: state=present username=joe_user password=somepass autosubscribe=true 73 | 74 | # Register with activationkey (1-222333444) and consume subscriptions matching 75 | # the names (Red hat Enterprise Server) and (Red Hat Virtualization) 76 | - redhat_subscription: state=present 77 | activationkey=1-222333444 78 | pool='^(Red Hat Enterprise Server|Red Hat Virtualization)$' 79 | 80 | # Update the consumed subscriptions from the previous example (remove the Red 81 | # Hat Virtualization subscription) 82 | - redhat_subscription: state=present 83 | activationkey=1-222333444 84 | pool='^Red Hat Enterprise Server$' 85 | ''' 86 | 87 | import os 88 | import re 89 | import types 90 | import ConfigParser 91 | import shlex 92 | 93 | 94 | class RegistrationBase(object): 95 | def __init__(self, module, username=None, password=None): 96 | self.module = module 97 | self.username = username 98 | self.password = password 99 | 100 | def configure(self): 101 | raise NotImplementedError("Must be implemented by a sub-class") 102 | 103 | def enable(self): 104 | # Remove any existing redhat.repo 105 | redhat_repo = '/etc/yum.repos.d/redhat.repo' 106 | if os.path.isfile(redhat_repo): 107 | os.unlink(redhat_repo) 108 | 109 | def register(self): 110 | raise NotImplementedError("Must be implemented by a sub-class") 111 | 112 | def unregister(self): 113 | raise NotImplementedError("Must be implemented by a sub-class") 114 | 115 | def unsubscribe(self): 116 | raise NotImplementedError("Must be implemented by a sub-class") 117 | 118 | def update_plugin_conf(self, plugin, enabled=True): 119 | plugin_conf = '/etc/yum/pluginconf.d/%s.conf' % plugin 120 | if os.path.isfile(plugin_conf): 121 | cfg = ConfigParser.ConfigParser() 122 | cfg.read([plugin_conf]) 123 | if enabled: 124 | cfg.set('main', 'enabled', '1') 125 | else: 126 | cfg.set('main', 'enabled', '0') 127 | fd = open(plugin_conf, 'rwa+') 128 | cfg.write(fd) 129 | fd.close() 130 | 131 | def subscribe(self, **kwargs): 132 | raise NotImplementedError("Must be implemented by a sub-class") 133 | 134 | 135 | class Rhsm(RegistrationBase): 136 | def __init__(self, module, username=None, password=None): 137 | RegistrationBase.__init__(self, module, username, password) 138 | self.config = self._read_config() 139 | self.module = module 140 | 141 | def _read_config(self, rhsm_conf='/etc/rhsm/rhsm.conf'): 142 | ''' 143 | Load RHSM configuration from /etc/rhsm/rhsm.conf. 144 | Returns: 145 | * ConfigParser object 146 | ''' 147 | 148 | # Read RHSM defaults ... 149 | cp = ConfigParser.ConfigParser() 150 | cp.read(rhsm_conf) 151 | 152 | # Add support for specifying a default value w/o having to standup some configuration 153 | # Yeah, I know this should be subclassed ... but, oh well 154 | def get_option_default(self, key, default=''): 155 | sect, opt = key.split('.', 1) 156 | if self.has_section(sect) and self.has_option(sect, opt): 157 | return self.get(sect, opt) 158 | else: 159 | return default 160 | 161 | cp.get_option = types.MethodType(get_option_default, cp, ConfigParser.ConfigParser) 162 | 163 | return cp 164 | 165 | def enable(self): 166 | ''' 167 | Enable the system to receive updates from subscription-manager. 168 | This involves updating affected yum plugins and removing any 169 | conflicting yum repositories. 
170 | ''' 171 | RegistrationBase.enable(self) 172 | self.update_plugin_conf('rhnplugin', False) 173 | self.update_plugin_conf('subscription-manager', True) 174 | 175 | def configure(self, **kwargs): 176 | ''' 177 | Configure the system as directed for registration with RHN 178 | Raises: 179 | * Exception - if error occurs while running command 180 | ''' 181 | args = ['subscription-manager', 'config'] 182 | 183 | # Pass supplied **kwargs as parameters to subscription-manager. Ignore 184 | # non-configuration parameters and replace '_' with '.'. For example, 185 | # 'server_hostname' becomes '--system.hostname'. 186 | for k,v in kwargs.items(): 187 | if re.search(r'^(system|rhsm)_', k): 188 | args.append('--%s=%s' % (k.replace('_','.'), v)) 189 | 190 | self.module.run_command(args, check_rc=True) 191 | 192 | @property 193 | def is_registered(self): 194 | ''' 195 | Determine whether the current system 196 | Returns: 197 | * Boolean - whether the current system is currently registered to 198 | RHN. 199 | ''' 200 | # Quick version... 201 | if False: 202 | return os.path.isfile('/etc/pki/consumer/cert.pem') and \ 203 | os.path.isfile('/etc/pki/consumer/key.pem') 204 | 205 | args = ['subscription-manager', 'identity'] 206 | rc, stdout, stderr = self.module.run_command(args, check_rc=False) 207 | if rc == 0: 208 | return True 209 | else: 210 | return False 211 | 212 | def register(self, username, password, autosubscribe, activationkey, org_id): 213 | ''' 214 | Register the current system to the provided RHN server 215 | Raises: 216 | * Exception - if error occurs while running command 217 | ''' 218 | args = ['subscription-manager', 'register'] 219 | 220 | # Generate command arguments 221 | if activationkey: 222 | args.extend(['--activationkey', activationkey]) 223 | if org_id: 224 | args.extend(['--org', org_id]) 225 | else: 226 | if autosubscribe: 227 | args.append('--autosubscribe') 228 | if username: 229 | args.extend(['--username', username]) 230 | if password: 231 | args.extend(['--password', password]) 232 | 233 | rc, stderr, stdout = self.module.run_command(args, check_rc=True) 234 | 235 | def unsubscribe(self, serials=None): 236 | ''' 237 | Unsubscribe a system from subscribed channels 238 | Args: 239 | serials(list or None): list of serials to unsubscribe. If 240 | serials is none or an empty list, then 241 | all subscribed channels will be removed. 
242 | Raises: 243 | * Exception - if error occurs while running command 244 | ''' 245 | items = [] 246 | if serials is not None and serials: 247 | items = ["--serial=%s" % s for s in serials] 248 | if serials is None: 249 | items = ["--all"] 250 | 251 | if items: 252 | args = ['subscription-manager', 'unsubscribe'] + items 253 | rc, stderr, stdout = self.module.run_command(args, check_rc=True) 254 | return serials 255 | 256 | def unregister(self): 257 | ''' 258 | Unregister a currently registered system 259 | Raises: 260 | * Exception - if error occurs while running command 261 | ''' 262 | args = ['subscription-manager', 'unregister'] 263 | rc, stderr, stdout = self.module.run_command(args, check_rc=True) 264 | 265 | def subscribe_ids(self, pool_ids): 266 | items = ["--pool=%s" % p for p in pool_ids] 267 | args = ['subscription-manager', 'subscribe'] + items 268 | rc, stdout, stderr = self.module.run_command(args, check_rc=True) 269 | return pool_ids 270 | 271 | def subscribe(self, regexp='^$', pool_ids=None): 272 | ''' 273 | Subscribe current system to available pools matching the specified 274 | regular expression or pool_ids 275 | Raises: 276 | * Exception - if error occurs while running command 277 | ''' 278 | if pool_ids is not None: 279 | ids = pool_ids 280 | else: 281 | ids = [p.get_pool_id() for p in RhsmPools(self.module).filter(regexp)] 282 | 283 | return self.subscribe_ids(ids) 284 | 285 | def update_subscriptions(self, regexp, pool_ids): 286 | changed=False 287 | 288 | consumed_pools = RhsmPools(self.module, consumed=True) 289 | if pool_ids is not None: 290 | filtered_consumed = consumed_pools.filter_by_ids(pool_ids) 291 | else: 292 | filtered_consumed = consumed_pools.filter(regexp) 293 | 294 | #raise Exception("filtered_consumed: %s" % [p.get_pool_id() for p in filtered_consumed]) 295 | pool_ids_to_keep = [p.get_pool_id() for p in filtered_consumed] 296 | #raise Exception("pool_ids_to_keep: %s" % pool_ids_to_keep) 297 | serials_to_remove=[p.Serial for p in consumed_pools if p.get_pool_id() not in pool_ids_to_keep] 298 | #raise Exception("serials_to_remove: %s" % serials_to_remove) 299 | serials = self.unsubscribe(serials=serials_to_remove) 300 | #raise Exception("serials: %s" % serials) 301 | 302 | if pool_ids is not None: 303 | available_pool_ids = pool_ids 304 | else: 305 | available_pools = RhsmPools(self.module) 306 | available_pool_ids = [p.get_pool_id() for p in available_pools.filter(regexp)] 307 | 308 | pools_to_subscribe = list(set(available_pool_ids) - set(pool_ids_to_keep)) 309 | #raise Exception("pools_to_sub: %s" % pools_to_subscribe) 310 | subscribed_pool_ids = self.subscribe_ids(pools_to_subscribe) 311 | 312 | if subscribed_pool_ids or serials: 313 | changed=True 314 | return {'changed': changed, 'subscribed_pool_ids': subscribed_pool_ids, 315 | 'unsubscribed_serials': serials} 316 | 317 | 318 | 319 | class RhsmPool(object): 320 | ''' 321 | Convenience class for housing subscription information 322 | ''' 323 | 324 | def __init__(self, module, **kwargs): 325 | self.module = module 326 | for k,v in kwargs.items(): 327 | setattr(self, k, v) 328 | 329 | def __str__(self): 330 | return str(self.__getattribute__('_name')) 331 | 332 | def get_pool_id(self): 333 | return getattr(self, 'PoolId', getattr(self, 'PoolID')) 334 | 335 | 336 | class RhsmPools(object): 337 | """ 338 | This class is used for manipulating pools subscriptions with RHSM 339 | """ 340 | def __init__(self, module, consumed=False): 341 | self.module = module 342 | self.products = 
self._load_product_list(consumed) 343 | 344 | def __iter__(self): 345 | return self.products.__iter__() 346 | 347 | def _load_product_list(self, consumed=False): 348 | """ 349 | Loads list of all available or consumed pools for system in data structure 350 | 351 | Args: 352 | consumed(bool): if True list consumed pools, else list available pools (default False) 353 | """ 354 | args = "subscription-manager list" 355 | if consumed: 356 | args += " --consumed" 357 | else: 358 | args += " --available" 359 | rc, stdout, stderr = self.module.run_command(args, check_rc=True) 360 | 361 | products = [] 362 | is_provides = False 363 | for line in stdout.split('\n'): 364 | # Remove leading+trailing whitespace 365 | line = line.strip() 366 | # An empty line implies the end of a output group 367 | if len(line) == 0: 368 | continue 369 | # If a colon ':' is found, parse 370 | elif ':' in line: 371 | (key, value) = line.split(':',1) 372 | key = key.strip().replace(" ", "") # To unify 373 | value = value.strip() 374 | if key in ['ProductName', 'SubscriptionName']: 375 | # Remember the name for later processing 376 | products.append(RhsmPool(self.module, _name=value, key=value)) 377 | is_provides = False 378 | elif products: 379 | # Associate value with most recently recorded product 380 | if key == 'Provides': 381 | is_provides = True 382 | products[-1].__setattr__(key, [value]) 383 | else: 384 | is_provides = False 385 | products[-1].__setattr__(key, value) 386 | # FIXME - log some warning? 387 | #else: 388 | # warnings.warn("Unhandled subscription key/value: %s/%s" % (key,value)) 389 | else: 390 | if products and is_provides: 391 | # Associate value with the most recently recorded key 392 | products[-1].Provides.append(line) 393 | # FIXME - log some warning? 394 | #else: 395 | # warnings.warn("Unhandled subscription key/value: %s/%s" % (key,value)) 396 | return products 397 | 398 | def filter_by_ids(self, pool_ids=None): 399 | ''' 400 | Return a list of RhsmPools whose poolID is a member of the 401 | provided pool_ids 402 | ''' 403 | if pool_ids is not None: 404 | for product in self.products: 405 | if product.get_pool_id() in pool_ids: 406 | yield product 407 | 408 | 409 | def filter(self, regexp='^$'): 410 | ''' 411 | Return a list of RhsmPools whose name matches the provided regular expression 412 | ''' 413 | r = re.compile(regexp) 414 | for product in self.products: 415 | if r.search(product._name): 416 | yield product 417 | 418 | 419 | def main(): 420 | 421 | # Load RHSM configuration from file 422 | rhn = Rhsm(None) 423 | 424 | module = AnsibleModule( 425 | argument_spec = dict( 426 | state = dict(default='present', choices=['present', 'absent']), 427 | username = dict(default=None, required=False), 428 | password = dict(default=None, required=False), 429 | server_hostname = dict(default=rhn.config.get_option('server.hostname'), required=False), 430 | server_insecure = dict(default=rhn.config.get_option('server.insecure'), required=False), 431 | rhsm_baseurl = dict(default=rhn.config.get_option('rhsm.baseurl'), required=False), 432 | autosubscribe = dict(default=False, type='bool'), 433 | activationkey = dict(default=None, required=False), 434 | org_id = dict(default=None, required=False), 435 | pool = dict(default='^$', required=False, type='str'), 436 | pool_ids = dict(default=None, required=False, type='list'), 437 | ), 438 | mutually_exclusive = [ 439 | ['pool', 'pool_ids'] 440 | ] 441 | ) 442 | 443 | rhn.module = module 444 | state = module.params['state'] 445 | username = 
module.params['username']
446 |     password = module.params['password']
447 |     server_hostname = module.params['server_hostname']
448 |     server_insecure = module.params['server_insecure']
449 |     rhsm_baseurl = module.params['rhsm_baseurl']
450 |     autosubscribe = module.params['autosubscribe'] == True
451 |     activationkey = module.params['activationkey']
452 |     org_id = module.params['org_id']
453 |     pool = module.params['pool']
454 |     pool_ids = module.params['pool_ids']
455 |
456 |     # Ensure system is registered
457 |     if state == 'present':
458 |
459 |         # Check for missing parameters ...
460 |         if not (activationkey or username or password):
461 |             module.fail_json(msg="Missing arguments: must supply an activationkey (%s) or username (%s) and password (%s)" % (activationkey, username, password))
462 |         if not activationkey and not (username and password):
463 |             module.fail_json(msg="Missing arguments. If registering without an activationkey, you must supply both username and password")
464 |
465 |         # Register system
466 |         if rhn.is_registered:
467 |             if pool != '^$' or pool_ids is not None:
468 |                 try:
469 |                     result = rhn.update_subscriptions(pool, pool_ids)
470 |                 except Exception, e:
471 |                     module.fail_json(msg="Failed to update subscriptions for '%s': %s" % (server_hostname, e))
472 |                 else:
473 |                     module.exit_json(**result)
474 |             else:
475 |                 module.exit_json(changed=False, msg="System already registered.")
476 |         else:
477 |             try:
478 |                 rhn.enable()
479 |                 rhn.configure(**module.params)
480 |                 rhn.register(username, password, autosubscribe, activationkey, org_id)
481 |                 subscribed_pool_ids = rhn.subscribe(pool, pool_ids)
482 |             except Exception, e:
483 |                 module.fail_json(msg="Failed to register with '%s': %s" % (server_hostname, e))
484 |             else:
485 |                 module.exit_json(changed=True,
486 |                                  msg="System successfully registered to '%s'." % server_hostname,
487 |                                  subscribed_pool_ids=subscribed_pool_ids)
488 |     # Ensure system is *not* registered
489 |     if state == 'absent':
490 |         if not rhn.is_registered:
491 |             module.exit_json(changed=False, msg="System already unregistered.")
492 |         else:
493 |             try:
494 |                 rhn.unsubscribe()
495 |                 rhn.unregister()
496 |             except Exception, e:
497 |                 module.fail_json(msg="Failed to unregister: %s" % e)
498 |             else:
499 |                 module.exit_json(changed=True, msg="System successfully unregistered from %s."
% server_hostname)
500 |
501 |
502 | # import module snippets
503 | from ansible.module_utils.basic import *
504 | main()
505 |
-------------------------------------------------------------------------------- /playbooks/lookup_plugins/ec2_zones_by_region.py: --------------------------------------------------------------------------------
1 | from ansible import utils, errors
2 | import boto.ec2
3 |
4 | class LookupModule(object):
5 |     def __init__(self, basedir=None, **kwargs):
6 |         self.basedir = basedir
7 |
8 |     def run(self, region, inject=None, **kwargs):
9 |         try:
10 |             conn = boto.ec2.connect_to_region(region)
11 |             zones = [z.name for z in conn.get_all_zones()]
12 |             return zones
13 |         except Exception, e:
14 |             raise errors.AnsibleError("Could not lookup zones for region: %s\nexception: %s" % (region, e))
15 |
16 |
-------------------------------------------------------------------------------- /playbooks/openshift_setup.yml: --------------------------------------------------------------------------------
1 | # vim: set ft=ansible:
2 | ---
3 | - include: cloudformation_setup.yml
4 |
5 | - include: group_setup.yml
6 |
7 | # The master DNS entry is used because it's valuable to have an easy hostname to SSH into
8 | - name: Configure master DNS entry
9 |   hosts: project_master
10 |   gather_facts: yes
11 |   become: no
12 |   tasks:
13 |   - name: Route53 entry for master
14 |     route53:
15 |       command: create
16 |       zone: "{{ r53_zone }}"
17 |       record: "openshift-master.{{ r53_host_zone }}"
18 |       ttl: 60
19 |       type: A
20 |       value: "{{ hostvars[groups['tag_openshift-demo-' ~ cluster_id ~ '-host-type_master'].0]['ec2_ip_address'] }}"
21 |       overwrite: yes
22 |     delegate_to: localhost
23 |
24 | - name: Miscellaneous Host Configuration
25 |   hosts: cluster_hosts
26 |   tasks:
27 |   - name: Set the OpenShift package version (Ansible 1.9.x bug)
28 |     set_fact:
29 |       openshift_pkg_version: "{{ package_version }}"
30 |
31 |   - name: Create /etc/origin directory
32 |     command: mkdir -p /etc/origin
33 |
34 |   - name: Create aws.conf ini file
35 |     ini_file:
36 |       dest: /etc/origin/aws.conf
37 |       section: Global
38 |       option: Zone
39 |       value: "{{ ec2_placement }}"
40 |
41 |   - name: Enable rhui extras channel
42 |     command: yum-config-manager --enable rhui-REGION-rhel-server-extras
43 |
44 |   - name: Disable rhel-72 prerelease repo when not prerelease
45 |     command: yum-config-manager --disable rhel-72
46 |     when: not prerelease | bool
47 |
48 |   - name: Disable rhel-72-extras prerelease repo when not prerelease
49 |     command: yum-config-manager --disable rhel-72-extras
50 |     when: not prerelease | bool
51 |
52 |   # this file is placed during the cloudformation instantiation
53 |   - name: Add delay to /etc/sysconfig/docker-storage-setup
54 |     lineinfile:
55 |       dest: /etc/sysconfig/docker-storage-setup
56 |       insertafter: EOF
57 |       line: DEVICE_WAIT_TIMEOUT=60
58 |       regexp: '^DEVICE_WAIT_TIMEOUT=.*'
59 |     when: not prerelease | bool
60 |
61 |   # for metrics time synchronization
62 |   - name: Install ntpd
63 |     yum:
64 |       name: ntp
65 |       state: present
66 |
67 |   - name: Start and enable ntpd
68 |     service: name=ntpd enabled=yes state=started
69 |
70 | # Configure the instances
71 | - include: ../../openshift-ansible/playbooks/byo/openshift-cluster/config.yml
72 |   vars_files:
73 |   - ../../../../demo-ansible/playbooks/vars.yml
74 |   vars:
75 |     deployment_type: "{{ deployment_type }}"
76 |     openshift_cluster_id: "{{ cluster_id }}"
77 |     openshift_debug_level: "{{ debug_level }}"
78 |     openshift_node_debug_level: "{{ node_debug_level | default(debug_level, true) }}"
79 |
osm_controller_args: 80 | cloud-provider: 81 | - "aws" 82 | cloud-config: 83 | - "/etc/origin/aws.conf" 84 | osm_api_server_args: 85 | cloud-provider: 86 | - "aws" 87 | cloud-config: 88 | - "/etc/origin/aws.conf" 89 | openshift_node_kubelet_args: 90 | max-pods: 91 | - "100" 92 | cloud-provider: 93 | - "aws" 94 | cloud-config: 95 | - "/etc/origin/aws.conf" 96 | openshift_master_debug_level: "{{ master_debug_level | default(debug_level, true) }}" 97 | openshift_master_access_token_max_seconds: 2419200 98 | openshift_master_identity_providers: "{{ identity_providers }}" 99 | openshift_master_api_port: "{{ console_port }}" 100 | openshift_master_console_port: "{{ console_port }}" 101 | openshift_pkg_version: "{{ package_version }}" 102 | openshift_master_logging_public_url: "https://kibana.{{ r53_wildcard_zone }}" 103 | openshift_master_metrics_public_url: "https://metrics.{{ r53_wildcard_zone }}/hawkular/metrics" 104 | osm_cluster_network_cidr: 10.0.0.0/8 105 | osm_host_subnet_length: 16 106 | osm_default_subdomain: "{{ r53_wildcard_zone }}" 107 | osm_default_node_selector: "env=demo" 108 | osm_use_cockpit: false 109 | openshift_master_cluster_method: native 110 | openshift_master_cluster_hostname: openshift.internal.{{ r53_host_zone }} 111 | openshift_master_cluster_public_hostname: openshift.{{ r53_host_zone }} 112 | os_firewall_enabled: False 113 | oreg_url: "{{ registry_url }}" 114 | openshift_hosted_router_selector: "env=infra" 115 | openshift_hosted_registry_selector: "env=infra" 116 | 117 | # Router, Registry, internal users and projects, priming 118 | - include: post_setup.yml 119 | -------------------------------------------------------------------------------- /playbooks/post_setup.yml: -------------------------------------------------------------------------------- 1 | # vim: set ft=ansible: 2 | --- 3 | # to get the storage volume info and set up groups in case this playbook is run by itself 4 | - include: cloudformation_setup.yml 5 | - include: group_setup.yml 6 | 7 | - name: Node post configuration 8 | hosts: nodes 9 | vars_files: 10 | - vars.yml 11 | tasks: 12 | - name: pre-pull images 13 | command: "docker pull {{ item }}" 14 | with_items: preload_images 15 | when: not prerelease | bool 16 | 17 | - name: User creation 18 | hosts: masters 19 | vars: 20 | default_context: 'default/openshift-internal-{{ r53_host_zone | regex_replace("\.", "-") }}:{{ api_port }}/system:admin' 21 | vars_files: 22 | - vars.yml 23 | tasks: 24 | - name: Change the oc context 25 | command: "oc config use-context {{ default_context }}" 26 | 27 | - name: Switch to default project 28 | command: oc project default 29 | 30 | - name: Create the default users 31 | command: "htpasswd -b /etc/origin/master/htpasswd {{ item.user }} {{ default_password }}" 32 | with_items: users 33 | 34 | - name: Configuration of Router 35 | hosts: project_master 36 | vars_files: 37 | - vars.yml 38 | vars: 39 | default_context: 'default/openshift-internal-{{ r53_host_zone | regex_replace("\.", "-") }}:{{ api_port }}/system:admin' 40 | tasks: 41 | - name: Change the oc context 42 | command: "oc config use-context {{ default_context }}" 43 | 44 | - name: Switch to default project 45 | command: oc project default 46 | 47 | - name: Scale router 48 | command: "oc scale --replicas={{ num_infra_nodes }} dc router" 49 | 50 | - name: Wait for scaled router 51 | script: files/router_scale.sh 52 | register: router_scale_out 53 | until: router_scale_out | success 54 | retries: 10 55 | delay: 15 56 | 57 | # ** NOTE: Still using this because 
there's some weird bug when calling 58 | # openshift-ansible from demo-ansible 59 | # Using EBS storage with OpenShift requires that the systems 60 | # know the EC2 credentials in order to manipulate the EBS volumes. 61 | # masters require the keys in /etc/sysconfig/atomic-openshift-master 62 | - name: Set up master EC2 credentials 63 | hosts: masters 64 | gather_facts: no 65 | vars_files: 66 | - vars.yml 67 | tasks: 68 | - name: Write EC2 key ID to /etc/sysconfig/atomic-openshift-master 69 | lineinfile: 70 | dest: /etc/sysconfig/atomic-openshift-master 71 | insertafter: EOF 72 | line: "AWS_ACCESS_KEY_ID={{ lookup('env','AWS_ACCESS_KEY_ID') }}" 73 | regexp: '^AWS_ACCESS_KEY_ID=.*' 74 | register: master_id_result 75 | 76 | - name: Write EC2 secret key to /etc/sysconfig/atomic-openshift-master 77 | lineinfile: 78 | dest: /etc/sysconfig/atomic-openshift-master 79 | insertafter: EOF 80 | line: "AWS_SECRET_ACCESS_KEY={{ lookup('env','AWS_SECRET_ACCESS_KEY') }}" 81 | regexp: '^AWS_SECRET_ACCESS_KEY=.*' 82 | register: master_key_result 83 | 84 | - name: Restart atomic-openshift-master-controllers service 85 | service: 86 | name: atomic-openshift-master-controllers 87 | state: restarted 88 | when: (master_id_result | changed) or (master_key_result | changed) 89 | 90 | # Using EBS storage with OpenShift requires that the systems 91 | # know the EC2 credentials in order to manipulate the EBS volumes. 92 | # nodes require the keys in /etc/sysconfig/atomic-openshift-node 93 | - name: Set up node EC2 credentials 94 | hosts: nodes 95 | gather_facts: no 96 | vars_files: 97 | - vars.yml 98 | tasks: 99 | - name: Write EC2 key ID to /etc/sysconfig/atomic-openshift-node 100 | lineinfile: 101 | dest: /etc/sysconfig/atomic-openshift-node 102 | insertafter: EOF 103 | line: "AWS_ACCESS_KEY_ID={{ lookup('env','AWS_ACCESS_KEY_ID') }}" 104 | regexp: '^AWS_ACCESS_KEY_ID=.*' 105 | register: node_id_result 106 | 107 | - name: Write EC2 secret key to /etc/sysconfig/atomic-openshift-node 108 | lineinfile: 109 | dest: /etc/sysconfig/atomic-openshift-node 110 | insertafter: EOF 111 | line: "AWS_SECRET_ACCESS_KEY={{ lookup('env','AWS_SECRET_ACCESS_KEY') }}" 112 | regexp: '^AWS_SECRET_ACCESS_KEY=.*' 113 | register: node_key_result 114 | 115 | - name: Restart atomic-openshift-node service 116 | service: 117 | name: atomic-openshift-node 118 | state: restarted 119 | when: (node_id_result | changed) or (node_key_result | changed) 120 | 121 | - name: Installation and Configuration of Registry 122 | hosts: project_master 123 | gather_facts: no 124 | vars_files: 125 | - vars.yml 126 | vars: 127 | default_context: 'default/openshift-internal-{{ r53_host_zone | regex_replace("\.", "-") }}:{{ api_port }}/system:admin' 128 | tasks: 129 | 130 | # make sure that we are using the default user (system:admin) and the default project 131 | - name: Change the oc context 132 | command: "oc config use-context {{ default_context }}" 133 | 134 | - name: Switch to default project 135 | command: oc project default 136 | 137 | # we use a template to then lay down YAML to create the PV 138 | # this sets facts that are then consumed in the template 139 | - name: Set the facts for the registry PV template 140 | set_fact: 141 | pv_name: "registry-pv" 142 | capacity: "50" 143 | volid: "{{ hostvars['localhost']['registry_volume'] }}" 144 | 145 | - name: Create a YAML file for the PV for the Registry volume 146 | template: 147 | src: templates/pv.yaml.j2 148 | dest: /root/registry-pv.yaml 149 | 150 | - name: Check for registry PV 151 | command: oc get pv 
"{{ pv_name }}" 152 | register: registry_pv_out 153 | ignore_errors: true 154 | 155 | # as before 156 | - name: Set the facts for the registry PVC template 157 | set_fact: 158 | claim_name: "registry-pvc" 159 | capacity: "50" 160 | access_mode: "ReadWriteMany" 161 | 162 | - name: Check for registry PVC 163 | command: oc get pvc "{{ claim_name }}" 164 | register: registry_pvc_out 165 | ignore_errors: true 166 | 167 | - name: Create a YAML file for the PVC for the Registry volume 168 | template: 169 | src: templates/pvc.yaml.j2 170 | dest: /root/registry-pvc.yaml 171 | 172 | - name: Create PV from YAML for registry EBS volume 173 | command: oc create -f /root/registry-pv.yaml 174 | when: registry_pv_out | failed 175 | 176 | - name: Create PVC from YAML for registry EBS volume 177 | command: oc create -f /root/registry-pvc.yaml 178 | when: registry_pvc_out | failed 179 | 180 | - name: Disable config change trigger on registry DC 181 | command: oc patch dc/docker-registry -p '{"spec":{"triggers":[]}}' 182 | 183 | - name: Check if registry is still using empty directory 184 | command: oc volume dc/docker-registry 185 | register: registry_dc_out 186 | 187 | - name: Attach volume to registry DC 188 | command: > 189 | oc volume dc/docker-registry --add --overwrite -t persistentVolumeClaim 190 | --claim-name=registry-pvc --name=registry-storage 191 | when: "'empty directory' in registry_dc_out.stdout" 192 | register: registry_volume_attached 193 | 194 | - name: Check if fsGroup is set in registry DC 195 | shell: "oc get dc/docker-registry -o yaml | grep fsGroup" 196 | register: fsgroup_out 197 | ignore_errors: true 198 | 199 | - name: Determine default project supplemental group 200 | command: oc get project default -o json 201 | register: default_project_out 202 | when: fsgroup_out | failed 203 | 204 | - name: Process the default project json into a fact 205 | set_fact: 206 | default_project_json: "{{ default_project_out.stdout | from_json }}" 207 | when: fsgroup_out | failed 208 | 209 | - name: Patch the docker registry DC with the fsGroup 210 | command: oc patch dc/docker-registry -p '{"spec":{"template":{"spec":{"securityContext":{"fsGroup":{{ default_project_json["metadata"]["annotations"]["openshift.io/sa.scc.supplemental-groups"].split("/").0 }}}}}}}' 211 | when: fsgroup_out | failed 212 | 213 | - name: Cancel any current deployments 214 | command: oc deploy docker-registry --cancel 215 | when: fsgroup_out | failed or registry_volume_attached | success 216 | 217 | - name: Deploy latest configuration of registry DC 218 | command: oc deploy docker-registry --latest 219 | when: fsgroup_out | failed or registry_volume_attached | success 220 | register: deploy_latest 221 | 222 | - name: Re-enable config trigger on docker-registry 223 | command: oc patch dc/docker-registry -p '{"spec":{"triggers":[{"type":"ConfigChange"}]}}' 224 | when: deploy_latest | success 225 | 226 | - name: Check if registry is running 227 | script: files/check_registry_running.sh 228 | register: registry_running 229 | until: registry_running | success 230 | retries: 10 231 | delay: 15 232 | 233 | - name: Installation and Configuration of Log Aggregation 234 | hosts: project_master 235 | gather_facts: no 236 | vars_files: 237 | - vars.yml 238 | vars: 239 | default_context: 'default/openshift-internal-{{ r53_host_zone | regex_replace("\.", "-") }}:{{ api_port }}/system:admin' 240 | image_prefix: "{{ registry_url | regex_replace('^(.*\\/).*$', '\\\\1') }}" 241 | registry_fqdn: "{{ registry_url | regex_replace('(.*?)\\/.*$', 
'\\\\1') }}" 242 | tasks: 243 | 244 | # make sure that we are using the default user (system:admin) and the default project 245 | - name: Change the oc context 246 | command: "oc config use-context {{ default_context }}" 247 | 248 | - name: Check for the logging project 249 | command: "oc get project logging" 250 | register: logging_out 251 | ignore_errors: true 252 | 253 | # we will eventually want to look at the logging and metrics project, so 254 | # this is useful 255 | - name: Make admin user a cluster-admin 256 | command: oadm policy add-cluster-role-to-user cluster-admin admin 257 | 258 | # eventually we will change the region to be appropriate and this command will need to change 259 | - name: Create the logging project 260 | command: "oadm new-project logging" 261 | when: logging_out | failed 262 | 263 | - name: Remove the default node selector on the logging project 264 | command: oc patch namespace/logging -p '{"metadata":{"annotations":{"openshift.io/node-selector":""}}}' 265 | 266 | - name: Switch to the logging project 267 | command: "oc project logging" 268 | 269 | # for prerelease / QE stuff 270 | - name: Delete Docker auth secret if it exists 271 | command: oc delete secret prerelease 272 | ignore_errors: true 273 | when: prerelease | bool 274 | 275 | - name: Create the Docker auth secret 276 | command: oc secrets new-dockercfg prerelease --docker-server={{ registry_fqdn }} --docker-username={{ kerberos_user }}@redhat.com --docker-password={{ kerberos_token }} --docker-email={{ kerberos_user }}@redhat.com 277 | when: prerelease | bool 278 | 279 | - name: Check for logging-deployer secret 280 | command: "oc get secret logging-deployer" 281 | register: logging_deployer_secret_out 282 | ignore_errors: true 283 | 284 | - name: Create the null logging-deployer secret 285 | command: oc secrets new logging-deployer nothing=/dev/null 286 | when: logging_deployer_secret_out | failed 287 | 288 | - name: Check for logging-deployer service account 289 | command: oc get sa logging-deployer 290 | register: logging_deployer_sa_out 291 | ignore_errors: true 292 | 293 | - name: Create the logging-deployer service account 294 | shell: 'echo ''{"apiVersion":"v1","kind":"ServiceAccount","metadata":{"name":"logging-deployer"},"secrets":[{"name":"logging-deployer"}]}'' | oc create -f -' 295 | when: logging_deployer_sa_out | failed 296 | 297 | - name: Wait for the logging-deployer secrets 298 | shell: "oc get secrets | grep logging-deployer-token" 299 | register: deployer_token_out 300 | until: deployer_token_out | success 301 | retries: 15 302 | delay: 10 303 | 304 | - name: Grant the edit role to the logging-deployer service account 305 | command: oc policy add-role-to-user edit system:serviceaccount:logging:logging-deployer 306 | 307 | - name: Put the fluentd service account in the privileged SCC 308 | command: oadm policy add-scc-to-user privileged system:serviceaccount:logging:aggregated-logging-fluentd 309 | 310 | - name: Give fluentd cluster-reader permissions 311 | command: oadm policy add-cluster-role-to-user cluster-reader system:serviceaccount:logging:aggregated-logging-fluentd 312 | 313 | # if the artifacts are already deployed, don't process the deployer template 314 | - name: Check for the deployed artifacts 315 | command: oc get template logging-support-template 316 | register: logging_support_template_out 317 | ignore_errors: true 318 | 319 | - name: Instantiate the logging deployer via the template 320 | shell: > 321 | oc process logging-deployer-template -n openshift 322 | -v 
IMAGE_PREFIX="{{ image_prefix }}" 323 | -v KIBANA_HOSTNAME=kibana."{{ r53_wildcard_zone }}" 324 | -v PUBLIC_MASTER_URL=https://openshift."{{ r53_host_zone }}" 325 | -v ES_CLUSTER_SIZE=1 326 | -v ES_INSTANCE_RAM=1024M | oc create -f - 327 | when: logging_support_template_out | failed 328 | 329 | - name: Wait for the deployer to finish 330 | script: files/check_pod_complete.sh 'logging-deployer-[a-zA-Z0-9]*' 331 | register: check_out 332 | until: check_out | success 333 | retries: 15 334 | delay: 10 335 | 336 | - name: Determine elastic search DC 337 | shell: "oc get dc | awk '/logging-es-[a-zA-Z0-9]*/{ print $1 }'" 338 | register: logging_es_out 339 | 340 | - name: Modify the kibana DC with a node selector for infra 341 | command: oc patch dc/logging-kibana -p '{"spec":{"template":{"spec":{"nodeSelector":{"env":"infra"}}}}}' 342 | 343 | - name: Modify the es DC with a node selector for infra 344 | command: oc patch dc/{{ logging_es_out.stdout }} -p '{"spec":{"template":{"spec":{"nodeSelector":{"env":"infra"}}}}}' 345 | 346 | # if the image streams exist, don't process the support template 347 | - name: Check for logging-kibana imagestream 348 | command: oc get is logging-kibana 349 | register: kibana_is_out 350 | ignore_errors: true 351 | 352 | - name: Process the logging support template 353 | shell: "oc process logging-support-template | oc create -f -" 354 | when: kibana_is_out | failed 355 | 356 | - name: Determine image stream version tags 357 | script: files/image_stream_version_check.sh logging-support-template logging 358 | register: is_version_out 359 | 360 | - name: "Pull the image stream tags" 361 | command: oc import-image {{ item }}:{{ is_version_out.stdout }} --insecure=true 362 | with_items: 363 | - "logging-auth-proxy" 364 | - "logging-elasticsearch" 365 | - "logging-fluentd" 366 | - "logging-kibana" 367 | register: image_tag_pull_out 368 | retries: 2 369 | until: image_tag_pull_out | success 370 | 371 | # we use a template to then lay down YAML to create the PV 372 | # this sets facts that are then consumed in the template 373 | - name: Set the facts for the logging PV template 374 | set_fact: 375 | pv_name: "logging-pv" 376 | capacity: "100" 377 | volid: "{{ hostvars['localhost']['logging_volume'] }}" 378 | 379 | - name: Create a YAML file for the PV for the logging volume 380 | template: 381 | src: templates/pv.yaml.j2 382 | dest: /root/logging-pv.yaml 383 | 384 | - name: Check for logging PV 385 | command: oc get pv "{{ pv_name }}" 386 | register: logging_pv_out 387 | ignore_errors: true 388 | 389 | # as before 390 | - name: Set the facts for the logging PVC template 391 | set_fact: 392 | claim_name: "logging-pvc" 393 | capacity: "100" 394 | access_mode: "ReadWriteMany" 395 | 396 | - name: Check for logging PVC 397 | command: oc get pvc "{{ claim_name }}" 398 | register: logging_pvc_out 399 | ignore_errors: true 400 | 401 | - name: Create a YAML file for the PVC for the logging volume 402 | template: 403 | src: templates/pvc.yaml.j2 404 | dest: /root/logging-pvc.yaml 405 | 406 | - name: Create PV from YAML for logging EBS volume 407 | command: oc create -f /root/logging-pv.yaml 408 | when: logging_pv_out | failed 409 | 410 | - name: Create PVC from YAML for logging EBS volume 411 | command: oc create -f /root/logging-pvc.yaml 412 | when: logging_pvc_out | failed 413 | 414 | - name: Check if es is still using empty directory 415 | command: "oc volume dc/{{ logging_es_out.stdout }}" 416 | register: logging_dc_out 417 | 418 | - name: Attach volume to es DC 419 | command: > 
420 | oc volume dc/{{ logging_es_out.stdout }} --add --overwrite -t persistentVolumeClaim 421 | --claim-name=logging-pvc --name=elasticsearch-storage 422 | when: "'empty directory' in logging_dc_out.stdout" 423 | 424 | - name: Check if fsGroup is set in logging DC 425 | shell: "oc get dc/{{ logging_es_out.stdout }} -o yaml | grep fsGroup" 426 | register: fsgroup_out 427 | ignore_errors: true 428 | 429 | - name: Determine logging project supplemental group 430 | command: oc get project logging -o json 431 | register: logging_project_out 432 | when: fsgroup_out | failed 433 | 434 | - name: Process the logging project json into a fact 435 | set_fact: 436 | logging_project_json: "{{ logging_project_out.stdout | from_json }}" 437 | when: fsgroup_out | failed 438 | 439 | - name: Patch the es DC with the fsGroup 440 | command: oc patch dc/{{ logging_es_out.stdout }} -p '{"spec":{"template":{"spec":{"securityContext":{"fsGroup":{{ logging_project_json["metadata"]["annotations"]["openshift.io/sa.scc.supplemental-groups"].split("/").0 }}}}}}}' 441 | when: fsgroup_out | failed 442 | 443 | - name: Scale fluentd to number of nodes 444 | command: oc scale dc/logging-fluentd --replicas={{ groups['tag_openshift-demo-' ~ cluster_id ~ '-host-type_node'] | count }} 445 | 446 | - name: Installation and Configuration of Metrics 447 | hosts: project_master 448 | gather_facts: no 449 | vars_files: 450 | - vars.yml 451 | vars: 452 | default_context: 'default/openshift-internal-{{ r53_host_zone | regex_replace("\.", "-") }}:{{ api_port }}/system:admin' 453 | image_prefix: "{{ registry_url | regex_replace('^(.*\\/).*$', '\\\\1') }}" 454 | registry_fqdn: "{{ registry_url | regex_replace('(.*?)\\/.*$', '\\\\1') }}" 455 | tasks: 456 | 457 | # make sure that we are using the default user (system:admin) and the default project 458 | - name: Change the oc context 459 | command: "oc config use-context {{ default_context }}" 460 | 461 | - name: Change to the openshift-infra project 462 | command: "oc project openshift-infra" 463 | 464 | # for prerelease / QE stuff 465 | - name: Delete Docker auth secret if it exists 466 | command: oc delete secret prerelease 467 | ignore_errors: true 468 | when: prerelease | bool 469 | 470 | - name: Create the Docker auth secret 471 | command: oc secrets new-dockercfg prerelease --docker-server={{ registry_fqdn }} --docker-username={{ kerberos_user }}@redhat.com --docker-password={{ kerberos_token }} --docker-email={{ kerberos_user }}@redhat.com 472 | when: prerelease | bool 473 | 474 | - name: Force metrics components into env=infra 475 | command: oc patch namespace/openshift-infra -p '{"metadata":{"annotations":{"openshift.io/node-selector":"env=infra"}}}' 476 | 477 | - name: Check for metrics-deployer service account 478 | command: oc get sa metrics-deployer 479 | register: metrics_deployer_sa_out 480 | ignore_errors: true 481 | 482 | - name: Create the metrics-deployer service account 483 | shell: 'echo ''{"apiVersion":"v1","kind":"ServiceAccount","metadata":{"name":"metrics-deployer"},"secrets":[{"name":"metrics-deployer"}]}'' | oc create -f -' 484 | when: metrics_deployer_sa_out | failed 485 | 486 | - name: Wait for the metrics-deployer secrets 487 | shell: "oc get secrets | grep metrics-deployer-token" 488 | register: metrics_token_out 489 | until: metrics_token_out | success 490 | retries: 15 491 | delay: 10 492 | 493 | - name: Grant the edit role to the metrics-deployer service account 494 | command: oadm policy add-role-to-user edit 
system:serviceaccount:openshift-infra:metrics-deployer 495 | 496 | - name: Grant the cluster-reader role to the heapster service account 497 | command: oadm policy add-cluster-role-to-user cluster-reader system:serviceaccount:openshift-infra:heapster 498 | 499 | - name: Check for metrics-deployer secret 500 | command: "oc get secret metrics-deployer" 501 | register: metrics_deployer_secret_out 502 | ignore_errors: true 503 | 504 | - name: Create the null metrics-deployer secret 505 | command: oc secrets new metrics-deployer nothing=/dev/null 506 | when: metrics_deployer_secret_out | failed 507 | 508 | # we use a template to then lay down YAML to create the PV 509 | # this sets facts that are then consumed in the template 510 | - name: Set the facts for the metrics PV template 511 | set_fact: 512 | pv_name: "metrics-pv" 513 | capacity: "100" 514 | volid: "{{ hostvars['localhost']['metrics_volume'] }}" 515 | 516 | - name: Create a YAML file for the PV for the metrics volume 517 | template: 518 | src: templates/pv.yaml.j2 519 | dest: /root/metrics-pv.yaml 520 | 521 | - name: Check for metrics PV 522 | command: oc get pv "{{ pv_name }}" 523 | register: metrics_pv_out 524 | ignore_errors: true 525 | 526 | - name: Create PV from YAML for metrics EBS volume 527 | command: oc create -f /root/metrics-pv.yaml 528 | when: metrics_pv_out | failed 529 | 530 | # if the artifacts are already deployed, don't process the deployer template 531 | - name: Check for the deployed artifacts 532 | command: oc get rc hawkular-metrics 533 | register: metrics_artifacts_out 534 | ignore_errors: true 535 | 536 | - name: Instantiate the logging deployer via the template 537 | shell: > 538 | oc process metrics-deployer-template -n openshift 539 | -v IMAGE_PREFIX="{{ image_prefix }}" 540 | -v CASSANDRA_PV_SIZE=100Gi 541 | -v HAWKULAR_METRICS_HOSTNAME=metrics."{{ r53_wildcard_zone }}" | oc create -f - 542 | when: metrics_artifacts_out | failed 543 | 544 | - name: Wait for the deployer to finish 545 | script: files/check_pod_complete.sh 'metrics-deployer-[a-zA-Z0-9]*' 546 | register: check_out 547 | until: check_out | success 548 | retries: 15 549 | delay: 10 550 | 551 | - name: Wait for the hawkular-cassandra-1 RC to exist 552 | command: oc get rc hawkular-cassandra-1 553 | register: rc_out 554 | until: rc_out.stdout | search("hawkular-cassandra-1") 555 | retries: 15 556 | delay: 10 557 | 558 | - name: Check if fsGroup is set in cassandra RC 559 | shell: "oc get rc/hawkular-cassandra-1 -o yaml | grep fsGroup" 560 | register: fsgroup_out 561 | ignore_errors: true 562 | 563 | - name: Determine openshift-infra project supplemental group 564 | command: oc get project openshift-infra -o json 565 | register: infra_project_out 566 | when: fsgroup_out | failed 567 | 568 | - name: Process the openshift-infra project json into a fact 569 | set_fact: 570 | infra_project_json: "{{ infra_project_out.stdout | from_json }}" 571 | when: fsgroup_out | failed 572 | 573 | - name: Patch the cassandra RC with the fsGroup 574 | command: oc patch rc/hawkular-cassandra-1 -p '{"spec":{"template":{"spec":{"securityContext":{"fsGroup":{{ infra_project_json["metadata"]["annotations"]["openshift.io/sa.scc.supplemental-groups"].split("/").0 }}}}}}}' 575 | when: fsgroup_out | failed 576 | register: patched_out 577 | 578 | - name: Find the cassandra pod 579 | shell: oc get pod | awk '/hawkular-cassandra-1/{ print $1 }' 580 | register: cassandra_pod_out 581 | when: patched_out | success 582 | 583 | - name: Delete the cassandra pod to get the fsGroup into 
it 584 | command: "oc delete pod {{ cassandra_pod_out.stdout }}" 585 | when: patched_out | success 586 | 587 | - name: Check if the stats resolution has been set 588 | shell: oc get rc/heapster -o json | grep resolution 589 | register: resolution_out 590 | ignore_errors: true 591 | 592 | - name: Patch the heapster RC 593 | command: oc patch rc/heapster -p '{"spec":{"template":{"spec":{"containers":[{"name":"heapster","image":"registry.access.redhat.com/openshift3/metrics-heapster:3.2.0","command":["heapster-wrapper.sh","--wrapper.username_file=/hawkular-account/hawkular-metrics.username","--wrapper.password_file=/hawkular-account/hawkular-metrics.password","--wrapper.allowed_users_file=/secrets/heapster.allowed-users","--wrapper.endpoint_check=https://hawkular-metrics:443/hawkular/metrics/status","--source=kubernetes:https://kubernetes.default.svc:443?useServiceAccount=true&kubeletHttps=true&kubeletPort=10250","--sink=hawkular:https://hawkular-metrics:443?tenant=_system&labelToTenant=pod_namespace&labelNodeId=nodename&caCert=/hawkular-cert/hawkular-metrics-ca.certificate&user=%username%&pass=%password%&filter=label(container_name:^/system.slice.*|^/user.slice)","--tls_cert=/secrets/heapster.cert","--tls_key=/secrets/heapster.key","--tls_client_ca=/secrets/heapster.client-ca","--allowed_users=%allowed_users%","--stats_resolution=30s"],"ports":[{"name":"http-endpoint","containerPort":8082,"protocol":"TCP"}],"resources":{},"volumeMounts":[{"name":"heapster-secrets","mountPath":"/secrets"},{"name":"hawkular-metrics-certificate","mountPath":"/hawkular-cert"},{"name":"hawkular-metrics-account","mountPath":"/hawkular-account"}],"readinessProbe":{"exec":{"command":["/opt/heapster-readiness.sh"]},"timeoutSeconds":1,"periodSeconds":10,"successThreshold":1,"failureThreshold":3},"terminationMessagePath":"/dev/termination-log","imagePullPolicy":"IfNotPresent"}]}}}}' 594 | when: resolution_out | failed 595 | 596 | - name: Find the heapster pod 597 | shell: oc get pod | awk '/heapster/{ print $1 }' 598 | register: heapster_pod_out 599 | 600 | - name: Kill the heapster pod 601 | shell: oc delete pod {{ heapster_pod_out.stdout }} 602 | 603 | - name: Wait for old heapster pod to be gone 604 | shell: oc get pod | grep {{ heapster_pod_out.stdout }} 605 | register: metrics_pods_out 606 | until: metrics_pods_out | failed 607 | retries: 15 608 | delay: 10 609 | ignore_errors: true 610 | 611 | - name: Wait for new heapster pod to be running 612 | shell: oc get pod | grep heapster | grep -i unning 613 | register: heapster_running_out 614 | until: heapster_running_out | success 615 | retries: 15 616 | delay: 10 617 | ignore_errors: true 618 | 619 | - name: Demonstration project configuration 620 | hosts: project_master 621 | vars: 622 | default_context: 'default/openshift-internal-{{ r53_host_zone | regex_replace("\.", "-") }}:{{ api_port }}/system:admin' 623 | vars_files: 624 | - vars.yml 625 | tasks: 626 | - name: Find current projects list 627 | command: oc get projects 628 | register: projects 629 | 630 | - name: Create projects for internal users 631 | command: "oadm new-project {{ item.project }} --display-name='{{ item.project.title() }}' --admin='{{ item.user }}'" 632 | when: item.project not in projects.stdout and item.project != "empty" 633 | with_items: users 634 | 635 | - name: Switch to default project 636 | command: oc project default 637 | 638 | - name: Retrieve hexboard deployment configurations 639 | command: oc get dc/{{ hexboard.name }} -n {{ hexboard.namespace }} 640 | register: dcs_out 641 | 
ignore_errors: true 642 | 643 | - name: Login as the demo user 644 | command: oc login -u {{ users.0.user }} -p {{ default_password }} --certificate-authority=/etc/origin/master/ca.crt 645 | when: dcs_out | failed 646 | 647 | - name: "Get the demo user's token" 648 | script: files/get_token.sh 649 | register: auth_token 650 | when: dcs_out | failed 651 | 652 | - name: Set the token as a fact 653 | set_fact: 654 | access_token: "{{ auth_token.stdout }}" 655 | when: dcs_out | failed 656 | 657 | - name: Switch to the hexboard project 658 | command: oc project {{ hexboard.namespace }} 659 | when: dcs_out | failed 660 | 661 | - name: Install the hexboard template file on the master 662 | template: 663 | dest: /root/hexboard_template.json 664 | src: templates/hexboard_template.json.j2 665 | when: dcs_out | failed 666 | 667 | - name: Create the objects in the hexboard template 668 | command: oc create -f /root/hexboard_template.json 669 | when: dcs_out | failed 670 | ignore_errors: true 671 | 672 | - name: Start the hexboard build 673 | command: oc start-build {{ hexboard.name }} 674 | when: dcs_out | failed 675 | 676 | - name: Change the oc context 677 | command: "oc config use-context {{ default_context }}" 678 | when: dcs_out | failed 679 | -------------------------------------------------------------------------------- /playbooks/projects_setup.yml: -------------------------------------------------------------------------------- 1 | # vim: set ft=ansible: 2 | --- 3 | # to get the storage volume info and set up groups in case this playbook is run by itself 4 | - include: cloudformation_setup.yml 5 | - include: group_setup.yml 6 | 7 | # Smoke projects 8 | # need to create the users on all of the masters since we are using "local" auth 9 | - name: Smoke users 10 | vars_files: 11 | - vars.yml 12 | hosts: masters 13 | tasks: 14 | - name: Create the users 15 | command: "htpasswd -b /etc/origin/master/htpasswd {{ item }} {{ default_password }}" 16 | with_sequence: start=0 end={{ num_smoke_test_users }} format=user%02d 17 | when: run_smoke_tests | bool or run_only_smoke_tests | bool 18 | 19 | # run the actual commands only on one of the masters 20 | - name: Smoke projects and apps 21 | vars_files: 22 | - vars.yml 23 | hosts: project_master 24 | tasks: 25 | - include: tasks/smoke_projects.yml 26 | when: run_smoke_tests | bool or run_only_smoke_tests | bool 27 | -------------------------------------------------------------------------------- /playbooks/register_hosts.yml: -------------------------------------------------------------------------------- 1 | # vim: set ft=ansible: 2 | --- 3 | - include: cloudformation_setup.yml 4 | 5 | - include: group_setup.yml 6 | 7 | - name: Register host(s) 8 | hosts: cluster_hosts 9 | serial: 1 10 | gather_facts: no 11 | tasks: 12 | - name: Register host 13 | redhat_subscription: 14 | username: "{{ rhsm_user }}" 15 | password: "{{ rhsm_pass }}" 16 | state: present 17 | pool: "^(60 Day Supported OpenShift Enterprise|OpenShift Enterprise, Standard|OpenShift Enterprise, Premium|Employee)" 18 | register: register_result 19 | when: not (skip_subscription_management | bool) 20 | 21 | - name: Disable all known rhsm repos 22 | command: subscription-manager repos --disable='*' 23 | when: register_result | changed 24 | 25 | - name: Subscribe only to the ose repo 26 | command: subscription-manager repos --enable=rhel-7-server-ose-3.2-rpms 27 | when: register_result | changed 28 | 29 | -------------------------------------------------------------------------------- 
/playbooks/roles: -------------------------------------------------------------------------------- 1 | ../../openshift-ansible/roles -------------------------------------------------------------------------------- /playbooks/subscriptions_and_repos.yml: -------------------------------------------------------------------------------- 1 | # vim: set ft=ansible: 2 | --- 3 | - name: Register host(s) 4 | hosts: cluster_hosts 5 | serial: 1 6 | gather_facts: no 7 | tasks: 8 | - name: Register host 9 | redhat_subscription: 10 | username: "{{ rhsm_user }}" 11 | password: "{{ rhsm_pass }}" 12 | state: present 13 | pool: "^(60 Day Supported OpenShift Enterprise|OpenShift Enterprise, Standard|OpenShift Enterprise, Premium|Employee)" 14 | register: register_result 15 | when: not (skip_subscription_management | bool) 16 | 17 | - name: Disable all known rhsm repos 18 | command: subscription-manager repos --disable='*' 19 | when: register_result | changed 20 | 21 | - name: Subscribe only to the ose repo 22 | command: subscription-manager repos --enable=rhel-7-server-ose-3.1-rpms 23 | when: register_result | changed 24 | 25 | - name: Repository configuration 26 | hosts: cluster_hosts 27 | gather_facts: yes 28 | tasks: 29 | - name: Enable rhui extras channel 30 | command: yum-config-manager --enable rhui-REGION-rhel-server-extras 31 | 32 | - include: ../../aos-ansible/playbooks/roles/ops_mirror_bootstrap/tasks/main.yml 33 | vars: 34 | omb_aos_repo: "{{ aos_repo }}" 35 | when: use_certificate_repos | bool 36 | 37 | - name: Disable rhel-72 prerelease repo when not prerelease 38 | command: yum-config-manager --disable rhel-72 39 | when: not prerelease | bool 40 | 41 | - name: Disable rhel-72-extras prerelease repo when not prerelease 42 | command: yum-config-manager --disable rhel-72-extras 43 | when: not prerelease | bool 44 | 45 | - include: ../../aos-ansible/playbooks/roles/qe_registry_bootstrap/tasks/main.yml 46 | vars: 47 | omb_aos_repo: "{{ aos_repo }}" 48 | qe_openshift_kerberos_user: "{{ kerberos_user }}" 49 | qe_openshift_token: "{{ kerberos_token }}" 50 | when: use_certificate_repos | bool and prerelease | bool 51 | 52 | 53 | -------------------------------------------------------------------------------- /playbooks/tasks/cloudformation.yml: -------------------------------------------------------------------------------- 1 | - name: Launch the CloudFormation Template 2 | cloudformation: 3 | region: "{{ ec2_region }}" 4 | stack_name: openshift-demo-{{ cluster_id }} 5 | state: present 6 | tags: 7 | openshift-demo: "{{ cluster_id }}" 8 | template: files/cloudformation.json 9 | template_parameters: 10 | Route53HostedZone: "{{ r53_zone }}." 
11 | MasterApiPort: "{{ api_port }}" 12 | MasterHealthTarget: "TCP:{{ api_port }}" 13 | MasterClusterHostname: openshift.internal.{{ r53_host_zone }} 14 | MasterClusterPublicHostname: openshift.{{ r53_host_zone }} 15 | AppWildcardDomain: "*.{{ r53_wildcard_zone }}" 16 | VpcCidrBlock: 172.18.0.0/16 17 | VpcName: "{{ cluster_id }}" 18 | NumSubnets: "{{ vpc_subnet_count }}" 19 | SubnetAvailabilityZones: "{{ vpc_subnet_azs }}" 20 | SubnetCidrBlocks: 172.18.0.0/24,172.18.1.0/24,172.18.2.0/24,172.18.3.0/24 21 | KeyName: "{{ ec2_keypair }}" 22 | NumMasters: "{{ num_masters }}" 23 | MasterInstanceType: "{{ ec2_master_instance_type }}" 24 | MasterImageId: "{{ ec2_image }}" 25 | MasterUserData: "{{ lookup('template', '../templates/user_data_master.j2') | b64encode }}" 26 | MasterRootVolSize: "{{ os_defaults.masters.vol_sizes.root }}" 27 | MasterRootVolType: gp2 28 | MasterDockerVolSize: "{{ os_defaults.masters.vol_sizes.docker }}" 29 | MasterDockerVolType: gp2 30 | MasterEtcdVolSize: "{{ os_defaults.masters.vol_sizes.etcd }}" 31 | MasterEtcdVolType: gp2 32 | NumInfra: "{{ num_infra_nodes }}" 33 | InfraInstanceType: "{{ ec2_infra_instance_type }}" 34 | InfraImageId: "{{ ec2_image }}" 35 | InfraUserData: "{{ lookup('template', '../templates/user_data_node.j2') | b64encode }}" 36 | InfraRootVolSize: "{{ os_defaults.infra_nodes.vol_sizes.root }}" 37 | InfraRootVolType: gp2 38 | InfraDockerVolSize: "{{ os_defaults.infra_nodes.vol_sizes.docker }}" 39 | InfraDockerVolType: gp2 40 | NumNodes: "{{ num_app_nodes }}" 41 | NodeInstanceType: "{{ ec2_node_instance_type }}" 42 | NodeImageId: "{{ ec2_image }}" 43 | NodeUserData: "{{ lookup('template', '../templates/user_data_node.j2') | b64encode }}" 44 | NodeRootVolSize: "{{ os_defaults.app_nodes.vol_sizes.root }}" 45 | NodeRootVolType: gp2 46 | NodeDockerVolSize: "{{ os_defaults.app_nodes.vol_sizes.docker }}" 47 | NodeDockerVolType: gp2 48 | register: cf_output 49 | 50 | - name: Store CloudFormation-generated volume IDs as facts 51 | set_fact: 52 | registry_volume: "{{ cf_output.stack_outputs.RegistryVolumeID }}" 53 | metrics_volume: "{{ cf_output.stack_outputs.MetricsVolumeID }}" 54 | logging_volume: "{{ cf_output.stack_outputs.LoggingVolumeID }}" 55 | 56 | 57 | -------------------------------------------------------------------------------- /playbooks/tasks/group_setup.yml: -------------------------------------------------------------------------------- 1 | # vim: set ft=ansible: 2 | - name: Add masters to requisite groups 3 | add_host: 4 | name: "{{ item }}" 5 | groups: masters, etcd, nodes, cluster_hosts 6 | openshift_node_labels: 7 | region: "{{ cluster_id }}" 8 | env: master 9 | zone: "{{ hostvars[item].ec2_placement }}" 10 | with_items: groups['tag_openshift-demo-' ~ cluster_id ~ '-host-type_master'] 11 | 12 | - name: Create group for first master 13 | add_host: 14 | name: "{{ item }}" 15 | groups: project_master 16 | with_items: groups['tag_openshift-demo-' ~ cluster_id ~ '-host-type_master'].0 17 | 18 | - name: Add nodes to requisite groups 19 | add_host: 20 | name: "{{ item }}" 21 | groups: nodes, cluster_hosts 22 | openshift_node_labels: 23 | region: "{{ cluster_id }}" 24 | env: "{{ 'infra' if hostvars[item]['ec2_tag_openshift-demo-' ~ cluster_id ~ '-node-type'] == 'infrastructure' else 'demo' }}" 25 | zone: "{{ hostvars[item].ec2_placement }}" 26 | with_items: groups['tag_openshift-demo-' ~ cluster_id ~ '-host-type_node'] 27 | -------------------------------------------------------------------------------- /playbooks/tasks/registry_wait.yml: 
-------------------------------------------------------------------------------- 1 | # vim: set ft=ansible: 2 | # using base64 because of a bug in processing golang braced templates with ansible 3 | # echo "oc get dc/docker-registry -o go-template='{{.status.latestVersion}}'" | base64 4 | # b2MgZ2V0IGRjL2RvY2tlci1yZWdpc3RyeSAtbyBnby10ZW1wbGF0ZT0ne3suc3RhdHVzLmxhdGVzdFZlcnNpb259fScK 5 | - name: Make sure registry deployment version is non-zero 6 | shell: echo "b2MgZ2V0IGRjL2RvY2tlci1yZWdpc3RyeSAtbyBnby10ZW1wbGF0ZT0ne3suc3RhdHVzLmxhdGVzdFZlcnNpb259fScK" | base64 -d | bash 7 | register: registry_version_out 8 | until: '"0" not in registry_version_out.stdout' 9 | retries: 15 10 | delay: 10 11 | 12 | - name: Wait for registry to be running 13 | shell: oc get pod | grep -v deploy | awk '/docker-registry-{{ registry_version_out.stdout }}/{ print $3 }' 14 | register: deployer_output 15 | until: deployer_output.stdout | search("Running") 16 | retries: 15 17 | delay: 10 18 | -------------------------------------------------------------------------------- /playbooks/tasks/smoke_projects.yml: -------------------------------------------------------------------------------- 1 | # vim: set ft=ansible: 2 | --- 3 | - name: Find current projects list 4 | command: oc get projects 5 | register: projects 6 | 7 | # Set up the smoke project and app 8 | - name: Create user smoke test projects 9 | command: "oadm new-project {{ item }}-smoke --display-name='Smoke Test' --admin={{ item }}" 10 | when: item not in projects.stdout 11 | with_sequence: start=0 end={{ num_smoke_test_users }} format=user%02d 12 | 13 | - name: Execute build script for each project 14 | script: files/smoke_project.sh {{ item }} 15 | with_sequence: start=0 end={{ num_smoke_test_users }} format=user%02d 16 | 17 | - name: Expose smoke project service 18 | command: "oc expose service smoke -n {{ item }}-smoke" 19 | when: item not in projects.stdout 20 | with_sequence: start=0 end={{ num_smoke_test_users }} format=user%02d 21 | 22 | - name: Scale smoke app 23 | command: "oc scale dc/smoke --replicas=2 -n {{ item }}-smoke" 24 | with_sequence: start=0 end={{ num_smoke_test_users }} format=user%02d 25 | -------------------------------------------------------------------------------- /playbooks/tasks/validator.yml: -------------------------------------------------------------------------------- 1 | - name: 'Validating options' 2 | fail: 3 | msg: required values not set 4 | when: > 5 | cluster_id is not defined 6 | or ec2_region is not defined 7 | or ec2_image is not defined 8 | or ec2_keypair is not defined 9 | or ec2_master_instance_type is not defined 10 | or ec2_infra_instance_type is not defined or 11 | ec2_node_instance_type is not defined or 12 | r53_zone is not defined 13 | or r53_host_zone is not defined 14 | or r53_wildcard_zone is not defined 15 | or num_app_nodes is not defined 16 | or hexboard_size is not defined or 17 | rhsm_user is not defined or 18 | rhsm_pass is not defined or 19 | deployment_type is not defined 20 | -------------------------------------------------------------------------------- /playbooks/templates/aos.repo.j2: -------------------------------------------------------------------------------- 1 | [aos] 2 | name=Prerelease version of Atomic Enterprise Platform and OpenShift Enterprise RPMs 3 | baseurl={{ aos_repo | default('https://mirror.openshift.com/enterprise/enterprise-3.1.1.6/RH7-RHAOS-3.1/x86_64/os') }} 4 | failovermethod=priority 5 | enabled=1 6 | gpgcheck=0 7 | sslverify=0 8 | 
sslclientcert=/var/lib/yum/client-cert.pem 9 | sslclientkey=/var/lib/yum/client-key.pem 10 | -------------------------------------------------------------------------------- /playbooks/templates/hexboard_template.json.j2: -------------------------------------------------------------------------------- 1 | { 2 | "kind": "List", 3 | "apiVersion": "v1", 4 | "metadata": { 5 | "name": "{{ hexboard.name }}", 6 | "creationTimestamp": null, 7 | "annotations": { 8 | "description": "Hexboard Demo", 9 | "provider": "Red Hat Summit 2015 / JBoss Keynote demo team", 10 | "iconClass": "icon-nodejs" 11 | } 12 | }, 13 | "items": [ 14 | { 15 | "kind": "ImageStream", 16 | "apiVersion": "v1", 17 | "metadata": { 18 | "name": "{{ hexboard.name }}", 19 | "creationTimestamp": null 20 | }, 21 | "spec": {}, 22 | "status": { 23 | "dockerImageRepository": "" 24 | } 25 | }, 26 | { 27 | "kind": "BuildConfig", 28 | "apiVersion": "v1", 29 | "metadata": { 30 | "name": "{{ hexboard.name }}", 31 | "creationTimestamp": null 32 | }, 33 | "spec": { 34 | "source": { 35 | "type": "Git", 36 | "git": { 37 | "uri": "{{ hexboard.src_url }}" 38 | } 39 | }, 40 | "strategy": { 41 | "type": "Source", 42 | "sourceStrategy": { 43 | "from": { 44 | "kind": "DockerImage", 45 | "name": "{{ hexboard.img }}" 46 | }, 47 | "incremental": true 48 | } 49 | }, 50 | "output": { 51 | "to": { 52 | "kind": "ImageStreamTag", 53 | "name": "{{ hexboard.name }}:latest" 54 | } 55 | }, 56 | "resources": {} 57 | }, 58 | "status": { 59 | "lastVersion": 0 60 | } 61 | }, 62 | { 63 | "kind": "DeploymentConfig", 64 | "apiVersion": "v1", 65 | "metadata": { 66 | "name": "{{ hexboard.name }}", 67 | "creationTimestamp": null 68 | }, 69 | "spec": { 70 | "strategy": { 71 | "type": "Recreate", 72 | "resources": {} 73 | }, 74 | "triggers": [ 75 | { 76 | "type": "ConfigChange" 77 | }, 78 | { 79 | "type": "ImageChange", 80 | "imageChangeParams": { 81 | "automatic": true, 82 | "containerNames": [ 83 | "{{ hexboard.name }}" 84 | ], 85 | "from": { 86 | "kind": "ImageStreamTag", 87 | "name": "{{ hexboard.name }}:latest" 88 | } 89 | } 90 | } 91 | ], 92 | "replicas": 1, 93 | "selector": { 94 | "name": "{{ hexboard.name }}" 95 | }, 96 | "template": { 97 | "metadata": { 98 | "creationTimestamp": null, 99 | "labels": { 100 | "name": "{{ hexboard.name }}" 101 | } 102 | }, 103 | "spec": { 104 | "containers": [ 105 | { 106 | "name": "{{ hexboard.name }}", 107 | "image": "{{ hexboard.name }}:latest", 108 | "livenessProbe": { 109 | "httpGet": { 110 | "path": "/status", 111 | "port": 8080 112 | }, 113 | "timeoutSeconds": 5, 114 | "initialDelaySeconds": 30 115 | }, 116 | "env": [ 117 | { 118 | "name": "ACCESS_TOKEN", 119 | "value": "{{ access_token }}" 120 | }, 121 | { 122 | "name": "NAMESPACE", 123 | "value": "{{ hexboard.watch_namespace }}" 124 | }, 125 | { 126 | "name": "HEXBOARD_SIZE", 127 | "value": "{{ hexboard_size }}" 128 | } 129 | ], 130 | "ports": [ 131 | { 132 | "name": "hexboard-8080", 133 | "containerPort": 8080, 134 | "protocol": "TCP", 135 | "targetPort": 8080 136 | } 137 | ], 138 | "resources": {}, 139 | "terminationMessagePath": "/dev/termination-log", 140 | "imagePullPolicy": "Always", 141 | "securityContext": { 142 | "capabilities": {}, 143 | "privileged": false 144 | } 145 | } 146 | ], 147 | "restartPolicy": "Always", 148 | "dnsPolicy": "ClusterFirst" 149 | } 150 | } 151 | }, 152 | "status": {} 153 | }, 154 | { 155 | "kind": "Service", 156 | "apiVersion": "v1", 157 | "metadata": { 158 | "name": "{{ hexboard.name }}-service", 159 | "creationTimestamp": null 160 | }, 161 | 
"spec": { 162 | "ports": [ 163 | { 164 | "name": "hexboard-8080", 165 | "protocol": "TCP", 166 | "port": 8080, 167 | "targetPort": 8080 168 | } 169 | ], 170 | "selector": { 171 | "name": "{{ hexboard.name }}" 172 | }, 173 | "portalIP": "", 174 | "type": "ClusterIP" 175 | } 176 | }, 177 | { 178 | "kind": "Route", 179 | "apiVersion": "v1", 180 | "metadata": { 181 | "name": "{{ hexboard.name }}-route" 182 | }, 183 | "spec": { 184 | "host": "{{ hexboard.name ~ '-' ~ hexboard.namespace ~ '.' ~ r53_wildcard_zone }}", 185 | "to": { 186 | "kind": "Service", 187 | "name": "{{ hexboard.name }}-service" 188 | } 189 | } 190 | } 191 | ] 192 | } 193 | -------------------------------------------------------------------------------- /playbooks/templates/pv.yaml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: "v1" 2 | kind: "PersistentVolume" 3 | metadata: 4 | name: "{{ pv_name }}" 5 | spec: 6 | capacity: 7 | storage: "{{ capacity }}Gi" 8 | accessModes: 9 | - ReadWriteOnce 10 | - ReadWriteMany 11 | awsElasticBlockStore: 12 | fsType: "ext4" 13 | volumeID: "{{ volid }}" 14 | -------------------------------------------------------------------------------- /playbooks/templates/pvc.yaml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: "v1" 2 | kind: "PersistentVolumeClaim" 3 | metadata: 4 | name: "{{ claim_name }}" 5 | spec: 6 | accessModes: 7 | - "{{ access_mode }}" 8 | resources: 9 | requests: 10 | storage: "{{ capacity }}Gi" 11 | -------------------------------------------------------------------------------- /playbooks/templates/registry.sh.j2: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | /bin/oc get service docker-registry --template {{"'{{ .spec.portalIP }}:{{ with index .spec.ports 0 }}{{ .port }}{{ end }}'"}} 4 | -------------------------------------------------------------------------------- /playbooks/templates/user_data_master.j2: -------------------------------------------------------------------------------- 1 | #cloud-config 2 | cloud_config_modules: 3 | - fs_setup 4 | 5 | mounts: 6 | - [ /dev/xvdb ] 7 | - [ /dev/xvdc, /var/lib/etcd, xfs, "defaults" ] 8 | 9 | fs_setup: 10 | - label: etcd_storage 11 | filesystem: xfs 12 | device: /dev/xvdc 13 | partition: auto 14 | 15 | write_files: 16 | - content: | 17 | DEVS='/dev/xvdb' 18 | VG=docker_vg 19 | path: /etc/sysconfig/docker-storage-setup 20 | owner: root:root 21 | - path: /etc/sudoers.d/99-openshift-cloud-init-requiretty 22 | permissions: 440 23 | content: | 24 | Defaults:openshift !requiretty 25 | 26 | users: 27 | - default 28 | 29 | system_info: 30 | default_user: 31 | name: openshift 32 | -------------------------------------------------------------------------------- /playbooks/templates/user_data_node.j2: -------------------------------------------------------------------------------- 1 | #cloud-config 2 | 3 | mounts: 4 | - [ xvdb ] 5 | 6 | write_files: 7 | - content: | 8 | DEVS='/dev/xvdb' 9 | VG=docker_vg 10 | path: /etc/sysconfig/docker-storage-setup 11 | owner: root:root 12 | permissions: '0644' 13 | - path: /etc/sudoers.d/99-openshift-cloud-init-requiretty 14 | permissions: 440 15 | content: | 16 | Defaults:openshift !requiretty 17 | 18 | users: 19 | - default 20 | 21 | system_info: 22 | default_user: 23 | name: openshift 24 | -------------------------------------------------------------------------------- /playbooks/test.yml: 
-------------------------------------------------------------------------------- 1 | # vim: set ft=ansible: 2 | --- 3 | - include: cloudformation_setup.yml 4 | 5 | - include: group_setup.yml 6 | 7 | - name: Installation and Configuration of Metrics 8 | hosts: project_master 9 | gather_facts: no 10 | vars_files: 11 | - vars.yml 12 | vars: 13 | default_context: 'default/openshift-internal-{{ r53_host_zone | regex_replace("\.", "-") }}:{{ api_port }}/system:admin' 14 | image_prefix: "{{ registry_url | regex_replace('^(.*\\/).*$', '\\\\1') }}" 15 | registry_fqdn: "{{ registry_url | regex_replace('(.*?)\\/.*$', '\\\\1') }}" 16 | tasks: 17 | 18 | # make sure that we are using the default user (system:admin) and the default project 19 | - name: Change the oc context 20 | command: "oc config use-context {{ default_context }}" 21 | 22 | - name: Change to the openshift-infra project 23 | command: "oc project openshift-infra" 24 | 25 | # for prerelease / QE stuff 26 | - name: Delete Docker auth secret if it exists 27 | command: oc delete secret prerelease 28 | ignore_errors: true 29 | when: prerelease | bool 30 | 31 | - name: Create the Docker auth secret 32 | command: oc secrets new-dockercfg prerelease --docker-server={{ registry_fqdn }} --docker-username={{ kerberos_user }}@redhat.com --docker-password={{ kerberos_token }} --docker-email={{ kerberos_user }}@redhat.com 33 | when: prerelease | bool 34 | 35 | - name: Force metrics components into env=infra 36 | command: oc patch namespace/openshift-infra -p '{"metadata":{"annotations":{"openshift.io/node-selector":"env=infra"}}}' 37 | 38 | - name: Check for metrics-deployer service account 39 | command: oc get sa metrics-deployer 40 | register: metrics_deployer_sa_out 41 | ignore_errors: true 42 | 43 | - name: Create the metrics-deployer service account 44 | shell: 'echo ''{"apiVersion":"v1","kind":"ServiceAccount","metadata":{"name":"metrics-deployer"},"secrets":[{"name":"metrics-deployer"}]}'' | oc create -f -' 45 | when: metrics_deployer_sa_out | failed 46 | 47 | - name: Wait for the metrics-deployer secrets 48 | shell: "oc get secrets | grep metrics-deployer-token" 49 | register: metrics_token_out 50 | until: metrics_token_out | success 51 | retries: 15 52 | delay: 10 53 | 54 | - name: Grant the edit role to the metrics-deployer service account 55 | command: oadm policy add-role-to-user edit system:serviceaccount:openshift-infra:metrics-deployer 56 | 57 | - name: Grant the cluster-reader role to the heapster service account 58 | command: oadm policy add-cluster-role-to-user cluster-reader system:serviceaccount:openshift-infra:heapster 59 | 60 | - name: Check for metrics-deployer secret 61 | command: "oc get secret metrics-deployer" 62 | register: metrics_deployer_secret_out 63 | ignore_errors: true 64 | 65 | - name: Create the null metrics-deployer secret 66 | command: oc secrets new metrics-deployer nothing=/dev/null 67 | when: metrics_deployer_secret_out | failed 68 | 69 | # we use a template to then lay down YAML to create the PV 70 | # this sets facts that are then consumed in the template 71 | - name: Set the facts for the metrics PV template 72 | set_fact: 73 | pv_name: "metrics-pv" 74 | capacity: "100" 75 | volid: "{{ hostvars['localhost']['metrics_volume'] }}" 76 | 77 | - name: Create a YAML file for the PV for the metrics volume 78 | template: 79 | src: templates/pv.yaml.j2 80 | dest: /root/metrics-pv.yaml 81 | 82 | - name: Check for metrics PV 83 | command: oc get pv "{{ pv_name }}" 84 | register: metrics_pv_out 85 | ignore_errors: true 
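# NOTE: the "oc get pv" probe above is expected to fail on a fresh cluster, so
# ignore_errors keeps the play moving; the registered metrics_pv_out result then
# gates the create task below, so the PV is only created when it does not already exist.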
86 | 87 | - name: Create PV from YAML for metrics EBS volume 88 | command: oc create -f /root/metrics-pv.yaml 89 | when: metrics_pv_out | failed 90 | 91 | # if the artifacts are already deployed, don't process the deployer template 92 | - name: Check for the deployed artifacts 93 | command: oc get rc hawkular-metrics 94 | register: metrics_artifacts_out 95 | ignore_errors: true 96 | 97 | - name: Instantiate the metrics deployer via the template 98 | shell: > 99 | oc process metrics-deployer-template -n openshift 100 | -v IMAGE_PREFIX="{{ image_prefix }}" 101 | -v CASSANDRA_PV_SIZE=100Gi 102 | -v HAWKULAR_METRICS_HOSTNAME=metrics."{{ r53_wildcard_zone }}" | oc create -f - 103 | when: metrics_artifacts_out | failed 104 | 105 | - name: Wait for the deployer to finish 106 | script: files/check_pod_complete.sh 'metrics-deployer-[a-zA-Z0-9]*' 107 | register: check_out 108 | until: check_out | success 109 | retries: 15 110 | delay: 10 111 | 112 | - name: Wait for the hawkular-cassandra-1 RC to exist 113 | command: oc get rc hawkular-cassandra-1 114 | register: rc_out 115 | until: rc_out.stdout | search("hawkular-cassandra-1") 116 | retries: 15 117 | delay: 10 118 | 119 | - name: Check if fsGroup is set in cassandra RC 120 | shell: "oc get rc/hawkular-cassandra-1 -o yaml | grep fsGroup" 121 | register: fsgroup_out 122 | ignore_errors: true 123 | 124 | - name: Determine openshift-infra project supplemental group 125 | command: oc get project openshift-infra -o json 126 | register: infra_project_out 127 | when: fsgroup_out | failed 128 | 129 | - name: Process the openshift-infra project json into a fact 130 | set_fact: 131 | infra_project_json: "{{ infra_project_out.stdout | from_json }}" 132 | when: fsgroup_out | failed 133 | 134 | - name: Patch the cassandra RC with the fsGroup 135 | command: oc patch rc/hawkular-cassandra-1 -p '{"spec":{"template":{"spec":{"securityContext":{"fsGroup":{{ infra_project_json["metadata"]["annotations"]["openshift.io/sa.scc.supplemental-groups"].split("/").0 }}}}}}}' 136 | when: fsgroup_out | failed 137 | register: patched_out 138 | 139 | - name: Find the cassandra pod 140 | shell: oc get pod | awk '/hawkular-cassandra-1/{ print $1 }' 141 | register: cassandra_pod_out 142 | when: patched_out | success 143 | 144 | - name: Delete the cassandra pod to get the fsGroup into it 145 | command: "oc delete pod {{ cassandra_pod_out.stdout }}" 146 | when: patched_out | success 147 | 148 | - name: Check if the stats resolution has been set 149 | shell: oc get rc/heapster -o json | grep resolution 150 | register: resolution_out 151 | 152 | - name: Patch the heapster RC 153 | command: oc patch rc/heapster -p 
'{"spec":{"template":{"spec":{"containers":[{"name":"heapster","image":"registry.access.redhat.com/openshift3/metrics-heapster:3.2.0","command":["heapster-wrapper.sh","--wrapper.username_file=/hawkular-account/hawkular-metrics.username","--wrapper.password_file=/hawkular-account/hawkular-metrics.password","--wrapper.allowed_users_file=/secrets/heapster.allowed-users","--wrapper.endpoint_check=https://hawkular-metrics:443/hawkular/metrics/status","--source=kubernetes:https://kubernetes.default.svc:443?useServiceAccount=true&kubeletHttps=true&kubeletPort=10250","--sink=hawkular:https://hawkular-metrics:443?tenant=_system&labelToTenant=pod_namespace&labelNodeId=nodename&caCert=/hawkular-cert/hawkular-metrics-ca.certificate&user=%username%&pass=%password%&filter=label(container_name:^/system.slice.*|^/user.slice)","--tls_cert=/secrets/heapster.cert","--tls_key=/secrets/heapster.key","--tls_client_ca=/secrets/heapster.client-ca","--allowed_users=%allowed_users%","--stats_resolution=15s"],"ports":[{"name":"http-endpoint","containerPort":8082,"protocol":"TCP"}],"resources":{},"volumeMounts":[{"name":"heapster-secrets","mountPath":"/secrets"},{"name":"hawkular-metrics-certificate","mountPath":"/hawkular-cert"},{"name":"hawkular-metrics-account","mountPath":"/hawkular-account"}],"readinessProbe":{"exec":{"command":["/opt/heapster-readiness.sh"]},"timeoutSeconds":1,"periodSeconds":10,"successThreshold":1,"failureThreshold":3},"terminationMessagePath":"/dev/termination-log","imagePullPolicy":"IfNotPresent"}]}}}}' 154 | when: resolution_out | failed 155 | 156 | - name: Find the heapster pod 157 | shell: oc get pod | awk '/heapster/{ print $1 }' 158 | register: heapster_pod_out 159 | 160 | - name: Kill the heapster pod 161 | shell: oc delete pod {{ heapster_pod_out.stdout }} 162 | 163 | - name: Wait for old heapster pod to be gone 164 | shell: oc get pod | grep {{ heapster_pod_out.stdout }} 165 | register: metrics_pods_out 166 | until: metrics_pods_out | failed 167 | retries: 15 168 | delay: 10 169 | ignore_errors: true 170 | 171 | - name: Wait for new heapster pod to be running 172 | shell: oc get pod | grep heapster | grep -i unning 173 | register: heapster_running_out 174 | until: heapster_running_out | success 175 | retries: 15 176 | delay: 10 177 | ignore_errors: true 178 | 179 | -------------------------------------------------------------------------------- /playbooks/test2.yml: -------------------------------------------------------------------------------- 1 | - shell: echo "foo2" 2 | -------------------------------------------------------------------------------- /playbooks/uninstall.yml: -------------------------------------------------------------------------------- 1 | # vim: set ft=ansible: 2 | # This will uninstall OpenShift software but not tear down the env 3 | --- 4 | - include: cloudformation_setup.yml 5 | 6 | - include: group_setup.yml 7 | 8 | # Configure the instances 9 | - include: ../../openshift-ansible/playbooks/adhoc/uninstall.yml 10 | vars_files: 11 | - ../../../demo-ansible/playbooks/vars.yml 12 | vars: 13 | deployment_type: "{{ deployment_type }}" 14 | openshift_cluster_id: "{{ cluster_id }}" 15 | openshift_debug_level: "{{ debug_level }}" 16 | openshift_node_debug_level: "{{ node_debug_level | default(debug_level, true) }}" 17 | osm_controller_args: 18 | cloud-provider: 19 | - "aws" 20 | cloud-config: 21 | - "/etc/origin/aws.conf" 22 | osm_api_server_args: 23 | cloud-provider: 24 | - "aws" 25 | cloud-config: 26 | - "/etc/origin/aws.conf" 27 | openshift_node_kubelet_args: 
28 | max-pods: 29 | - "100" 30 | cloud-provider: 31 | - "aws" 32 | cloud-config: 33 | - "/etc/origin/aws.conf" 34 | openshift_master_debug_level: "{{ master_debug_level | default(debug_level, true) }}" 35 | openshift_master_access_token_max_seconds: 2419200 36 | openshift_master_identity_providers: "{{ identity_providers }}" 37 | openshift_master_api_port: "{{ console_port }}" 38 | openshift_master_console_port: "{{ console_port }}" 39 | openshift_pkg_version: "{{ package_version }}" 40 | openshift_master_logging_public_url: "https://kibana.{{ r53_wildcard_zone }}" 41 | openshift_master_metrics_public_url: "https://metrics.{{ r53_wildcard_zone }}/hawkular/metrics" 42 | osm_cluster_network_cidr: 10.0.0.0/8 43 | osm_host_subnet_length: 16 44 | osm_default_subdomain: "{{ r53_wildcard_zone }}" 45 | osm_default_node_selector: "env=demo" 46 | osm_use_cockpit: false 47 | openshift_master_cluster_method: native 48 | openshift_master_cluster_hostname: openshift.internal.{{ r53_host_zone }} 49 | openshift_master_cluster_public_hostname: openshift.{{ r53_host_zone }} 50 | os_firewall_enabled: False 51 | oreg_url: "{{ registry_url }}" 52 | 53 | 54 | -------------------------------------------------------------------------------- /playbooks/vars.yml: -------------------------------------------------------------------------------- 1 | --- 2 | debug_level: 3 3 | node_debug_level: 2 4 | master_debug_level: 2 5 | 6 | identity_providers: 7 | - name: htpasswd_auth 8 | login: true 9 | challenge: true 10 | kind: HTPasswdPasswordIdentityProvider 11 | filename: /etc/origin/master/htpasswd 12 | 13 | preload_images: 14 | - openshift3/nodejs-010-rhel7:latest 15 | - openshift3/ose-pod:latest 16 | - openshift3/ose-sti-builder:latest 17 | - openshift3/ose-docker-builder:latest 18 | - openshift3/ose-deployer:latest 19 | - rhscl/mongodb-26-rhel7:latest 20 | - jboss-eap-6/eap64-openshift:1.1 21 | - kubernetes/guestbook:latest 22 | 23 | users: 24 | - user: demo 25 | project: demo 26 | - user: admin 27 | project: empty 28 | 29 | hexboard: 30 | namespace: demo 31 | watch_namespace: demo 32 | name: hexboard 33 | src_url: https://github.com/2015-Middleware-Keynote/hexboard 34 | img: openshift3/nodejs-010-rhel7:latest 35 | 36 | os_defaults: 37 | masters: 38 | count: "{{ num_masters }}" 39 | instance_type: "{{ ec2_master_instance_type }}" 40 | vol_sizes: 41 | root: 15 42 | docker: 25 43 | etcd: 25 44 | infra_nodes: 45 | count: "{{ num_infra_nodes }}" 46 | instance_type: "{{ ec2_infra_instance_type }}" 47 | vol_sizes: 48 | root: 30 49 | docker: 25 50 | app_nodes: 51 | count: "{{ num_app_nodes }}" 52 | instance_type: "{{ ec2_node_instance_type }}" 53 | vol_sizes: 54 | root: 30 55 | docker: 25 56 | hosted: 57 | vol_sizes: 58 | registry: 30 59 | logging: 30 60 | metrics: 30 61 | 62 | provider_defaults: 63 | ec2: 64 | tags: 65 | env: "{{ cluster_id }}" 66 | groups: ["tag_env_{{ cluster_id }}"] 67 | master: 68 | groups: 69 | - "tag_openshift-master_{{ cluster_id }}" 70 | - "tag_openshift-node_{{ cluster_id }}" 71 | - tag_node-region_master 72 | security_groups: 73 | - "{{ cluster_id }}-master" 74 | - "{{ cluster_id }}-node" 75 | tags: 76 | openshift-master: "{{ cluster_id }}" 77 | openshift-node: "{{ cluster_id }}" 78 | node-region: master 79 | node: 80 | groups: 81 | - "tag_openshift-node_{{ cluster_id }}" 82 | security_groups: 83 | - "{{ cluster_id }}-node" 84 | tags: 85 | openshift-node: "{{ cluster_id }}" 86 | -------------------------------------------------------------------------------- /run.py: 
-------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # vim: sw=2 ts=2 3 | 4 | import click 5 | import os 6 | import sys 7 | 8 | hexboard_sizes = ['tiny', 'xsmall', 'small', 'medium', 'large', 'xlarge'] 9 | 10 | @click.command() 11 | 12 | ### Cluster options 13 | @click.option('--cluster-id', default='demo', show_default=True, 14 | help='Cluster identifier (used for prefixing/naming various items created in AWS)') 15 | @click.option('--num-nodes', type=click.INT, default=1, show_default=True, 16 | help='Number of application nodes') 17 | @click.option('--num-infra', type=click.IntRange(1,3), default=1, 18 | show_default=True, help='Number of infrastructure nodes') 19 | @click.option('--hexboard-size', type=click.Choice(hexboard_sizes), 20 | help='Override Hexboard size calculation (tiny=32, xsmall=64, small=108, medium=266, large=512, xlarge=1026)', 21 | show_default=True) 22 | @click.option('--console-port', default='443', type=click.IntRange(1,65535), help='OpenShift web console port', 23 | show_default=True) 24 | @click.option('--api-port', default='443', type=click.IntRange(1,65535), help='OpenShift API port', 25 | show_default=True) 26 | @click.option('--deployment-type', default='openshift-enterprise', help='openshift deployment type', 27 | show_default=True) 28 | @click.option('--default-password', default='openshift3', 29 | help='password for all users', show_default=True) 30 | 31 | ### Smoke test options 32 | @click.option('--run-smoke-tests', is_flag=True, help='Run workshop smoke tests') 33 | @click.option('--num-smoke-test-users', default=5, type=click.INT, 34 | help='Number of smoke test users', show_default=True) 35 | @click.option('--run-only-smoke-tests', is_flag=True, help='Run only the workshop smoke tests') 36 | 37 | ### AWS/EC2 options 38 | @click.option('--region', default='us-east-1', help='ec2 region', 39 | show_default=True) 40 | @click.option('--ami', default='ami-2051294a', help='ec2 ami', 41 | show_default=True) 42 | @click.option('--master-instance-type', default='m4.large', help='ec2 instance type', 43 | show_default=True) 44 | @click.option('--infra-instance-type', default='m4.2xlarge', help='ec2 instance type', 45 | show_default=True) 46 | @click.option('--node-instance-type', default='m4.large', help='ec2 instance type', 47 | show_default=True) 48 | @click.option('--keypair', default='default', help='ec2 keypair name', 49 | show_default=True) 50 | 51 | ### DNS options 52 | @click.option('--r53-zone', help='route53 hosted zone (must be pre-configured)') 53 | @click.option('--app-dns-prefix', default='apps', help='application dns prefix', 54 | show_default=True) 55 | 56 | ### Subscription and Software options 57 | @click.option('--package-version', help='OpenShift Package version (eg: 3.2.1.9)', 58 | show_default=True, default='3.2.1.9') 59 | @click.option('--rhsm-user', help='Red Hat Subscription Management User') 60 | @click.option('--rhsm-pass', help='Red Hat Subscription Management Password', 61 | hide_input=True,) 62 | @click.option('--skip-subscription-management', is_flag=True, 63 | help='Skip subscription management steps') 64 | @click.option('--use-certificate-repos', is_flag=True, 65 | help='Uses certificate-based yum repositories for the AOS content. 
Requires providing paths to local certificate key and pem files.') 66 | @click.option('--aos-repo', help='An alternate URL to locate software') 67 | @click.option('--prerelease', help='If using prerelease software, set to true', 68 | show_default=True, default=False, is_flag=True) 69 | @click.option('--kerberos-user', help='Kerberos userid (eg: jsmith) for use with --prerelease') 70 | @click.option('--kerberos-token', help='Token to go with the kerberos user for use with --prerelease') 71 | @click.option('--registry-url', help='A URL for an alternate Docker registry for dockerized components of OpenShift', 72 | show_default=True, default='registry.access.redhat.com/openshift3/ose-${component}:${version}') 73 | 74 | ### Miscellaneous options 75 | @click.option('--no-confirm', is_flag=True, 76 | help='Skip confirmation prompt') 77 | @click.option('--debug-playbook', 78 | help='Specify a path to a specific playbook to debug with all vars') 79 | @click.option('--cleanup', is_flag=True, 80 | help='Deletes environment') 81 | @click.help_option('--help', '-h') 82 | @click.option('-v', '--verbose', count=True) 83 | 84 | def launch_demo_env(num_nodes, 85 | num_infra, 86 | hexboard_size=None, 87 | region=None, 88 | ami=None, 89 | no_confirm=False, 90 | master_instance_type=None, 91 | node_instance_type=None, 92 | infra_instance_type=None, 93 | keypair=None, 94 | r53_zone=None, 95 | cluster_id=None, 96 | app_dns_prefix=None, 97 | deployment_type=None, 98 | console_port=443, 99 | api_port=443, 100 | package_version=None, 101 | rhsm_user=None, 102 | rhsm_pass=None, 103 | skip_subscription_management=False, 104 | use_certificate_repos=False, 105 | aos_repo=None, 106 | prerelease=False, 107 | kerberos_user=None, 108 | kerberos_token=None, 109 | registry_url=None, 110 | run_smoke_tests=False, 111 | num_smoke_test_users=None, 112 | run_only_smoke_tests=False, 113 | default_password=None, 114 | debug_playbook=None, 115 | cleanup=False, 116 | verbose=0): 117 | 118 | # Force num_masters = 3 because of an issue with API startup and ELB health checks and more 119 | num_masters = 3 120 | 121 | # If not running cleanup need to prompt for the R53 zone: 122 | if r53_zone is None: 123 | r53_zone = click.prompt('R53 zone') 124 | 125 | # Cannot run cleanup with no-confirm 126 | if cleanup and no_confirm: 127 | click.echo('Cannot use --cleanup and --no-confirm as it is not safe.') 128 | sys.exit(1) 129 | 130 | # If skipping subscription management, must have cert repos enabled 131 | # If cleaning up, this is ok 132 | if not cleanup: 133 | if skip_subscription_management and not use_certificate_repos: 134 | click.echo('Cannot skip subscription management without using certificate repos.') 135 | sys.exit(1) 136 | 137 | # If using subscription management, cannot use certificate repos 138 | if not skip_subscription_management and use_certificate_repos: 139 | click.echo('Must skip subscription management when using certificate repos') 140 | sys.exit(1) 141 | 142 | # Prompt for RHSM user and password if not skipping subscription management 143 | if not skip_subscription_management: 144 | # If the user already provided values, don't bother asking again 145 | if rhsm_user is None: 146 | rhsm_user = click.prompt("RHSM username?") 147 | if rhsm_pass is None: 148 | rhsm_pass = click.prompt("RHSM password?", hide_input=True, confirmation_prompt=True) 149 | 150 | # User must supply a repo URL if using certificate repos 151 | if use_certificate_repos and aos_repo is None: 152 | click.echo('Must provide a repo URL via --aos-repo when 
using certificate repos') 153 | sys.exit(1) 154 | 155 | # User must supply kerberos user and token with --prerelease 156 | if prerelease and ( kerberos_user is None or kerberos_token is None ): 157 | click.echo('Must provide --kerberos-user / --kerberos-token with --prerelease') 158 | sys.exit(1) 159 | 160 | # Calculate the hexboard size if it was not overridden 161 | if hexboard_size is None: 162 | if num_nodes <= 1: 163 | hexboard_size = 'tiny' 164 | elif num_nodes < 3: 165 | hexboard_size = 'xsmall' 166 | elif num_nodes < 5: 167 | hexboard_size = 'small' 168 | elif num_nodes < 9: 169 | hexboard_size = 'medium' 170 | elif num_nodes < 15: 171 | hexboard_size = 'large' 172 | else: 173 | hexboard_size = 'xlarge' 174 | 175 | # Calculate various DNS values 176 | host_zone="%s.%s" % (cluster_id, r53_zone) 177 | wildcard_zone="%s.%s.%s" % (app_dns_prefix, cluster_id, r53_zone) 178 | 179 | # Display information to the user about their choices 180 | click.echo('Configured values:') 181 | click.echo('\tcluster_id: %s' % cluster_id) 182 | click.echo('\tami: %s' % ami) 183 | click.echo('\tregion: %s' % region) 184 | click.echo('\tmaster instance_type: %s' % master_instance_type) 185 | click.echo('\tnode_instance_type: %s' % node_instance_type) 186 | click.echo('\tinfra_instance_type: %s' % infra_instance_type) 187 | click.echo('\tkeypair: %s' % keypair) 188 | click.echo('\tnodes: %s' % num_nodes) 189 | click.echo('\tinfra nodes: %s' % num_infra) 190 | click.echo('\tmasters: %s' % num_masters) 191 | click.echo('\tconsole port: %s' % console_port) 192 | click.echo('\tapi port: %s' % api_port) 193 | click.echo('\tdeployment_type: %s' % deployment_type) 194 | click.echo('\tpackage_version: %s' % package_version) 195 | 196 | if use_certificate_repos: 197 | click.echo('\taos_repo: %s' % aos_repo) 198 | 199 | click.echo('\tprerelease: %s' % prerelease) 200 | 201 | if prerelease: 202 | click.echo('\tkerberos user: %s' % kerberos_user) 203 | click.echo('\tkerberos token: %s' % kerberos_token) 204 | 205 | click.echo('\tregistry_url: %s' % registry_url) 206 | click.echo('\thexboard_size: %s' % hexboard_size) 207 | click.echo('\tr53_zone: %s' % r53_zone) 208 | click.echo('\tapp_dns_prefix: %s' % app_dns_prefix) 209 | click.echo('\thost dns: %s' % host_zone) 210 | click.echo('\tapps dns: %s' % wildcard_zone) 211 | 212 | # Don't bother to display subscription manager values if we're skipping subscription management 213 | if not skip_subscription_management: 214 | click.echo('\trhsm_user: %s' % rhsm_user) 215 | click.echo('\trhsm_pass: *******') 216 | 217 | if run_smoke_tests or run_only_smoke_tests: 218 | click.echo('\tnum smoke users: %s' % num_smoke_test_users) 219 | 220 | click.echo('\tdefault password: %s' % default_password) 221 | 222 | click.echo("") 223 | 224 | if run_only_smoke_tests: 225 | click.echo('Only smoke tests will be run.') 226 | 227 | if debug_playbook: 228 | click.echo('We will debug the following playbook: %s' % (debug_playbook)) 229 | 230 | if not no_confirm and not cleanup: 231 | click.confirm('Continue using these values?', abort=True) 232 | 233 | # Special confirmations for cleanup 234 | if cleanup: 235 | click.confirm('Delete the cluster %s' % cluster_id, abort=True) 236 | click.confirm('ARE YOU REALLY SURE YOU WANT TO DELETE THE CLUSTER %s' % cluster_id, abort=True) 237 | click.confirm('Press enter to continue', abort=True, default=True) 238 | 239 | playbooks = [] 240 | 241 | if debug_playbook: 242 | playbooks = [debug_playbook] 243 | elif run_only_smoke_tests: 244 | playbooks = 
['playbooks/projects_setup.yml'] 245 | elif cleanup: 246 | playbooks = ['playbooks/cleanup.yml'] 247 | else: 248 | 249 | # start with the basic setup 250 | playbooks = ['playbooks/cloudformation_setup.yml'] 251 | 252 | # if cert repos, then add that playbook 253 | if use_certificate_repos: 254 | playbooks.append('playbooks/certificate_repos.yml') 255 | 256 | # if not cert repos, add the register hosts playbook 257 | if not use_certificate_repos: 258 | playbooks.append('playbooks/register_hosts.yml') 259 | 260 | # add the setup and projects playbooks 261 | playbooks.append('playbooks/openshift_setup.yml') 262 | playbooks.append('playbooks/projects_setup.yml') 263 | 264 | for playbook in playbooks: 265 | 266 | # hide cache output unless in verbose mode 267 | devnull='> /dev/null' 268 | 269 | if verbose > 0: 270 | devnull='' 271 | 272 | # refresh the inventory cache to prevent stale hosts from 273 | # interfering with re-running 274 | command='inventory/aws/hosts/ec2.py --refresh-cache %s' % (devnull) 275 | os.system(command) 276 | 277 | # remove any cached facts to prevent stale data during a re-run 278 | command='rm -rf .ansible/cached_facts' 279 | os.system(command) 280 | 281 | command='ansible-playbook -i inventory/aws/hosts -e \'cluster_id=%s \ 282 | ec2_region=%s \ 283 | ec2_image=%s \ 284 | ec2_keypair=%s \ 285 | ec2_master_instance_type=%s \ 286 | ec2_infra_instance_type=%s \ 287 | ec2_node_instance_type=%s \ 288 | r53_zone=%s \ 289 | r53_host_zone=%s \ 290 | r53_wildcard_zone=%s \ 291 | console_port=%s \ 292 | api_port=%s \ 293 | num_app_nodes=%s \ 294 | num_infra_nodes=%s \ 295 | num_masters=%s \ 296 | hexboard_size=%s \ 297 | deployment_type=%s \ 298 | package_version=-%s \ 299 | rhsm_user=%s \ 300 | rhsm_pass=%s \ 301 | skip_subscription_management=%s \ 302 | use_certificate_repos=%s \ 303 | aos_repo=%s \ 304 | prerelease=%s \ 305 | kerberos_user=%s \ 306 | kerberos_token=%s \ 307 | registry_url=%s \ 308 | run_smoke_tests=%s \ 309 | run_only_smoke_tests=%s \ 310 | num_smoke_test_users=%s \ 311 | default_password=%s\' %s' % (cluster_id, 312 | region, 313 | ami, 314 | keypair, 315 | master_instance_type, 316 | infra_instance_type, 317 | node_instance_type, 318 | r53_zone, 319 | host_zone, 320 | wildcard_zone, 321 | console_port, 322 | api_port, 323 | num_nodes, 324 | num_infra, 325 | num_masters, 326 | hexboard_size, 327 | deployment_type, 328 | package_version, 329 | rhsm_user, 330 | rhsm_pass, 331 | skip_subscription_management, 332 | use_certificate_repos, 333 | aos_repo, 334 | prerelease, 335 | kerberos_user, 336 | kerberos_token, 337 | registry_url, 338 | run_smoke_tests, 339 | run_only_smoke_tests, 340 | num_smoke_test_users, 341 | default_password, 342 | playbook) 343 | 344 | if verbose > 0: 345 | command += " -" + "".join(['v']*verbose) 346 | click.echo('We are running: %s' % command) 347 | 348 | status = os.system(command) 349 | if os.WIFEXITED(status) and os.WEXITSTATUS(status) != 0: 350 | return os.WEXITSTATUS(status) 351 | 352 | # if the last run playbook didn't explode, assume cluster provisioned successfully 353 | # but make sure that user was not just running tests or cleaning up 354 | if os.WIFEXITED(status) and os.WEXITSTATUS(status) == 0: 355 | if not debug_playbook and not run_only_smoke_tests and not cleanup: 356 | click.echo('Your cluster provisioned successfully. 
The console is available at https://openshift.%s:%s' % (host_zone, console_port)) 357 | click.echo('You can SSH into a master using the same SSH key with: ssh -i /path/to/key.pem openshift@openshift-master.%s' % (host_zone)) 358 | click.echo('**After logging into the OpenShift console** you will need to visit https://metrics.%s and accept the Hawkular SSL certificate' % ( wildcard_zone )) 359 | click.echo('You can access Kibana at https://kibana.%s' % ( wildcard_zone )) 360 | 361 | if cleanup: 362 | click.echo('Your cluster, %s, was de-provisioned and removed successfully.' % (cluster_id)) 363 | 364 | if __name__ == '__main__': 365 | # check for AWS access info 366 | if os.getenv('AWS_ACCESS_KEY_ID') is None or os.getenv('AWS_SECRET_ACCESS_KEY') is None: 367 | print 'AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY **MUST** be exported as environment variables.' 368 | sys.exit(1) 369 | 370 | launch_demo_env(auto_envvar_prefix='OSE_DEMO') 371 | --------------------------------------------------------------------------------
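A typical invocation of run.py, assuming AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY are already exported, might look like the following (the zone, keypair, and RHSM credentials below are placeholders, not project defaults):

  ./run.py --r53-zone example.com --cluster-id demo --keypair my-keypair \
    --num-nodes 2 --num-infra 1 --rhsm-user someuser --rhsm-pass somepass

Options that are omitted either fall back to the defaults shown in the click decorators above or are prompted for interactively (for example the R53 zone and the RHSM credentials). Because launch_demo_env is invoked with auto_envvar_prefix='OSE_DEMO', each option can presumably also be supplied as an environment variable such as OSE_DEMO_RHSM_USER or OSE_DEMO_R53_ZONE.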