├── .gitignore ├── .gitmodules ├── LICENSE ├── README.md ├── allinone.hosts ├── allinone.yml ├── ansible ├── ansible.cfg ├── bootstrap.yml ├── common ├── status.yml ├── verify-local-requirements.yml └── wait_for.yml ├── doc ├── ANSIBLE_WRAPPER.md ├── BASTION.md ├── CUSTOMIZATION.md ├── MACHINE_PREPARATION.md ├── PLAYBOOKS.md ├── ROLES.md └── SAMPLE_PARTITIONING.md ├── group_vars └── all ├── offline-storage ├── docker │ └── .gitignore ├── files │ └── .gitignore ├── openshift │ └── .gitignore └── rpms │ └── .gitignore └── roles ├── 3scale ├── defaults │ └── main.yml ├── tasks │ ├── 3scale_status.yml │ ├── apicast_cors.yml │ ├── create_api.yml │ ├── create_application_plan.yml │ ├── create_applications.yml │ ├── main.yml │ ├── oauth-client.yml │ ├── patch_apicast.yml │ └── webhooks.yml └── templates │ └── amp.json ├── base ├── defaults │ └── main.yml ├── handlers │ └── main.yml └── tasks │ └── main.yml ├── bootstrap ├── defaults │ └── main.yml ├── tasks │ └── main.yml └── templates │ └── sudoers ├── docker ├── defaults │ └── main.yml ├── tasks │ └── main.yml └── templates │ └── docker-storage-setup ├── hostpath-provisioner ├── defaults │ └── main.yml ├── files │ └── registry-storage-pvc.yaml └── tasks │ └── main.yml ├── microcks ├── defaults │ └── main.yml └── tasks │ ├── main.yml │ ├── post-install.yml │ └── register-service.yml ├── name-resolution ├── tasks │ └── main.yml └── templates │ ├── dnsmasq.conf │ ├── hosts │ └── resolv.conf ├── openshift-postinstall ├── defaults │ └── main.yml ├── tasks │ └── main.yml └── templates │ └── wildcard.conf ├── openshift-prereq └── tasks │ └── main.yml ├── register-rhn └── tasks │ └── main.yml └── sso ├── defaults └── main.yml └── tasks ├── create-client.yml ├── create-user.yml ├── main.yml ├── post-install.yml └── update-route.yml /.gitignore: -------------------------------------------------------------------------------- 1 | admin.pub 2 | *.retry 3 | -------------------------------------------------------------------------------- /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "openshift-ansible"] 2 | path = openshift-ansible 3 | url = https://github.com/openshift/openshift-ansible.git 4 | branch = release-3.6 5 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | The MIT License (MIT) 2 | 3 | Copyright (c) 2016 Nicolas MASSE 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # An "easy to use" OpenShift Lab 2 | This project is an Ansible Playbook to install OpenShift in a Lab Environment. 3 | 4 | Its goal is to help people easily install OpenShift in a lab environment, 5 | for a test drive or a PoC. So, this project focuses mostly on ease of use instead 6 | of security, availability, etc. **DO NOT USE THIS PROJECT IN PRODUCTION**. 7 | You have been warned. 8 | 9 | It features multiple architecture choices : 10 | - All-in-one: master, etcd, infra node, app node on the same machine (**DONE**) 11 | - Small Cluster: 1 master with etcd, 1 infra node, 2 app nodes (**TODO**) 12 | - Big Cluster: 3 masters with etcd, 2 infra nodes, 2 app nodes, 1 load balancer (**TODO**) 13 | 14 | By default, it deploys the following software in addition to OpenShift : 15 | - Red Hat SSO 16 | - 3scale 17 | - the [OpenShift-Hostpath-Provisioner](https://github.com/nmasse-itix/OpenShift-HostPath-Provisioner) 18 | 19 | This project is different from existing "demo" OpenShift playbooks in the sense that : 20 | - It features a common inventory file for both the OpenShift playbooks and the complementary playbooks. (it's easier to maintain) 21 | - The underlying openshift-ansible playbooks are included directly (as opposed to other approaches that run an `ansible-playbook` command from inside the main playbook). 22 | 23 | By default, this project comes with a git submodule reference to the `openshift-ansible` repository for convenience. 24 | But you could replace this reference with a symlink to your `openshift-ansible` installation, for instance if you installed the supported package from Red Hat. 25 | 26 | ## Requirements 27 | 28 | - This playbook starts from a minimal RHEL 7.3 installation. 29 | - You need at least one free disk partition to hold the docker storage (try to allocate at least 50Gi) 30 | - You will need at least 30Gi of free disk space on /var 31 | 32 | The docker storage partition needs to be added to the `docker` Volume Group. 33 | To do so, if your docker storage partition is /dev/sda3, run : 34 | ``` 35 | vgcreate docker /dev/sda3 36 | ``` 37 | 38 | ## Setup 39 | 40 | 1. First of all, clone this repo : 41 | ``` 42 | git clone https://github.com/nmasse-itix/OpenShift-Lab.git 43 | ``` 44 | 45 | 2. Pull the "openshift-ansible" sub-project using : 46 | ``` 47 | git submodule init 48 | git submodule update 49 | ``` 50 | 3. Review allinone.hosts and change the hostnames to target your environment 51 | 52 | 4. If needed, bootstrap your machines (optional) : 53 | ``` 54 | ./ansible bootstrap vm.openshift.test 55 | ``` 56 | 57 | 5. Run the playbook that installs everything on one machine : 58 | ``` 59 | ./ansible play allinone 60 | ``` 61 | 62 | ## Further reading 63 | 64 | If you plan to use this project regularly, you might want to have a look at the [Ansible roles description](doc/ROLES.md). 65 | And if you need to customize this project to suit your own needs, have a look at the [Customization Guide](doc/CUSTOMIZATION.md).
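As a quick sanity check before launching the playbooks, you can verify on the target machine that the `docker` Volume Group exists and that `/var` has enough free space (both thresholds come from the requirements above; the commands below are shown as an illustration):
```
# Should list a Volume Group named "docker" of at least 50Gi
sudo vgs docker
# Should report at least 30Gi of free space
df -h /var
```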
66 | -------------------------------------------------------------------------------- /allinone.hosts: -------------------------------------------------------------------------------- 1 | # 2 | # Variables used by my playbook 3 | # 4 | [allinone:vars] 5 | lab_dns_suffix=openshift.test 6 | lab_openshift_version=3.6 7 | 8 | [allinone:children] 9 | masters 10 | 11 | # 12 | # Shared variables used by both openshift-ansible and my playbook 13 | # 14 | [all:vars] 15 | # Default route suffix 16 | openshift_master_default_subdomain=app.openshift.test 17 | 18 | # The SSH user that Ansible will use to run playbooks 19 | ansible_ssh_user=redhat 20 | 21 | [masters] 22 | openshift36.openshift.test 23 | 24 | [nodes] 25 | openshift36.openshift.test openshift_schedulable=true openshift_node_labels="{'region': 'infra'}" 26 | 27 | # 28 | # The rest is used only by the OpenShift installer playbook 29 | # 30 | [OSEv3:children] 31 | masters 32 | nodes 33 | 34 | [OSEv3:vars] 35 | # By default, OpenShift 3.6 checks that 16GB of memory is available, 36 | # which is way too much on a standard laptop... 37 | # It also checks for disk space on /var which can be an issue for a lab environment. 38 | openshift_disable_check=memory_availability,disk_availability 39 | 40 | # Deploy the Service Catalog (Warning: Tech Preview in OCP 3.6) 41 | openshift_enable_service_catalog=true 42 | 43 | # Use the hostpath-provisioner to allocate storage for the Service Catalog 44 | openshift_hosted_etcd_storage_kind=dynamic 45 | 46 | # Yes, we need to use sudo 47 | ansible_become=yes 48 | 49 | # what to install 50 | deployment_type=openshift-enterprise 51 | 52 | # New installation method : everything in containers ! 53 | containerized=true 54 | 55 | # Clustering method 56 | openshift_master_cluster_method=native 57 | 58 | # Bypass Registry Security Checks 59 | openshift_docker_insecure_registries=172.30.0.0/16 60 | 61 | # Disable any authentication 62 | openshift_master_identity_providers=[{'name': 'allow_all', 'login': 'true', 'challenge': 'true', 'kind': 'AllowAllPasswordIdentityProvider'}] 63 | 64 | # Make sure NTP is enabled 65 | openshift_clock_enabled=true 66 | 67 | # Do not create the default project "my-project" 68 | openshift_additional_projects={} 69 | 70 | # Choose the default networking plugin so that all projects can communicate with each other 71 | os_sdn_network_plugin_name='redhat/openshift-ovs-subnet' 72 | 73 | # Deploy the metrics 74 | openshift_metrics_install_metrics=True 75 | openshift_metrics_start_cluster=True 76 | 77 | # Metrics storage is provisioned through the hostpath-provisioner 78 | openshift_metrics_cassandra_pvc_size=10Gi 79 | openshift_metrics_cassandra_storage_type=dynamic 80 | -------------------------------------------------------------------------------- /allinone.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Check that the system running this playbook meets our requirements 3 | # (see issues #5 and #9) 4 | - include: "common/verify-local-requirements.yml" 5 | 6 | - name: Prepare an "All-in-one" VM for OpenShift 7 | hosts: allinone 8 | become: yes 9 | roles: 10 | - { name: 'base', tags: 'base' } 11 | - { name: 'name-resolution', tags: 'name-resolution' } 12 | - { name: 'docker', tags: 'docker' } 13 | - { name: 'openshift-prereq', tags: 'openshift-prereq' } 14 | 15 | # Launch the OpenShift Installer Playbook 16 | - include: "./openshift-ansible/playbooks/byo/config.yml" 17 | 18 | - include:
"./openshift-ansible/playbooks/byo/openshift-cluster/openshift-metrics.yml" 19 | vars: 20 | # The hostname to allocate to hawkular (one of the components enabling metrics monitoring) 21 | openshift_metrics_hawkular_hostname: "{{ 'hawkular-metrics.' ~ lab_dns_suffix }}" 22 | 23 | - name: Customize the OpenShift installation 24 | hosts: allinone 25 | become: yes 26 | roles: 27 | - { name: 'openshift-postinstall', tags: 'openshift-postinstall' } 28 | - { name: 'hostpath-provisioner', tags: 'hostpath-provisioner' } 29 | - { name: 'microcks', tags: 'microcks' } 30 | - { name: 'sso', tags: 'sso' } 31 | - { name: '3scale', tags: '3scale' } 32 | 33 | # Update OpenShift to the latest asynchronous errata updates 34 | - include: "./openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade.yml" 35 | -------------------------------------------------------------------------------- /ansible: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | options="" 4 | ssh_key="$HOME/.ssh/id_rsa" 5 | initial_user="root" 6 | 7 | target="$1" 8 | shift 9 | case "$target" in 10 | "bootstrap") 11 | if [ -z "$1" ]; then 12 | echo "Please specify the target host(s) !" 13 | exit 1 14 | fi 15 | echo "Bootstraping $@..." 16 | echo 17 | echo -n "Please enter the initial $initial_user password: " 18 | read -s password 19 | echo # Add a Line Feed since the "read -s" do not output it ! 20 | 21 | # Ask for Red Hat Network credentials 22 | if [ -z "$RHN_LOGIN" ]; then 23 | echo -n "Please enter your RHN login: " 24 | read rhn_login 25 | export RHN_LOGIN="$rhn_login" 26 | fi 27 | if [ -z "$RHN_PASSWORD" ]; then 28 | echo -n "Please enter your RHN password: " 29 | read -s rhn_password 30 | export RHN_PASSWORD="$rhn_password" 31 | echo # Add a Line Feed since the "read -s" do not output it ! 32 | fi 33 | if [ -z "$RHN_POOLID" ]; then 34 | echo -n "Please enter your RHN Pool ID: " 35 | read rhn_poolid 36 | export RHN_POOLID="$rhn_poolid" 37 | fi 38 | echo 39 | echo 40 | 41 | # Pre-register SSH Host Keys 42 | for host; do 43 | echo "Connecting to $host to register the SSH Host Key !" 44 | LC_ALL=C sshpass -p "$password" ssh -i $ssh_key -o StrictHostKeyChecking=no "$initial_user@$host" /bin/true 45 | done 46 | 47 | # Setup authentication 48 | if [ -n "$password" ]; then 49 | options="$options -e ansible_ssh_pass=$password" 50 | else 51 | options="$options -e ansible_ssh_private_key_file=$ssh_key" 52 | fi 53 | 54 | # Setup the ssh user 55 | options="$options -e ansible_ssh_user=$initial_user " 56 | 57 | # Generate an inventory file "on the fly" 58 | echo "[bootstrap]" > "bootstrap.hosts" 59 | for host; do 60 | echo -e "$host" 61 | done >> "bootstrap.hosts" 62 | 63 | ansible-playbook -i "bootstrap.hosts" $options bootstrap.yml 64 | 65 | rm -f "bootstrap.hosts" 66 | ;; 67 | "play") 68 | if [ -z "$1" ]; then 69 | echo "Please specify the playbook to run !" 
70 | exit 1 71 | fi 72 | 73 | playbook="$1" 74 | shift 75 | 76 | ansible-playbook -i "$playbook.hosts" $options "$@" $playbook.yml 77 | ;; 78 | *) 79 | echo "Usage: $0 {bootstrap|play} [options]" 80 | echo 81 | echo "Samples: " 82 | echo " $0 bootstrap machine.example.com" 83 | echo " $0 play allinone" 84 | exit 1 85 | ;; 86 | esac 87 | -------------------------------------------------------------------------------- /ansible.cfg: -------------------------------------------------------------------------------- 1 | [defaults] 2 | # This is needed by the openshift-ansible installer 3 | deprecation_warnings=False 4 | -------------------------------------------------------------------------------- /bootstrap.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Bootstrap one or more RHEL7 nodes 4 | hosts: bootstrap 5 | become: no 6 | roles: 7 | - bootstrap 8 | - register-rhn 9 | -------------------------------------------------------------------------------- /common/status.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Retrieve current ReplicationController status 4 | command: 'oc get rc -o json -n "{{ project }}"' 5 | register: rc_state 6 | changed_when: false 7 | 8 | - name: Parse the list of deployed ReplicationController 9 | set_fact: 10 | replication_controllers: '{{ rc_state.stdout |from_json |json_query(''items[? @.status.replicas && @.status.replicas != `0`].metadata.annotations."openshift.io/deployment-config.name"'') }}' 11 | replication_controllers_status: '{{ rc_state.stdout |from_json |json_query(''items[? @.status.replicas && @.status.replicas != `0`].{"name": metadata.annotations."openshift.io/deployment-config.name", "status": status.readyReplicas}'') }}' 12 | 13 | 14 | - name: Retrieve current DeploymentConfig status 15 | command: 'oc get dc -o json -n "{{ project }}"' 16 | register: dc_state 17 | changed_when: false 18 | 19 | - name: Parse the list of DeploymentConfig 20 | set_fact: 21 | deployment_configs: '{{ dc_state.stdout |from_json |json_query(''items[? metadata.generation > `1`].metadata.name'') }}' 22 | deployment_configs_status: '{{ dc_state.stdout |from_json |json_query(''items[? 
metadata.generation > `1` ].{"name": metadata.name, "status": status.replicas}'') }}' 23 | -------------------------------------------------------------------------------- /common/verify-local-requirements.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - hosts: localhost 4 | connection: local 5 | tasks: 6 | 7 | - name: Check if jmespath is installed locally 8 | debug: msg={{dummy|json_query('@')}} 9 | register: check_jmespath 10 | ignore_errors: yes 11 | vars: 12 | dummy: Hello World 13 | 14 | - name: Check if jinja 2.8 is installed locally 15 | debug: msg={{(dummy|selectattr("id", "equalto", "hello")|first)['value']}} 16 | vars: 17 | dummy: 18 | - id: hello 19 | value: Hello World 20 | register: check_jinja28 21 | ignore_errors: yes 22 | 23 | - set_fact: 24 | jmespath_missing: '{{ check_jmespath|failed }}' 25 | jinja28_missing: '{{ check_jinja28|failed }}' 26 | on_rhel7: '{{ ansible_distribution == ''RedHat'' and ansible_distribution_major_version|int == 7 }}' 27 | 28 | - debug: 29 | msg: "jmespath is not installed on the machine that runs the playbooks" 30 | when: jmespath_missing 31 | 32 | - fail: 33 | msg: "This playbook can install the missing packages by itself on RHEL 7.x, but we are not on such a system (detected OS : {{ansible_distribution}} {{ansible_distribution_version}}). See https://github.com/nmasse-itix/OpenShift-Lab/issues/5 for more information." 34 | when: 'jmespath_missing and not on_rhel7' 35 | 36 | - fail: 37 | msg: "This playbook can install the missing packages by itself on RHEL 7.x, but we are not on such a system (detected OS : {{ansible_distribution}} {{ansible_distribution_version}}). See https://github.com/nmasse-itix/OpenShift-Lab/issues/9 for more information." 38 | when: 'jinja28_missing and not on_rhel7' 39 | 40 | - name: Enable the RHSCL repo 41 | command: subscription-manager repos --enable rhel-server-rhscl-7-rpms 42 | become: yes 43 | when: '(jmespath_missing or jinja28_missing) and on_rhel7' 44 | 45 | - name: Install PIP 46 | yum: name=python27-python-pip state=installed 47 | become: yes 48 | when: '(jmespath_missing or jinja28_missing) and on_rhel7' 49 | 50 | - name: Install JMESPATH 51 | command: 'scl enable python27 ''pip install --install-option="--install-purelib=/usr/lib/python2.7/site-packages/" jmespath'' ' 52 | become: yes 53 | when: 'jmespath_missing and on_rhel7' 54 | 55 | - name: Update jinja to version 2.8 56 | command: 'scl enable python27 ''pip install --install-option="--install-purelib=/usr/lib/python2.7/site-packages/" jinja2'' ' 57 | become: yes 58 | when: 'jinja28_missing and on_rhel7' 59 | -------------------------------------------------------------------------------- /common/wait_for.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Wait for all pending deployments to become ready 4 | command: 'oc get rc -o json -n "{{ project }}"' 5 | register: rc_state 6 | changed_when: false 7 | retries: "{{ retries }}" 8 | delay: "{{ delay }}" 9 | until: 'rc_state.stdout |from_json |json_query(''items[?
status.replicas != `0` && (status.readyReplicas == ""|| status.readyReplicas == `0`) ].metadata.annotations."openshift.io/deployment-config.name"'') |intersect(pod_to_wait) |length == 0' 10 | -------------------------------------------------------------------------------- /doc/ANSIBLE_WRAPPER.md: -------------------------------------------------------------------------------- 1 | # Ansible Wrapper presentation 2 | 3 | The Ansible Wrapper is a small shell script (`./ansible`) that does two things : 4 | - It calls the bootstrap playbook with the right environment variables 5 | - It calls the target playbooks (`allinone.yml` for instance) with the right inventory file 6 | 7 | ## Bootstrap 8 | 9 | Usually, when machines are provisioned, they are not ready to be used with Ansible. 10 | For example : 11 | - There is no regular user account, `root` is the only available user 12 | - Your SSH keys are not yet installed, so password authentication is required 13 | - Sudo might not be configured 14 | - etc. 15 | 16 | The Ansible wrapper will : 17 | - Make sure the SSH Host Key of the target machine is trusted (otherwise Ansible would complain...) 18 | - Perform password authentication for the first connection (thanks to `sshpass`) 19 | - Add your SSH keys to the `authorized_keys` 20 | - Create a regular user (by default: `redhat`) 21 | - Install and configure sudo 22 | - Register the machine with the Red Hat Network (RHN) 23 | - Attach a subscription pool 24 | 25 | To use the wrapper, you need to make sure you have `sshpass` installed : 26 | ``` 27 | sshpass -V 28 | ``` 29 | 30 | If it is not installed, set up sshpass as explained here : https://gist.github.com/arunoda/7790979 31 | 32 | To bootstrap a machine, just use : 33 | ``` 34 | ./ansible bootstrap machine1.compute.internal 35 | ``` 36 | 37 | __Tip :__ You can pass multiple machines on the command line to bootstrap them all at the same time. 38 | 39 | The wrapper will then ask you a few questions : 40 | - The root password. If you have already set up SSH key authentication, you can just hit enter. 41 | - Your RHN login 42 | - Your RHN password 43 | - The Pool ID that you would like to use. If you do not provide a Pool ID, no pool will be attached and you will have to do it later manually. 44 | 45 | ## Daily usage 46 | 47 | Once your machines are bootstrapped, you can launch the target playbook (`allinone` for instance) with : 48 | ``` 49 | ./ansible play allinone 50 | ``` 51 | 52 | __Note :__ the `play` command is just a shortcut to `ansible-playbook -i <playbook>.hosts <playbook>.yml` 53 | -------------------------------------------------------------------------------- /doc/BASTION.md: -------------------------------------------------------------------------------- 1 | ## Connection through a bastion host 2 | 3 | Sometimes, your target machines are on a restricted network where access is 4 | done through a "bastion host" (also called "jump host"). 5 | 6 | This section explains how to configure this project to work with such a 7 | configuration. 8 | 9 | Two variants of this configuration are possible : 10 | 1. The jump host holds the SSH keys to connect to the target host 11 | 2. The jump host has no SSH key, the SSH keys remain on your machine 12 | 13 | In the second configuration, you will have to set up your SSH Agent (if not 14 | already done) and forward it.
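A quick way to check, once connected to the jump host, which variant you are in is to list the keys your agent exposes there (standard OpenSSH tooling, shown as an illustration):
```
ssh-add -l
```
If your key fingerprint is listed, agent forwarding works; an error such as "Could not open a connection to your authentication agent" means the agent is not available on the jump host.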
15 | 16 | ### Step 1: Setup your SSH Agent (optional) 17 | 18 | Run the SSH Agent : 19 | ``` 20 | eval "$(ssh-agent -s)" 21 | ``` 22 | 23 | And add your SSH key to your agent : 24 | ``` 25 | ssh-add ~/.ssh/id_rsa 26 | ``` 27 | 28 | Source : https://help.github.com/articles/generating-a-new-ssh-key-and-adding-it-to-the-ssh-agent/ 29 | 30 | ### Step 2: Create the ssh.cfg 31 | 32 | Create a file named `ssh.cfg` with the following content : 33 | ``` 34 | Host jump.host 35 | Hostname jump.host 36 | User john-adm 37 | ForwardAgent yes 38 | ControlMaster auto 39 | ControlPath ~/.ssh/ansible-%r@%h:%p 40 | ControlPersist 5m 41 | 42 | Host 10.0.0.* 43 | ProxyCommand ssh -q -W %h:%p jump.host 44 | User john 45 | ``` 46 | 47 | You will have to replace `jump.host` (three occurrences) with the hostname of your jump host. 48 | Also make sure that the two usernames match your environment : 49 | - The first `User` stanza is the username you will use to connect to your jump host 50 | - The second `User` stanza is the username you will use to connect to your target host 51 | 52 | You will also have to replace `10.0.0.*` with the subnet of your target machines. 53 | If you reference your machines by DNS names instead of IP addresses, you could use 54 | the DNS suffix common to your target machines, like `*.compute.internal`. 55 | 56 | Note: the `ForwardAgent` stanza is only required if your jump host does not hold 57 | the SSH keys to connect to your target machines. 58 | 59 | Now you can test your ssh.cfg by issuing the following command : 60 | ``` 61 | ssh -F ssh.cfg your.target.host 62 | ``` 63 | If your configuration is correct, you will be directly connected to your target 64 | host. 65 | 66 | ### Step 3: Edit the Ansible configuration file 67 | 68 | Edit the `ansible.cfg` file and add : 69 | ``` 70 | # Connection through a jump host 71 | [ssh_connection] 72 | ssh_args = -F ./ssh.cfg -o ControlMaster=auto -o ControlPersist=30m 73 | control_path = ~/.ssh/ansible-%%r@%%h:%%p 74 | ``` 75 | 76 | You can test that your setup is correct by using the `ping` module of Ansible : 77 | ``` 78 | ansible -i your-inventory-file all -m ping 79 | ``` 80 | 81 | If your setup is correct, you should see something like : 82 | ``` 83 | machine1.internal | SUCCESS => { 84 | "changed": false, 85 | "ping": "pong" 86 | } 87 | machine2.internal | SUCCESS => { 88 | "changed": false, 89 | "ping": "pong" 90 | } 91 | ``` 92 | 93 | Note: sometimes your lab has no DNS server and you have to connect to your target 94 | machines using IP addresses.
If you still want to name your machines in Ansible 95 | with a nice name, you can declare the target machines in the inventory file like this : 96 | ``` 97 | machine1.internal ansible_host=10.0.0.1 98 | machine2.internal ansible_host=10.0.0.2 99 | ``` 100 | -------------------------------------------------------------------------------- /doc/CUSTOMIZATION.md: -------------------------------------------------------------------------------- 1 | # TODO 2 | -------------------------------------------------------------------------------- /doc/MACHINE_PREPARATION.md: -------------------------------------------------------------------------------- 1 | # Preparation of target machines 2 | 3 | Currently, the machines need to have at least 2 disk partitions : 4 | - 1 partition for the Operating System (**REQUIRED**) 5 | - 1 LVM partition for the Docker Storage (**REQUIRED**) 6 | 7 | A third partition is recommended but not required : 8 | - 1 partition for the OpenShift Persistent Volumes (**OPTIONAL**) 9 | 10 | Minimal requirements : 11 | - the Docker Storage partition has to be at least 50 GiB 12 | - the OpenShift Persistent Volumes partition has to be at least 30 GiB (**OPTIONAL**) 13 | - the Operating System partition has to be at least 10 GiB if you have a dedicated 14 | partition for OpenShift PVs, 40 GiB otherwise. 15 | 16 | If your machine has only one disk, you can create partitions (which may or may not use LVM underneath; the choice is yours). 17 | An alternative when using Virtual Machines is to add 3 disks to the VM; the setup is then a bit easier. 18 | 19 | The OS partition is created by the RHEL installer so you do not have to care much about it. 20 | 21 | The Docker Storage partition **has to be LVM** and **has to be in a separate Volume Group**. 22 | Namely, if your Docker Storage partition is `/dev/sda2`, you can create a separate Volume Group by using : 23 | ``` 24 | vgcreate docker /dev/sda2 25 | ``` 26 | 27 | The OpenShift Persistent Volumes partition, while not required, is still highly recommended. 28 | By having a dedicated partition, if the Persistent Volumes start to grow they will not 29 | fill up the OS partition. 30 | 31 | If your OpenShift PV partition is `/dev/sda3`, you can set it up by using : 32 | ``` 33 | mkfs.xfs /dev/sda3 34 | echo "/dev/sda3 /var/openshift xfs defaults 0 0" >> /etc/fstab 35 | ``` 36 | 37 | If you kept the default values (`docker` for the Volume Group name and 38 | `/var/openshift` for the OpenShift PV mount point), no further setup is required. 39 | 40 | Otherwise, you might have to set the following variables in your inventory file : 41 | - `docker_storage_vg` 42 | - `hostpath_provisioner_options` 43 | 44 | See the [Sample Partitioning Guide](SAMPLE_PARTITIONING.md) for a step-by-step guide on how to prepare 45 | partitions for OpenShift. 46 | -------------------------------------------------------------------------------- /doc/PLAYBOOKS.md: -------------------------------------------------------------------------------- 1 | # Playbooks description 2 | 3 | ## Bootstrap (`bootstrap.yml`) 4 | 5 | The bootstrap playbook is used to prepare a machine to be managed by Ansible.
6 | Namely, it will : 7 | - Create a regular user account (named `redhat`) 8 | - Add your SSH Public Key to the `authorized_keys` of `root` and `redhat` 9 | - Install and configure `sudo` so that the `redhat` user can launch commands as `root` without password 10 | - Register the machine on the RHN (Red Hat Network) 11 | 12 | To work, this playbook requires a few environment variables : 13 | 14 | | Environment Variable | Description | 15 | | --- | --- | 16 | | RHN_LOGIN | Your Red Hat Network login | 17 | | RHN_PASSWORD | Your Red Hat Network password | 18 | | RHN_POOLID | The subscription pool you want to use | 19 | 20 | __Tip :__ You can get the PoolID by querying : 21 | ``` 22 | sudo subscription-manager list --available --matches '*OpenShift*' 23 | ``` 24 | 25 | This playbook is best used with the [Ansible Wrapper](ANSIBLE_WRAPPER.md). 26 | 27 | ## All-in-one cluster (`allinone.yml`) 28 | 29 | The All-in-one cluster playbook will deploy everything on one machine. It is very 30 | convenient for development or PoCs where the focus is on the features rather than on the infrastructure. 31 | 32 | Minimal requirements for the target machine are : 33 | - 2 Cores 34 | - 4 GB of RAM 35 | - Hard Disk partitioned as explained in the [Machine Preparation Guide](MACHINE_PREPARATION.md) 36 | 37 | Recommended config : 38 | - 4 Cores 39 | - 10 GB of RAM 40 | - Hard Disk partitioned as explained in the [Machine Preparation Guide](MACHINE_PREPARATION.md) 41 | 42 | See the [Machine Preparation Guide](MACHINE_PREPARATION.md) for more details about partitioning. 43 | 44 | ## Small cluster (TODO) 45 | 46 | TODO 47 | 48 | ## Big cluster (TODO) 49 | 50 | TODO 51 | -------------------------------------------------------------------------------- /doc/ROLES.md: -------------------------------------------------------------------------------- 1 | # Roles description 2 | 3 | ## Bootstrap roles 4 | 5 | | Role | Description | 6 | | --- | --- | 7 | | [bootstrap](../roles/bootstrap/) | adds your SSH key to `authorized_keys`, creates users, configures sudo | 8 | | [register-rhn](../roles/register-rhn/) | registers the target machine on RHN (Red Hat Network) and attaches a subscription pool | 9 | 10 | ## Regular roles 11 | 12 | | Role | Description | 13 | | --- | --- | 14 | | [base](../roles/base/) | configures SSH to forbid password authentication, installs basic software and sets the hostname | 15 | | [name-resolution](../roles/name-resolution/) | ensures name resolution throughout the whole cluster | 16 | | [docker](../roles/docker/) | installs docker and configures docker storage | 17 | | [openshift-prereq](../roles/openshift-prereq/) | ensures the system meets the prerequisites for the OpenShift installation | 18 | | [openshift-postinstall](../roles/openshift-postinstall/) | installs the latest JBoss ImageStreams | 19 | | [3scale](../roles/3scale/) | deploys 3scale | 20 | | [sso](../roles/sso/) | deploys Red Hat SSO | 21 | -------------------------------------------------------------------------------- /doc/SAMPLE_PARTITIONING.md: -------------------------------------------------------------------------------- 1 | # Sample partitioning 2 | 3 | ## Three-disk partitioning 4 | 5 | This guide provides step-by-step instructions to help you partition your disks 6 | for OpenShift.
7 | 8 | It assumes you created a Virtual Machine with three disks : 9 | - `/dev/sda`: 10 GiB for the Operating System 10 | - `/dev/sdb`: 30 GiB for the OpenShift PVs 11 | - `/dev/sdc`: 50 GiB for the Docker Storage 12 | 13 | `/dev/sda` is partitioned during installation. If possible, use LVM, which will 14 | give you greater flexibility if you need to extend that partition later. 15 | 16 | Make sure **NOT** to allocate swap space, since disabling swap is a [recommended best practice](https://docs.openshift.com/container-platform/3.5/admin_guide/overcommit.html#disabling-swap-memory). 17 | 18 | After installation, you should have : 19 | - `/boot` backed by a primary partition, 512 MiB 20 | - `/` backed by a Volume Group named `rhel`, with a Physical Volume backed by a primary partition, 9.5 GiB 21 | - `/dev/sdb` and `/dev/sdc` are now empty 22 | 23 | ``` 24 | $ sudo fdisk -l /dev/sda 25 | 26 | Disk /dev/sda: 10.7 GB, 10737418240 bytes, 20971520 sectors 27 | Units = sectors of 1 * 512 = 512 bytes 28 | Sector size (logical/physical): 512 bytes / 512 bytes 29 | I/O size (minimum/optimal): 512 bytes / 512 bytes 30 | Disk label type: dos 31 | Disk identifier: 0x000a801b 32 | 33 | Device Boot Start End Blocks Id System 34 | /dev/sda1 * 2048 1050624 524288 83 Linux 35 | /dev/sda2 1050625 20971519 9960447 8e Linux LVM 36 | 37 | $ sudo vgs 38 | VG #PV #LV #SN Attr VSize VFree 39 | rhel 1 1 0 wz--n- 9.50g 0 40 | 41 | $ sudo pvs 42 | PV VG Fmt Attr PSize PFree 43 | /dev/sda2 rhel lvm2 a-- 9.50g 0 44 | ``` 45 | 46 | __You can now partition `/dev/sdb` (OpenShift Persistent Volumes):__ 47 | 48 | <pre>
 49 | $ sudo fdisk /dev/sdb 
 50 | Welcome to fdisk (util-linux 2.23.2).
 51 | 
 52 | Changes will remain in memory only, until you decide to write them.
 53 | Be careful before using the write command.
 54 | 
 55 | 
 56 | Command (m for help): n
 57 | Partition type:
 58 |    p   primary
 59 |    e   extended
 60 | Select (default p): p
 61 | Partition number (1,2,3,4, default 1): 1
 62 | First sector (2048-62914559, default 2048): <ENTER>
 63 | Using default value 2048
 64 | Last sector, +sectors or +size{K,M,G} (2049-62914559, default 62914559): <ENTER>
 65 | Using default value 62914559
 66 | Partition 1 of type Linux and of size 30720 MiB is set
 67 | 
 68 | Command (m for help): t
 69 | Partition number (1, default 1): 1
 70 | Hex code (type L to list all codes): 8e
 71 | Changed type of partition 'Linux' to 'Linux LVM'
 72 | 
 73 | Command (m for help): w
 74 | The partition table has been altered!
 75 | 
 76 | Calling ioctl() to re-read partition table.
 77 | 
 78 | WARNING: Re-reading the partition table failed with error 16: Device or resource busy.
 79 | The kernel still uses the old table. The new table will be used at
 80 | the next reboot or after you run partprobe(8) or kpartx(8)
 81 | Syncing disks.
 82 | 
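As the warning above notes, the kernel may keep using the old partition table. If `/dev/sdb1` does not show up right away, you can re-read the partition table without rebooting, using the partprobe(8) command mentioned in the fdisk output (device name as used in this guide):
```
sudo partprobe /dev/sdb
```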
83 | 84 | Create a Volume Group and add the new partition: 85 | ``` 86 | sudo vgcreate storage /dev/sdb1 87 | ``` 88 | 89 | Create a new Logical Volume: 90 | ``` 91 | sudo lvcreate storage -n openshift -l 100%FREE 92 | ``` 93 | 94 | Format it: 95 | ``` 96 | sudo mkfs.xfs /dev/mapper/storage-openshift 97 | ``` 98 | 99 | Create an entry in `/etc/fstab`: 100 | ``` 101 | sudo -i 102 | echo "/dev/mapper/storage-openshift /var/openshift xfs defaults 0 0" >> /etc/fstab 103 | ``` 104 | 105 | Finalize the setup: 106 | ``` 107 | sudo mkdir /var/openshift 108 | sudo mount /var/openshift 109 | sudo chmod 777 -R /var/openshift 110 | sudo chcon -Rt svirt_sandbox_file_t /var/openshift 111 | ``` 112 | 113 | __You can now partition `/dev/sdc` (Docker Storage):__ 114 | 115 |
116 | $ sudo fdisk /dev/sdc 
117 | Welcome to fdisk (util-linux 2.23.2).
118 | 
119 | Changes will remain in memory only, until you decide to write them.
120 | Be careful before using the write command.
121 | 
122 | 
123 | Command (m for help): n
124 | Partition type:
125 |    p   primary
126 |    e   extended
127 | Select (default p): p
128 | Partition number (1,2,3,4, default 1): 1
129 | First sector (2048-104857599, default 2048): <ENTER>
130 | Using default value 2048
131 | Last sector, +sectors or +size{K,M,G} (2049-104857599, default 104857599): <ENTER>
132 | Using default value 104857599
133 | Partition 1 of type Linux and of size 51200 MiB is set
134 | 
135 | Command (m for help): t
136 | Partition number (1, default 1): 1
137 | Hex code (type L to list all codes): 8e
138 | Changed type of partition 'Linux' to 'Linux LVM'
139 | 
140 | Command (m for help): w
141 | The partition table has been altered!
142 | 
143 | Calling ioctl() to re-read partition table.
144 | 
145 | WARNING: Re-reading the partition table failed with error 16: Device or resource busy.
146 | The kernel still uses the old table. The new table will be used at
147 | the next reboot or after you run partprobe(8) or kpartx(8)
148 | Syncing disks.
149 | 
150 | 151 | Create a Volume Group and add the new partition: 152 | ``` 153 | sudo vgcreate docker /dev/sdc1 154 | ``` 155 | 156 | And that's it ! :) 157 | -------------------------------------------------------------------------------- /group_vars/all: -------------------------------------------------------------------------------- 1 | --- 2 | # On some distro, /usr/bin/python does not exists... Here is the workaround 3 | # to make it work everywhere 4 | ansible_python_interpreter: /usr/bin/python2 5 | 6 | # The SSH Private Key that Ansible will use to connect to target hosts 7 | # Also, the matching public key will be used by the "bootstrap" role. 8 | ansible_ssh_private_key_file: "{{ lookup('env','HOME') }}/.ssh/id_rsa" 9 | 10 | # Use the good old SSH command instead of paramiko that fails on unknown keys 11 | ansible_connection: ssh 12 | -------------------------------------------------------------------------------- /offline-storage/docker/.gitignore: -------------------------------------------------------------------------------- 1 | * 2 | -------------------------------------------------------------------------------- /offline-storage/files/.gitignore: -------------------------------------------------------------------------------- 1 | * 2 | -------------------------------------------------------------------------------- /offline-storage/openshift/.gitignore: -------------------------------------------------------------------------------- 1 | * 2 | -------------------------------------------------------------------------------- /offline-storage/rpms/.gitignore: -------------------------------------------------------------------------------- 1 | * 2 | -------------------------------------------------------------------------------- /roles/3scale/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | threescale_apicast_git_repo: https://github.com/3scale/apicast.git 4 | threescale_apicast_tag: 3.1-stable 5 | threescale_template: https://raw.githubusercontent.com/3scale/3scale-amp-openshift-templates/2.0.0.GA/amp/amp.yml 6 | threescale_template_format: YAML 7 | threescale_project: 3scale 8 | threescale_tenant_name: 3scale 9 | threescale_wildcard_domain: "{{ openshift_master_default_subdomain }}" 10 | threescale_delay: 5 11 | threescale_retries: 30 12 | threescale_webhooks_secret: supersecret 13 | threescale_expected_deployment_configs: 14 | - apicast-staging 15 | - apicast-production 16 | - backend-redis 17 | - system-memcache 18 | - system-mysql 19 | - system-redis 20 | - backend-listener 21 | - backend-worker 22 | - system-app 23 | - system-resque 24 | - system-sidekiq 25 | - backend-cron 26 | - system-sphinx 27 | - zync 28 | - zync-database 29 | - apicast-wildcard-router 30 | threescale_apis_to_create: 31 | - service: 32 | name: "Hello API" 33 | system_name: hello-api 34 | backend_version: 1 # 1 means "API Key" 35 | proxy: 36 | credentials_location: headers 37 | api_test_path: /rest/Hello%20API%20Mock/0.8/v1/hello?David 38 | backend: microcks 39 | application_plans: 40 | - name: Default 41 | system_name: default 42 | default: true 43 | state: published 44 | applications: 45 | - name: Hello App 46 | description: "This is my very first application" 47 | - service: 48 | name: "Hello API OAuth" 49 | system_name: hello-api-oauth 50 | backend_version: oauth 51 | proxy: 52 | credentials_location: headers 53 | api_test_path: /rest/Hello%20API%20Mock/0.8/v1/hello?Gavin 54 | backend: microcks 55 | application_plans: 56 | - name: 
Default 57 | system_name: default 58 | default: true 59 | state: published 60 | applications: 61 | - name: Hello App with OAuth 62 | description: "This is my very first application using OAuth" 63 | redirect_url: https://www.getpostman.com/oauth2/callback 64 | application_id: "my-app" 65 | application_key: "{{ 'my-app'|sha1() }}" 66 | -------------------------------------------------------------------------------- /roles/3scale/tasks/3scale_status.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Get a list of available services 4 | uri: 5 | url: "https://{{ threescale_admin_hostname }}/admin/api/services.json?access_token={{ access_token }}" 6 | validate_certs: no 7 | register: response 8 | 9 | - set_fact: 10 | services: '{{ response.json|json_query(''services[*].service.system_name'') }}' 11 | services_details: '{{ response.json|json_query(''services[].{"system_name": service.system_name, "id": service.id}'') }}' 12 | 13 | - name: Get the list of existing applications 14 | uri: 15 | url: https://{{ threescale_admin_hostname }}/admin/api/applications.json?access_token={{ access_token|urlencode }} 16 | validate_certs: no 17 | register: response 18 | 19 | - set_fact: 20 | applications: '{{ response.json|json_query(''applications[*].application.name'') }}' 21 | 22 | - name: Get the default (first) account 23 | uri: 24 | url: https://{{ threescale_admin_hostname }}/admin/api/accounts.json?access_token={{ access_token|urlencode }}&state=approved&page=1&per_page=1 25 | validate_certs: no 26 | register: response 27 | 28 | - set_fact: 29 | account_id: '{{ response.json.accounts[0].account.id }}' 30 | -------------------------------------------------------------------------------- /roles/3scale/tasks/apicast_cors.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Create a temporary directory 3 | tempfile: state=directory 4 | register: tempfile 5 | 6 | - name: Use a local copy of the 3scale/apicast GIT repo 7 | set_fact: 8 | threescale_apicast_git_repo: 'http://{{ offline_git_route }}/{{ threescale_apicast_git_repo|basename|regex_replace(''[.]git$'', '''') }}' 9 | when: 'is_offline|default(false)|bool' 10 | 11 | - name: Check out the 3scale/apicast GIT repo 12 | git: 13 | repo: '{{ threescale_apicast_git_repo }}' 14 | dest: '{{ tempfile.path }}' 15 | version: '{{ threescale_apicast_tag|default(''master'') }}' 16 | 17 | - name: Create ConfigMap 'apicast-custom-module' 18 | command: chdir={{ tempfile.path }} oc create configmap apicast-custom-module --from-file=examples/cors/cors.lua -n "{{ threescale_project }}" 19 | register: oc 20 | failed_when: oc.rc > 0 and 'Error from server (AlreadyExists):' not in oc.stderr 21 | changed_when: oc.rc == 0 22 | 23 | - name: Create ConfigMap 'apicast.d' 24 | command: chdir={{ tempfile.path }} oc create configmap apicast.d --from-file=examples/cors/cors.conf -n "{{ threescale_project }}" 25 | register: oc 26 | failed_when: oc.rc > 0 and 'Error from server (AlreadyExists):' not in oc.stderr 27 | changed_when: oc.rc == 0 28 | 29 | - include: patch_apicast.yml 30 | with_items: 31 | - apicast-staging 32 | - apicast-production 33 | -------------------------------------------------------------------------------- /roles/3scale/tasks/create_api.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - debug: msg="Working on service {{ item.service.name }}" 4 | 5 | - set_fact: 6 | body_create_svc: '{{
"access_token=" ~ access_token|urlencode }}' 7 | 8 | - set_fact: 9 | body_create_svc: '{{ body_create_svc ~ "&" ~ (param.key|urlencode) ~ "=" ~ (param.value|urlencode) }}' 10 | with_dict: '{{ item.service }}' 11 | loop_control: 12 | loop_var: param 13 | when: 'item.service.system_name not in services' 14 | 15 | - name: Create the service 16 | uri: 17 | url: https://{{ threescale_admin_hostname }}/admin/api/services.json 18 | validate_certs: no 19 | method: POST 20 | body: '{{ body_create_svc }}' 21 | status_code: 201 22 | register: response 23 | when: 'item.service.system_name not in services' 24 | 25 | - set_fact: 26 | services: '{{ services|union([ item.service.system_name ]) }}' 27 | services_details: '{{ services_details|union([{ "system_name": item.service.system_name, "id": response.json.service.id }]) }}' 28 | when: 'item.service.system_name not in services' 29 | 30 | - set_fact: 31 | service_id: '{{ (services_details|selectattr("system_name", "equalto", item.service.system_name)|first)["id"] }}' 32 | 33 | - set_fact: 34 | body_update_proxy: '{{ "access_token=" ~ access_token|urlencode }}' 35 | 36 | - set_fact: 37 | body_update_proxy: '{{ body_update_proxy ~ "&" ~ (param.key|urlencode) ~ "=" ~ (param.value|urlencode) }}' 38 | with_dict: '{{ item.proxy }}' 39 | loop_control: 40 | loop_var: param 41 | 42 | - name: Set Backend URL 43 | set_fact: 44 | body_update_proxy: '{{ body_update_proxy ~ "&api_backend" ~ "=" ~ (threescale_backend_map[item.backend]|urlencode) }}' 45 | when: 'item.backend in threescale_backend_map' 46 | 47 | - name: Update the proxy 48 | uri: 49 | url: https://{{ threescale_admin_hostname }}/admin/api/services/{{ service_id }}/proxy.json 50 | validate_certs: no 51 | method: PATCH 52 | body: '{{ body_update_proxy }}' 53 | 54 | - name: Get the list of existing application plans 55 | uri: 56 | url: https://{{ threescale_admin_hostname }}/admin/api/services/{{ service_id }}/application_plans.json?access_token={{ access_token|urlencode }} 57 | validate_certs: no 58 | register: response 59 | 60 | - set_fact: 61 | application_plans: '{{ response.json|json_query(''plans[*].application_plan.system_name'') }}' 62 | application_plans_details: '{{ response.json|json_query(''plans[].{"system_name": application_plan.system_name, "id": application_plan.id}'') }}' 63 | 64 | - include: create_application_plan.yml 65 | with_items: '{{ item.application_plans }}' 66 | loop_control: 67 | loop_var: plan 68 | -------------------------------------------------------------------------------- /roles/3scale/tasks/create_application_plan.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - debug: msg="Working on plan {{ plan.system_name }} / service {{ item.service.name }}" 4 | 5 | - set_fact: 6 | body_create_plan: '{{ "access_token=" ~ access_token|urlencode }}' 7 | 8 | - set_fact: 9 | body_create_plan: '{{ body_create_plan ~ "&" ~ (param.key|urlencode) ~ "=" ~ (param.value|urlencode) }}' 10 | with_dict: '{{ plan }}' 11 | loop_control: 12 | loop_var: param 13 | # applications is a nested hash that is used to create client applications later 14 | when: 'param.key != ''applications'' and plan.system_name not in application_plans ' 15 | 16 | - name: Create the application plan 17 | uri: 18 | url: https://{{ threescale_admin_hostname }}/admin/api/services/{{ service_id }}/application_plans.json 19 | validate_certs: no 20 | method: POST 21 | body: '{{ body_create_plan }}' 22 | status_code: 201 23 | register: response 24 | when: 'plan.system_name 
not in application_plans' 25 | 26 | - set_fact: 27 | application_plans: '{{ application_plans|union([ plan.system_name ]) }}' 28 | application_plans_details: '{{ application_plans_details|union([{ "system_name": plan.system_name, "id": response.json.application_plan.id }]) }}' 29 | when: 'plan.system_name not in application_plans' 30 | 31 | - set_fact: 32 | plan_id: '{{ (application_plans_details|selectattr("system_name", "equalto", plan.system_name)|first)["id"] }}' 33 | 34 | - include: create_applications.yml 35 | with_items: '{{ plan.applications }}' 36 | loop_control: 37 | loop_var: app 38 | when: 'app.name not in applications' 39 | -------------------------------------------------------------------------------- /roles/3scale/tasks/create_applications.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - debug: msg="Working on application {{ app.name }} / plan {{ plan.system_name }} / service {{ item.service.name }}" 4 | 5 | - set_fact: 6 | body_create_app: '{{ "access_token=" ~ (access_token|urlencode) ~ "&plan_id=" ~ (plan_id) }}' 7 | 8 | - set_fact: 9 | body_create_app: '{{ body_create_app ~ "&" ~ (param.key|urlencode) ~ "=" ~ (param.value|urlencode) }}' 10 | with_dict: '{{ app }}' 11 | loop_control: 12 | loop_var: param 13 | 14 | - name: Create the application 15 | uri: 16 | url: https://{{ threescale_admin_hostname }}/admin/api/accounts/{{ account_id }}/applications.json 17 | validate_certs: no 18 | method: POST 19 | body: '{{ body_create_app }}' 20 | status_code: 201 21 | register: response 22 | -------------------------------------------------------------------------------- /roles/3scale/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Get a list of existing projects 4 | command: oc get projects -o name 5 | register: oc_get_projects 6 | changed_when: false 7 | 8 | - name: Create a new project for 3scale 9 | command: oc new-project "{{ threescale_project }}" 10 | when: '"projects/" ~ threescale_project not in oc_get_projects.stdout_lines' 11 | 12 | - include: common/status.yml 13 | vars: 14 | project: "{{ threescale_project }}" 15 | tags: status 16 | 17 | - set_fact: 18 | deploy_needed: '{{ deployment_configs|intersect(threescale_expected_deployment_configs)|length < threescale_expected_deployment_configs|length }}' 19 | 20 | - name: Fetch the latest 3scale AMP template from the 3scale repo 21 | uri: url={{threescale_template}} return_content=yes 22 | register: template 23 | when: deploy_needed 24 | 25 | - name: Parse the YAML file 26 | set_fact: 27 | template: '{{template.content |from_yaml }}' 28 | when: "deploy_needed and threescale_template_format|upper == 'YAML'" 29 | 30 | - name: Parse the JSON file 31 | set_fact: 32 | template: '{{template.content |from_json }}' 33 | when: "deploy_needed and threescale_template_format|upper == 'JSON'" 34 | 35 | - name: Disable the triggers and set the replicas to 0 36 | set_fact: 37 | template: >- 38 | {% for obj in template.objects -%} 39 | {% if obj.kind == 'DeploymentConfig' -%} 40 | {% if obj.spec.update({'replicas': 0, 'triggers': []}) -%}{% endif -%} 41 | {% endif -%} 42 | {% endfor -%} 43 | {{ template }} 44 | when: deploy_needed 45 | 46 | - name: Create a temporary directory 47 | tempfile: state=directory 48 | register: tempfile 49 | when: deploy_needed 50 | 51 | - name: Write the template 52 | template: src=amp.json dest={{tempfile.path}}/amp.json 53 | when: deploy_needed 54 | 55 | - name: Process the OpenShift 
Template and create the OpenShift objects for the 3scale API Management Platform 56 | shell: oc process -f "{{tempfile.path}}/amp.json" -p "TENANT_NAME={{ threescale_tenant_name }}" -p "WILDCARD_DOMAIN={{ threescale_wildcard_domain }}" -n "{{ threescale_project }}" | oc create -f - -n "{{ threescale_project }}" 57 | register: oc 58 | failed_when: oc.rc > 0 and 'Error from server (AlreadyExists):' not in oc.stderr 59 | changed_when: oc.rc == 0 60 | when: deploy_needed 61 | 62 | - name: Wait for OpenShift to create all objects 63 | pause: 64 | seconds: '{{ threescale_delay }}' 65 | when: deploy_needed 66 | 67 | # Deploy the CORS Configuration for APICast 68 | # This is needed to make the "Try out" feature working in the Developer Portal 69 | - include: apicast_cors.yml 70 | 71 | - include: common/status.yml 72 | vars: 73 | project: "{{ threescale_project }}" 74 | tags: status 75 | 76 | - name: Deploy the storage tier (MySQL, Redis, PostgreSQL and Memcache) without any replicas 77 | command: oc rollout latest "{{ item }}" -n "{{ threescale_project }}" 78 | with_items: 79 | - backend-redis 80 | - system-memcache 81 | - system-mysql 82 | - system-redis 83 | - zync-database 84 | when: item not in deployment_configs 85 | tags: rollout 86 | 87 | - name: Scale the storage tier (MySQL, Redis, PostgreSQL and Memcache) 88 | command: oc scale dc "{{ item }}" --replicas=1 -n "{{ threescale_project }}" 89 | with_items: 90 | - backend-redis 91 | - system-memcache 92 | - system-mysql 93 | - system-redis 94 | - zync-database 95 | when: item not in replication_controllers 96 | tags: rollout 97 | 98 | - include: common/wait_for.yml 99 | static: no 100 | vars: 101 | pod_to_wait: 102 | - backend-redis 103 | - system-memcache 104 | - system-mysql 105 | - system-redis 106 | - zync-database 107 | delay: "{{ threescale_delay }}" 108 | retries: "{{ threescale_retries }}" 109 | project: "{{ threescale_project }}" 110 | tags: status 111 | 112 | - name: Deploy the backend-listener without any replicas 113 | command: oc rollout latest "{{ item }}" -n "{{ threescale_project }}" 114 | with_items: 115 | - backend-listener 116 | when: item not in deployment_configs 117 | tags: rollout 118 | 119 | - name: Scale backend-listener 120 | command: oc scale dc "{{ item }}" --replicas=1 -n "{{ threescale_project }}" 121 | with_items: 122 | - backend-listener 123 | when: item not in replication_controllers 124 | tags: rollout 125 | 126 | - include: common/wait_for.yml 127 | static: no 128 | vars: 129 | pod_to_wait: 130 | - backend-listener 131 | delay: "{{ threescale_delay }}" 132 | retries: "{{ threescale_retries }}" 133 | project: "{{ threescale_project }}" 134 | tags: status 135 | 136 | - name: Deploy everything else without any replicas 137 | command: oc rollout latest "{{ item }}" -n "{{ threescale_project }}" 138 | with_items: 139 | - backend-worker 140 | - system-app 141 | - system-resque 142 | - system-sidekiq 143 | - backend-cron 144 | - system-sphinx 145 | - apicast-staging 146 | - apicast-production 147 | - apicast-wildcard-router 148 | - zync 149 | when: item not in deployment_configs 150 | tags: rollout 151 | 152 | - name: Scale system-app, system-resque and system-sidekiq 153 | command: oc scale dc "{{ item }}" --replicas=1 -n "{{ threescale_project }}" 154 | with_items: 155 | - system-app 156 | - system-resque 157 | - system-sidekiq 158 | when: item not in replication_controllers 159 | tags: rollout 160 | 161 | - include: common/wait_for.yml 162 | static: no 163 | vars: 164 | pod_to_wait: 165 | - system-app 166 | - 
system-resque 167 | - system-sidekiq 168 | delay: "{{ threescale_delay }}" 169 | retries: "{{ threescale_retries }}" 170 | project: "{{ threescale_project }}" 171 | tags: status 172 | 173 | - name: Scale backend-cron, backend-worker and system-sphinx 174 | command: oc scale dc "{{ item }}" --replicas=1 -n "{{ threescale_project }}" 175 | with_items: 176 | - backend-worker 177 | - backend-cron 178 | - system-sphinx 179 | when: item not in replication_controllers 180 | tags: rollout 181 | 182 | - include: common/wait_for.yml 183 | static: no 184 | vars: 185 | pod_to_wait: 186 | - backend-worker 187 | - backend-cron 188 | - system-sphinx 189 | delay: "{{ threescale_delay }}" 190 | retries: "{{ threescale_retries }}" 191 | project: "{{ threescale_project }}" 192 | tags: status 193 | 194 | - name: Deploy zync, apicast-staging, apicast-production 195 | command: oc scale dc "{{ item }}" --replicas=1 -n "{{ threescale_project }}" 196 | with_items: 197 | - zync 198 | - apicast-wildcard-router 199 | - apicast-staging 200 | - apicast-production 201 | when: item not in replication_controllers 202 | tags: rollout 203 | 204 | - include: common/wait_for.yml 205 | static: no 206 | vars: 207 | pod_to_wait: 208 | - apicast-staging 209 | - apicast-production 210 | delay: "{{ threescale_delay }}" 211 | retries: "{{ threescale_retries }}" 212 | project: "{{ threescale_project }}" 213 | tags: status 214 | 215 | - name: Get Route URL 216 | command: oc get route system-provider-admin-route -n "{{ threescale_project }}" -o 'jsonpath={.spec.host}' 217 | register: route 218 | changed_when: false 219 | tags: status 220 | 221 | - set_fact: 222 | threescale_default_backend_map: 223 | microcks: http://{{ microcks_hostname }} 224 | tags: vars 225 | 226 | - set_fact: 227 | threescale_admin_hostname: '{{ route.stdout }}' 228 | threescale_backend_map: '{{ threescale_default_backend_map |combine(threescale_additional_backend_map|default({})) }}' 229 | tags: vars 230 | 231 | - name: Get the 3scale Administration Access Token 232 | command: oc get dc system-app -n "{{ threescale_project }}" -o 'jsonpath={.spec.template.spec.containers[0].env[?(@.name=="ADMIN_ACCESS_TOKEN")].value}' 233 | register: oc_get_dc 234 | tags: vars 235 | changed_when: false 236 | 237 | - set_fact: 238 | access_token: '{{ oc_get_dc.stdout }}' 239 | tags: vars 240 | 241 | - include: 3scale_status.yml 242 | 243 | - include: create_api.yml 244 | with_items: '{{ threescale_apis_to_create }}' 245 | 246 | - include: oauth-client.yml 247 | tags: oauth-client 248 | when: 'not is_offline|default(False)|bool' 249 | 250 | - name: Get Admin Username 251 | command: oc get dc system-app -n "{{ threescale_project }}" -o 'jsonpath={.spec.template.spec.containers[0].env[?(@.name=="USER_LOGIN")].value}' 252 | register: username 253 | changed_when: false 254 | tags: status 255 | 256 | - name: Get Admin Password 257 | command: oc get dc system-app -n "{{ threescale_project }}" -o 'jsonpath={.spec.template.spec.containers[0].env[?(@.name=="USER_PASSWORD")].value}' 258 | register: password 259 | changed_when: false 260 | tags: status 261 | 262 | - name: 3scale is ready ! 
263 | debug: msg="Login on https://{{ threescale_admin_hostname }} with username = '{{ username.stdout }}' and password = '{{ password.stdout }}'" 264 | tags: status 265 | -------------------------------------------------------------------------------- /roles/3scale/tasks/oauth-client.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - include: "common/status.yml" 4 | vars: 5 | project: "{{ threescale_project }}" 6 | tags: status 7 | 8 | - set_fact: 9 | deploy_needed: '{{ deployment_configs|intersect([ "oauth-client" ])|length < 1 }}' 10 | tags: status 11 | 12 | - name: Get APICast (staging) URL 13 | command: oc get route api-apicast-staging-route -n "{{ threescale_project }}" -o 'jsonpath={.spec.host}' 14 | register: route 15 | changed_when: false 16 | when: deploy_needed 17 | tags: status 18 | 19 | - set_fact: 20 | apicast_hostname: '{{ route.stdout }}' 21 | tags: vars 22 | when: deploy_needed 23 | 24 | - name: Deploy the OAuth client 25 | command: oc new-app -n '{{ threescale_project }}' '{{ threescale_apicast_git_repo }}' --name=oauth-client --context-dir=/examples/oauth2/client/ -e GATEWAY='https://{{ apicast_hostname }}' 26 | when: deploy_needed 27 | 28 | - name: Wait for OpenShift to create all objects 29 | pause: 30 | seconds: '{{ threescale_delay }}' 31 | when: deploy_needed 32 | 33 | - include: common/wait_for.yml 34 | static: no 35 | vars: 36 | pod_to_wait: 37 | - oauth-client 38 | delay: "{{ threescale_delay }}" 39 | retries: "{{ threescale_retries }}" 40 | project: "{{ threescale_project }}" 41 | tags: status 42 | 43 | - name: Expose the OAuth client 44 | command: oc expose service oauth-client -n '{{ threescale_project }}' 45 | when: deploy_needed 46 | 47 | - name: Get OAuth client URL 48 | command: oc get route oauth-client -n "{{ threescale_project }}" -o 'jsonpath={.spec.host}' 49 | register: route 50 | changed_when: false 51 | tags: status 52 | 53 | - set_fact: 54 | oauth_client_hostname: '{{ route.stdout }}' 55 | tags: vars 56 | 57 | - name: Patch the DeploymentConfig to add the REDIRECT_URI variable 58 | command: oc env dc/oauth-client REDIRECT_URI='http://{{ oauth_client_hostname }}/callback' 59 | when: deploy_needed 60 | -------------------------------------------------------------------------------- /roles/3scale/tasks/patch_apicast.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - debug: msg="Updating {{ item }}" 4 | 5 | - name: Check if APICast's DC has volumes 6 | command: oc get dc {{ item }} -o jsonpath='{range .spec.template.spec.volumes[*]}{.name}{"\n"}{end}' -n "{{ threescale_project }}" 7 | register: volumes 8 | changed_when: false 9 | 10 | - name: Add volume 'apicast-custom-module' 11 | command: 'oc set volume dc/{{ item }} -n {{ threescale_project }} --add --name=apicast-custom-module --mount-path /opt/app-root/src/src/custom/ --configmap-name=apicast-custom-module -t configmap' 12 | when: "'apicast-custom-module' not in volumes.stdout_lines" 13 | 14 | - name: Add volume 'apicastd' 15 | command: 'oc set volume dc/{{ item }} -n {{ threescale_project }} --add --name=apicastd --mount-path /opt/app-root/src/apicast.d/ --configmap-name=apicast.d -t configmap' 16 | when: "'apicastd' not in volumes.stdout_lines" 17 | 18 | - name: Add environment variable APICAST_MODULE to the APICast DeploymentConfig 19 | command: oc env dc/{{ item }} APICAST_MODULE=custom/cors -n {{ threescale_project }} 20 | 
-------------------------------------------------------------------------------- /roles/3scale/tasks/webhooks.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - include: "common/status.yml" 4 | vars: 5 | project: "{{ threescale_project }}" 6 | tags: status 7 | 8 | - set_fact: 9 | deploy_needed: '{{ deployment_configs|intersect([ "webhooks" ])|length < 1 }}' 10 | tags: status 11 | 12 | - name: Deploy the Webhooks service 13 | command: oc new-app -n '{{ threescale_project }}' '{{ threescale_webhooks_git_repo }}' --name=webhooks -e SSO_REALM={{ sso_realm }} -e SSO_HOSTNAME={{ sso_route_name }} -e SSO_SERVICE_USERNAME={{ sso_service_username }} -e SSO_SERVICE_PASSWORD={{ sso_service_password }} -e SSO_CLIENT_ID={{ sso_default_client_id }} -e SHARED_SECRET={{ threescale_webhooks_secret }} -e WEBHOOKS_MODULES=log,sso 14 | when: deploy_needed 15 | 16 | - name: Wait for OpenShift to create all objects 17 | pause: 18 | seconds: '{{ threescale_delay }}' 19 | when: deploy_needed 20 | 21 | - include: common/wait_for.yml 22 | static: no 23 | vars: 24 | pod_to_wait: 25 | - webhooks 26 | delay: "{{ threescale_delay }}" 27 | retries: "{{ threescale_retries }}" 28 | project: "{{ threescale_project }}" 29 | tags: status 30 | 31 | - name: Expose the Webhooks service 32 | command: oc expose service webhooks -n '{{ threescale_project }}' 33 | when: deploy_needed 34 | 35 | - name: Get Webhooks service URL 36 | command: oc get route webhooks -n "{{ threescale_project }}" -o 'jsonpath={.spec.host}' 37 | register: route 38 | changed_when: false 39 | tags: status 40 | 41 | - set_fact: 42 | webhooks_hostname: '{{ route.stdout }}' 43 | tags: vars 44 | 45 | - set_fact: 46 | body_set_webhook: '{{ "access_token=" ~ access_token|urlencode }}' 47 | 48 | - set_fact: 49 | body_set_webhook: '{{ body_set_webhook ~ "&" ~ (param.key|urlencode) ~ "=" ~ (param.value|urlencode) }}' 50 | with_dict: 51 | url: http://{{ webhooks_hostname }}/webhook?shared_secret={{ threescale_webhooks_secret }} 52 | active: "true" 53 | provider_actions: "true" 54 | application_created_on: "true" 55 | application_updated_on: "true" 56 | application_deleted_on: "true" 57 | loop_control: 58 | loop_var: param 59 | 60 | - name: Update the webhooks settings 61 | uri: 62 | url: https://{{ threescale_admin_hostname }}/admin/api/webhooks.json 63 | validate_certs: no 64 | method: PUT 65 | body: '{{ body_set_webhook }}' 66 | status_code: 200 67 | register: response 68 | -------------------------------------------------------------------------------- /roles/3scale/templates/amp.json: -------------------------------------------------------------------------------- 1 | {{template|to_json()}} 2 | -------------------------------------------------------------------------------- /roles/base/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | yum_keepcache: false 4 | -------------------------------------------------------------------------------- /roles/base/handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: restart sshd 4 | service: name=sshd state=reloaded 5 | -------------------------------------------------------------------------------- /roles/base/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Keep locally downloaded RPMs (used by offline installer) 4 | lineinfile: dest=/etc/yum.conf regexp="^#* 
*keepcache=" line="keepcache=1" 5 | when: yum_keepcache 6 | tags: config 7 | 8 | - name: Tell SSHD not to use DNS 9 | lineinfile: dest=/etc/ssh/sshd_config regexp="^#* *UseDNS +" line="UseDNS no" 10 | notify: restart sshd 11 | tags: config 12 | 13 | - name: Tell SSHD to forbid root login 14 | lineinfile: dest=/etc/ssh/sshd_config regexp="^#* *PermitRootLogin +" line="PermitRootLogin no" 15 | notify: restart sshd 16 | tags: config 17 | 18 | - name: Tell SSHD to forbid password authentication 19 | lineinfile: dest=/etc/ssh/sshd_config regexp="^#* *PasswordAuthentication +" line="PasswordAuthentication no" 20 | notify: restart sshd 21 | tags: config 22 | 23 | - name: Install some software 24 | yum: name={{ item }} state=installed 25 | with_items: 26 | - vim-enhanced 27 | - tmux 28 | - unzip 29 | - tcpdump 30 | - telnet 31 | - strace 32 | - man-pages 33 | - man 34 | - iptraf 35 | - wget 36 | - openssh-clients 37 | tags: rpm 38 | 39 | - name: Install Open-VM tools 40 | yum: name=open-vm-tools state=installed 41 | tags: rpm 42 | 43 | - name: Fix /etc/environment to include PATH (because the PATH is sometimes broken when using Ansible) 44 | lineinfile: dest=/etc/environment regexp="^PATH=" line="PATH=/bin:/usr/bin:/sbin:/usr/sbin" 45 | tags: config 46 | 47 | - name: Persist the hostname 48 | lineinfile: dest=/etc/sysconfig/network regexp="^HOSTNAME=" line="HOSTNAME={{ inventory_hostname_short }}" 49 | tags: 50 | - config 51 | - dns 52 | 53 | - name: Set the hostname 54 | command: hostnamectl set-hostname {{ inventory_hostname_short }} --static 55 | tags: 56 | - config 57 | - dns 58 | 59 | - name: Ensure consistent locale across systems (1/2) 60 | lineinfile: dest=/etc/locale.conf regexp="^LANG=" line="LANG=en_US.utf8" 61 | 62 | - name: Ensure consistent locale across systems (2/2) 63 | lineinfile: dest=/etc/locale.conf line="LC_CTYPE=en_US.utf8" 64 | -------------------------------------------------------------------------------- /roles/bootstrap/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # The SSH Public Key to add to the authorized_keys 3 | ansible_ssh_public_key: "{{ lookup('file', ansible_ssh_private_key_file + '.pub' ) }}" 4 | -------------------------------------------------------------------------------- /roles/bootstrap/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: This role has only been tested on RHEL and CentOS 4 | assert: 5 | that: 6 | - "ansible_distribution == 'RedHat' or ansible_distribution == 'CentOS'" 7 | 8 | - name: Create user RedHat 9 | user: name=redhat group=users groups=users,wheel state=present comment="RedHat privileged user" password="*" 10 | tags: 11 | - bootstrap 12 | - user 13 | 14 | - name: Set SSH key for root 15 | authorized_key: user=root key="{{ ansible_ssh_public_key }}" manage_dir=yes 16 | tags: 17 | - bootstrap 18 | - user 19 | 20 | - name: Set SSH key for user RedHat 21 | authorized_key: user=redhat key="{{ ansible_ssh_public_key }}" manage_dir=yes 22 | tags: 23 | - bootstrap 24 | - user 25 | 26 | - name: Configure SUDO 27 | template: src=sudoers dest=/etc/sudoers owner=root group=root mode=0440 validate="/usr/sbin/visudo -cf %s" 28 | tags: 29 | - bootstrap 30 | - config 31 | -------------------------------------------------------------------------------- /roles/bootstrap/templates/sudoers: -------------------------------------------------------------------------------- 1 | # {{ ansible_managed }} 2 | %wheel ALL=(ALL) 
NOPASSWD: ALL 3 | root ALL=(ALL) NOPASSWD: ALL 4 | -------------------------------------------------------------------------------- /roles/docker/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | docker_storage_driver: devicemapper 3 | docker_storage_vg: docker 4 | -------------------------------------------------------------------------------- /roles/docker/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Make sure the extras repo is enabled 4 | command: subscription-manager repos --enable rhel-7-server-extras-rpms 5 | when: "ansible_os_family == 'RedHat' and not is_offline|default(False)|bool" 6 | tags: rpm 7 | 8 | - name: Install Docker 9 | yum: name=docker state=installed 10 | tags: rpm 11 | 12 | - name: Start docker 13 | service: name=docker state=started 14 | tags: docker-storage 15 | 16 | - name: Determine if the docker storage driver is the correct one 17 | shell: docker info 18 | register: docker_info 19 | changed_when: false 20 | 21 | - name: set docker_storage_setup_needs_to_run 22 | set_fact: 23 | docker_storage_setup_needs_to_run: '{{ docker_info.stdout.find(''Storage Driver: '' ~ docker_storage_driver) == -1 or docker_info.stdout.find(''Data file: /dev/loop'') != -1 }}' 24 | tags: docker-storage 25 | 26 | - name: stop docker 27 | service: 28 | name: docker 29 | state: stopped 30 | when: docker_storage_setup_needs_to_run 31 | tags: docker-storage 32 | 33 | - name: delete /var/lib/docker 34 | command: rm -rf /var/lib/docker 35 | when: docker_storage_setup_needs_to_run 36 | tags: docker-storage 37 | 38 | - name: Configure docker-storage-setup to use the correct storage driver 39 | template: dest=/etc/sysconfig/docker-storage-setup src=docker-storage-setup 40 | register: docker_storage_setup_config 41 | when: docker_storage_setup_needs_to_run 42 | tags: docker-storage 43 | 44 | - name: Run docker-storage-setup 45 | command: docker-storage-setup 46 | environment: 47 | PATH: /bin:/usr/bin:/sbin:/usr/sbin # Fix buggy PATH on RHEL7 48 | when: docker_storage_setup_needs_to_run 49 | tags: docker-storage 50 | 51 | - name: Start Docker 52 | service: name=docker state=started enabled=yes 53 | tags: docker-storage 54 | -------------------------------------------------------------------------------- /roles/docker/templates/docker-storage-setup: -------------------------------------------------------------------------------- 1 | STORAGE_DRIVER={{ docker_storage_driver }} 2 | {% if docker_storage_driver == 'devicemapper' %} 3 | DOCKER_ROOT_VOLUME=yes 4 | {% if docker_storage_disk is defined %} 5 | DEVS={{ docker_storage_disk }} 6 | {% endif %} 7 | {% if docker_storage_vg is defined %} 8 | VG={{ docker_storage_vg }} 9 | {% endif %} 10 | {% endif %} 11 | -------------------------------------------------------------------------------- /roles/hostpath-provisioner/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | hostpath_provisioner_path: /var/openshift 4 | hostpath_provisioner_target_namespace: default 5 | hostpath_provisioner_docker_image: nmasse/openshift-hostpath-provisioner:latest 6 | hostpath_provisioner_template: https://raw.githubusercontent.com/nmasse-itix/OpenShift-HostPath-Provisioner/master/setup/hostpath-provisioner-template.yaml 7 | hostpath_provisioner_patch_docker_registry: true
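8 | # All of the above are ordinary role defaults; they can be overridden from the 9 | # inventory or group_vars/all when they do not fit. Purely illustrative values: 10 | # 11 | #   hostpath_provisioner_path: /srv/openshift/volumes 12 | #   hostpath_provisioner_target_namespace: openshift-infra 13 | --------------------------------------------------------------------------------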
/roles/hostpath-provisioner/files/registry-storage-pvc.yaml: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolumeClaim 3 | metadata: 4 | name: registry-storage 5 | namespace: default 6 | spec: 7 | accessModes: 8 | - ReadWriteMany 9 | resources: 10 | requests: 11 | storage: 5Gi 12 | -------------------------------------------------------------------------------- /roles/hostpath-provisioner/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Create a directory for the hostpath-provisioner 4 | file: state=directory path={{ hostpath_provisioner_path }} owner=root group=root mode=0777 setype=svirt_sandbox_file_t 5 | 6 | - name: Query existing daemonsets 7 | command: oc get daemonset -n "{{ hostpath_provisioner_target_namespace }}" -o name -l "name=hostpath-provisioner" 8 | register: oc_get_daemonset 9 | changed_when: false 10 | 11 | - name: Deploy app if needed 12 | set_fact: 13 | deploy_needed: "{{ 'daemonsets/hostpath-provisioner' not in oc_get_daemonset.stdout_lines }}" 14 | 15 | - name: Create a temporary directory 16 | tempfile: 17 | state: directory 18 | register: tempfile 19 | when: 'is_offline|default(False)|bool and deploy_needed' 20 | 21 | - name: Push the Template to the target 22 | copy: 23 | src: '{{ hostpath_provisioner_template }}' 24 | dest: '{{ tempfile.path }}/{{ hostpath_provisioner_template|basename }}' 25 | when: 'is_offline|default(False)|bool and deploy_needed' 26 | 27 | - set_fact: 28 | hostpath_provisioner_template: '{{ tempfile.path }}/{{ hostpath_provisioner_template|basename }}' 29 | when: 'is_offline|default(False)|bool and deploy_needed' 30 | 31 | - name: Process the OpenShift Template and create the OpenShift objects for the hostpath-provisioner 32 | shell: oc process -f "{{ hostpath_provisioner_template }}" -p "HOSTPATH_TO_USE={{ hostpath_provisioner_path }}" -p "TARGET_NAMESPACE={{ hostpath_provisioner_target_namespace }}" -p "HOSTPATH_PROVISIONER_IMAGE={{ hostpath_provisioner_docker_image }}" | oc create -f - 33 | when: deploy_needed 34 | register: oc 35 | failed_when: oc.rc > 0 and 'Error from server (AlreadyExists):' not in oc.stderr 36 | changed_when: oc.rc == 0 37 | 38 | - name: Create a temporary directory to hold the PVC YAML file 39 | tempfile: state=directory 40 | register: tempfile 41 | 42 | - name: Copy the PersistentVolumeClaim object definition 43 | copy: src=registry-storage-pvc.yaml dest={{tempfile.path}}/registry-storage-pvc.yaml 44 | when: hostpath_provisioner_patch_docker_registry 45 | 46 | - name: Create a PersistentVolumeClaim for the docker-registry 47 | command: oc create -n default -f {{tempfile.path}}/registry-storage-pvc.yaml 48 | when: hostpath_provisioner_patch_docker_registry 49 | register: oc 50 | failed_when: oc.rc > 0 and 'Error from server (AlreadyExists):' not in oc.stderr 51 | changed_when: oc.rc == 0 52 | 53 | - name: Add the new volume to docker-registry 54 | command: oc volume dc docker-registry -n default --add=true --overwrite=true --type=persistentVolumeClaim --name=registry-storage --claim-name=registry-storage 55 | when: hostpath_provisioner_patch_docker_registry 56 | -------------------------------------------------------------------------------- /roles/microcks/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | microcks_project: microcks 4 | microcks_template_url: 
https://raw.githubusercontent.com/microcks/microcks/master/openshift-persistent-template.json 5 | microcks_template_name: microcks-persistent 6 | microcks_application_name: microcks 7 | microcks_delay: 5 8 | microcks_retries: 30 9 | microcks_sample_jobs: 10 | - name: Petstore 11 | repositoryUrl: https://raw.githubusercontent.com/microcks/microcks/master/samples/PetstoreAPI-collection.json 12 | - name: HelloREST 13 | repositoryUrl: https://raw.githubusercontent.com/microcks/microcks/master/samples/HelloAPI-soapui-project.xml 14 | - name: HelloSOAP 15 | repositoryUrl: https://raw.githubusercontent.com/microcks/microcks/master/samples/HelloService-soapui-project.xml 16 | -------------------------------------------------------------------------------- /roles/microcks/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Create the Microcks templates (globally) 4 | command: oc create -n openshift -f {{ microcks_template_url }} 5 | register: oc 6 | failed_when: oc.rc > 0 and 'Error from server (AlreadyExists):' not in oc.stderr 7 | changed_when: oc.rc == 0 8 | 9 | - name: Get a list of existing projects 10 | command: oc get projects -o name 11 | register: oc_get_projects 12 | changed_when: false 13 | 14 | - name: Create a new project for Microcks 15 | command: oc new-project "{{ microcks_project }}" 16 | when: '"projects/" ~ microcks_project not in oc_get_projects.stdout_lines' 17 | 18 | - name: Query existing deploymentconfigs 19 | command: oc get dc -n "{{ microcks_project }}" -o name -l "component={{ microcks_application_name }}" 20 | register: oc_get_dc 21 | changed_when: false 22 | 23 | - name: Deploy app if needed 24 | set_fact: 25 | deploy_needed: "{{ 'deploymentconfigs/' ~ microcks_application_name not in oc_get_dc.stdout_lines }}" 26 | 27 | - name: Process the OpenShift Template and create the OpenShift objects 28 | command: oc new-app -n {{ microcks_project }} --template={{ microcks_template_name }} -p MEMORY_LIMIT=1Gi 29 | when: deploy_needed 30 | 31 | - name: Wait for OpenShift to create all objects 32 | pause: 33 | seconds: '{{ microcks_delay }}' 34 | when: deploy_needed 35 | 36 | - include: common/wait_for.yml 37 | static: no 38 | vars: 39 | pod_to_wait: 40 | - microcks 41 | - microcks-mongodb 42 | delay: "{{ microcks_delay }}" 43 | retries: "{{ microcks_retries }}" 44 | project: "{{ microcks_project }}" 45 | tags: status 46 | 47 | - name: Get Route URL 48 | command: oc get route {{ microcks_application_name }} -n "{{ microcks_project }}" -o 'jsonpath={.spec.host}' 49 | register: route 50 | changed_when: false 51 | tags: 52 | - status 53 | - vars 54 | 55 | - set_fact: 56 | microcks_hostname: '{{ route.stdout }}' 57 | tags: vars 58 | 59 | - include: post-install.yml 60 | tags: post-install 61 | 62 | - name: Microcks is ready ! 63 | debug: msg="Go to http://{{ route.stdout }}/ to start using Microcks !" 
64 | tags: status 65 | -------------------------------------------------------------------------------- /roles/microcks/tasks/post-install.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - include: "register-service.yml" 4 | static: no 5 | with_items: "{{ microcks_sample_jobs }}" 6 | -------------------------------------------------------------------------------- /roles/microcks/tasks/register-service.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - debug: "msg='Working on service {{ item.name }}'" 4 | 5 | - name: Check if service exists 6 | uri: 7 | url: "http://{{ microcks_hostname }}/api/jobs?name={{ item.name|urlencode }}" 8 | method: GET 9 | status_code: 200 10 | register: services 11 | 12 | - set_fact: 13 | service_id: '{{ services.json[0].id }}' 14 | when: services.json|length > 0 15 | 16 | - name: Register sample service 17 | uri: 18 | url: http://{{ microcks_hostname }}/api/jobs 19 | method: POST 20 | body: "{{ item }}" 21 | body_format: json 22 | status_code: 201 23 | register: service 24 | when: services.json|length == 0 25 | 26 | - set_fact: 27 | service_id: '{{ service.json.id }}' 28 | when: '"json" in service' # => service.json is defined 29 | 30 | - name: Activate the service 31 | uri: 32 | url: http://{{ microcks_hostname }}/api/jobs/{{ service_id }}/activate 33 | method: PUT 34 | status_code: 200 35 | 36 | - name: Start the service 37 | uri: 38 | url: http://{{ microcks_hostname }}/api/jobs/{{ service_id }}/start 39 | method: PUT 40 | status_code: 200 41 | -------------------------------------------------------------------------------- /roles/name-resolution/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Check for mandatory variables required by this playbook 4 | fail: 5 | msg: "This playbook requires {{item}} to be set." 
6 | when: "vars[item] is not defined or vars[item] == ''" 7 | with_items: 8 | - lab_dns_suffix 9 | - openshift_master_default_subdomain 10 | 11 | - name: Make sure each machine has an up-to-date /etc/hosts 12 | template: dest=/etc/hosts src=hosts 13 | tags: config 14 | 15 | - name: Install dnsmasq 16 | yum: name=dnsmasq state=installed 17 | when: "'name-server' in group_names" # Only on admin server 18 | tags: rpm 19 | 20 | - name: Set dnsmasq config 21 | template: src=dnsmasq.conf dest=/etc/dnsmasq.conf 22 | when: "'name-server' in group_names" # Only on admin server 23 | tags: config 24 | 25 | - name: Generate an /etc/hosts with all hosts 26 | template: dest=/etc/hosts.dnsmasq src=hosts 27 | when: "'name-server' in group_names" # Only on admin server 28 | tags: config 29 | 30 | - name: Make sure dnsmasq daemon is enabled and started 31 | service: name=dnsmasq state=started enabled=yes 32 | when: "'name-server' in group_names" # Only on admin server 33 | tags: config 34 | 35 | - name: Add an iptables rule to allow DNS queries from other hosts 36 | lineinfile: dest=/etc/sysconfig/iptables line="-A INPUT -p udp --dport 53 -j ACCEPT" insertafter="-A INPUT -i lo -j ACCEPT" 37 | when: "'name-server' in group_names" # Only on admin server 38 | tags: iptables 39 | 40 | - name: Restart iptables 41 | service: name=iptables enabled=yes state=restarted 42 | when: "'name-server' in group_names" # Only on admin server 43 | tags: iptables 44 | 45 | - name: Fix the /etc/resolv.conf of other hosts 46 | template: dest=/etc/resolv.conf src=resolv.conf 47 | when: "'name-server' in groups and 'name-server' not in group_names" # On all other nodes (if a name server has been set up) 48 | tags: config 49 | -------------------------------------------------------------------------------- /roles/name-resolution/templates/dnsmasq.conf: -------------------------------------------------------------------------------- 1 | # {{ ansible_managed }} 2 | 3 | domain-needed 4 | bogus-priv 5 | expand-hosts 6 | log-queries 7 | local-ttl=60 8 | 9 | # Do not read the default /etc/hosts 10 | no-hosts 11 | 12 | # But read this one... 
13 | addn-hosts=/etc/hosts.dnsmasq 14 | 15 | # Default suffix for all machines 16 | domain={{ lab_dns_suffix }} 17 | 18 | # 19 | # Wildcard DNS entries (see the openshift_master_default_subdomain variable) 20 | # 21 | # note: will generate something like this: 22 | # address=/app.openshift.test/192.168.23.20 23 | # 24 | {% if 'lb' in groups %} 25 | address=/{{ openshift_master_default_subdomain }}/{{ hostvars[groups['lb'][0]]['ansible_default_ipv4']['address'] }} 26 | {% else %} 27 | address=/{{ openshift_master_default_subdomain }}/{{ hostvars[groups['masters'][0]]['ansible_default_ipv4']['address'] }} 28 | {% endif %} 29 | -------------------------------------------------------------------------------- /roles/name-resolution/templates/hosts: -------------------------------------------------------------------------------- 1 | # {{ ansible_managed }} 2 | 127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4 3 | ::1 localhost localhost.localdomain localhost6 localhost6.localdomain6 4 | 5 | {% if "name-server" not in groups or "name-server" in group_names %} 6 | {% for item in groups['all'] %} 7 | {{ hostvars[item]['ansible_default_ipv4']['address'] }} {{ hostvars[item]['inventory_hostname']}} {{ hostvars[item]['inventory_hostname_short']}} 8 | {% endfor %} 9 | {% endif %} 10 | -------------------------------------------------------------------------------- /roles/name-resolution/templates/resolv.conf: -------------------------------------------------------------------------------- 1 | # {{ ansible_managed }} 2 | search {{ lab_dns_suffix }} 3 | {% for item in groups['name-server'] %} 4 | nameserver {{ hostvars[item]['ansible_default_ipv4']['address'] }} 5 | {% endfor %} 6 | -------------------------------------------------------------------------------- /roles/openshift-postinstall/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | jboss_image_streams: https://raw.githubusercontent.com/jboss-openshift/application-templates/ose-v1.3.7/jboss-image-streams.json 4 | -------------------------------------------------------------------------------- /roles/openshift-postinstall/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Create a temporary directory 4 | tempfile: 5 | state: directory 6 | register: tempfile 7 | when: 'is_offline|default(False)|bool' 8 | 9 | - name: Push the JBoss ImageStreams to the target 10 | copy: 11 | src: '{{ jboss_image_streams }}' 12 | dest: '{{ tempfile.path }}/{{ jboss_image_streams|basename }}' 13 | when: 'is_offline|default(False)|bool' 14 | 15 | - set_fact: 16 | jboss_image_streams: '{{ tempfile.path }}/{{ jboss_image_streams|basename }}' 17 | when: 'is_offline|default(False)|bool' 18 | 19 | - name: Install new JBoss ImageStreams 20 | command: oc create -n openshift -f "{{ jboss_image_streams }}" 21 | register: oc 22 | failed_when: oc.rc > 0 and 'Error from server (AlreadyExists):' not in oc.stderr 23 | changed_when: oc.rc == 0 24 | 25 | - name: Update existing JBoss ImageStreams 26 | command: oc replace -n openshift -f "{{ jboss_image_streams }}" 27 | register: oc 28 | failed_when: oc.rc > 0 and 'Error from server (NotFound):' not in oc.stderr 29 | changed_when: oc.rc == 0 30 | 31 | - name: Update the router to allow Wildcards 32 | command: oc set env dc/router ROUTER_ALLOW_WILDCARD_ROUTES=true -n default 33 | 34 | - name: Update the dnsmasq configuration to reference the wildcard DNS entry 35 | template: src=wildcard.conf dest=/etc/dnsmasq.d/wildcard.conf 36 | 
37 | - name: Restart the dnsmasq service 38 | service: name=dnsmasq enabled=yes state=restarted 39 | -------------------------------------------------------------------------------- /roles/openshift-postinstall/templates/wildcard.conf: -------------------------------------------------------------------------------- 1 | 2 | address=/{{ openshift_master_default_subdomain }}/{{ hostvars[groups['masters'][0]]['ansible_default_ipv4']['address'] }} 3 | -------------------------------------------------------------------------------- /roles/openshift-prereq/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Install iptables-services 4 | yum: name=iptables-services state=installed 5 | tags: rpm 6 | 7 | - name: Disable firewalld 8 | service: name=firewalld state=stopped enabled=no 9 | ignore_errors: yes # The firewalld service might not be installed, ignore error 10 | 11 | - name: Enable iptables 12 | service: name=iptables state=started enabled=yes 13 | 14 | - name: First, disable any repos (using subscription-manager) 15 | command: subscription-manager repos --disable="*" 16 | when: 'not is_offline|default(False)|bool' 17 | tags: rpm 18 | 19 | - name: Make sure mandatory repos are enabled 20 | command: subscription-manager repos --enable {{ item }} 21 | with_items: 22 | - rhel-7-server-rpms 23 | - rhel-7-server-optional-rpms 24 | - rhel-7-server-extras-rpms 25 | - rhel-7-server-ose-{{ lab_openshift_version }}-rpms 26 | - rhel-7-fast-datapath-rpms # see https://access.redhat.com/solutions/3008401 27 | when: 'not is_offline|default(False)|bool' 28 | tags: rpm 29 | 30 | - name: Install nfs-utils 31 | yum: name=nfs-utils state=installed 32 | tags: rpm 33 | 34 | - name: Install bash-completion 35 | yum: name=bash-completion state=installed 36 | when: "'masters' in group_names" # Only on master server 37 | tags: rpm 38 | 39 | - name: Install NetworkManager 40 | yum: name=NetworkManager state=installed 41 | tags: rpm 42 | 43 | - name: Install net-tools 44 | yum: name=net-tools state=installed 45 | tags: rpm 46 | 47 | - name: Install bind-utils 48 | yum: name=bind-utils state=installed 49 | tags: rpm 50 | 51 | - name: Install httpd-tools 52 | yum: name=httpd-tools state=installed 53 | tags: rpm 54 | -------------------------------------------------------------------------------- /roles/register-rhn/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: This role must only be run on RHEL 3 | assert: 4 | that: 5 | - "ansible_os_family == 'RedHat'" 6 | 7 | - name: Register this system on RHN 8 | redhat_subscription: 9 | state: present 10 | username: "{{ lookup('env','RHN_LOGIN') }}" 11 | password: "{{ lookup('env','RHN_PASSWORD') }}" 12 | consumer_name: "{{ inventory_hostname }}" 13 | autosubscribe: false 14 | tags: rhn 15 | 16 | # 17 | # To know which Pool ID you can use, run the following command on a registered host: 18 | # 19 | # sudo subscription-manager list --available --matches '*OpenShift*' 20 | # 21 | 22 | - name: Attach the correct pool id to the new subscription 23 | command: subscription-manager attach --pool={{ lookup('env','RHN_POOLID') }} 24 | when: 'lookup("env","RHN_POOLID") != ""' 25 | tags: rhn 26 | -------------------------------------------------------------------------------- /roles/sso/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | sso_image_stream: redhat-sso71-openshift 3 | 
sso_image_stream_tag: redhat-sso71-openshift:1.0 4 | sso_available_templates: 5 | - sso71-https 6 | - sso71-mysql-persistent 7 | - sso71-mysql 8 | - sso71-postgresql-persistent 9 | - sso71-postgresql 10 | jboss_openshift_application_templates_tag: ose-v1.4.0 11 | jboss_openshift_application_templates_git_repo: https://github.com/jboss-openshift/application-templates.git 12 | sso_template: sso71-postgresql-persistent 13 | sso_project: sso 14 | sso_realm: 3scale 15 | sso_keystore_password: secret 16 | sso_admin_username: admin 17 | sso_application_name: sso 18 | sso_service_username: cli 19 | sso_retries: 30 20 | sso_delay: 5 21 | sso_default_client_id: admin-cli 22 | sso_initial_access_token_request: 23 | count: 1000 24 | expiration: 31557600 # a year expressed in seconds 25 | sso_default_realm_settings: 26 | notBefore: 3600 # Allow a clock skew of 1 hour 27 | accessTokenLifespan: 86400 # 1 day 28 | accessTokenLifespanForImplicitFlow: 86400 # 1 day 29 | ssoSessionIdleTimeout: 86400 # 1 day 30 | ssoSessionMaxLifespan: 86400 # 1 day 31 | accessCodeLifespan: 86400 # 1 day 32 | accessCodeLifespanUserAction: 86400 # 1 day 33 | accessCodeLifespanLogin: 86400 # 1 day 34 | registrationAllowed: true 35 | rememberMe: true 36 | sso_users_to_create: 37 | - username: jdoe 38 | firstName: John 39 | lastName: Doe 40 | enabled: true 41 | credentials: 42 | - type: password 43 | value: hackthis 44 | temporary: false 45 | sso_clients_to_create: 46 | - name: Hello App with OAuth 47 | description: "This is my very first application using OAuth" 48 | clientId: "my-app" 49 | publicClient: false 50 | clientAuthenticatorType: "client-secret" 51 | secret: "{{ 'my-app'|sha1() }}" 52 | enabled: true 53 | redirectUris: 54 | - https://www.getpostman.com/oauth2/callback 55 | -------------------------------------------------------------------------------- /roles/sso/tasks/create-client.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Create the Demo Client 4 | uri: 5 | url: https://{{ sso_route_name }}/auth/admin/realms/{{ sso_realm }}/clients 6 | validate_certs: no 7 | headers: 8 | Authorization: 'Bearer {{ access_token }}' 9 | method: POST 10 | body: "{{ sso_demo_client }}" 11 | body_format: json 12 | status_code: "201,409" 13 | register: response 14 | changed_when: response.status == 201 15 | -------------------------------------------------------------------------------- /roles/sso/tasks/create-user.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Create the Demo User 4 | uri: 5 | url: https://{{ sso_route_name }}/auth/admin/realms/{{ sso_realm }}/users 6 | validate_certs: no 7 | headers: 8 | Authorization: 'Bearer {{ access_token }}' 9 | method: POST 10 | body: "{{ sso_demo_user }}" 11 | body_format: json 12 | status_code: "201,409" 13 | register: response 14 | changed_when: response.status == 201 15 | 16 | - set_fact: 17 | user_has_been_created: true 18 | user_url: "{{ response.location }}" 19 | when: response.status == 201 20 | 21 | - name: Retrieve the id of the Demo User 22 | uri: 23 | url: 'https://{{ sso_route_name }}/auth/admin/realms/{{ sso_realm }}/users?username={{ sso_demo_user.username|urlencode }}' 24 | validate_certs: no 25 | headers: 26 | Authorization: 'Bearer {{ access_token }}' 27 | register: response 28 | changed_when: false 29 | failed_when: response.status != 200 or (response.json|length != 1) 30 | when: user_has_been_created is not defined 31 | 32 | - set_fact: 33 | user_url: 
"https://{{ sso_route_name }}/auth/admin/realms/{{ sso_realm }}/users/{{ response.json[0].id }}" 34 | when: user_has_been_created is not defined 35 | 36 | - name: Set the password of the Demo User 37 | uri: 38 | url: "{{ user_url }}/reset-password" 39 | validate_certs: no 40 | headers: 41 | Authorization: 'Bearer {{ access_token }}' 42 | method: PUT 43 | body: "{{ sso_demo_user.credentials[0] }}" 44 | body_format: json 45 | status_code: 204 46 | -------------------------------------------------------------------------------- /roles/sso/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Compute the default route name if not provided 4 | set_fact: 5 | sso_route_name: '{{ "secure-" ~ sso_application_name ~ "-" ~ sso_project ~ "." ~ openshift_master_default_subdomain }}' 6 | when: sso_route_name is not defined 7 | tags: vars 8 | 9 | - name: Get the existing service account password 10 | command: oc get dc {{ sso_application_name }} -n "{{ sso_project }}" -o 'jsonpath={.spec.template.spec.containers[0].env[?(@.name=="SSO_SERVICE_PASSWORD")].value}' 11 | register: password 12 | changed_when: false 13 | failed_when: false 14 | tags: vars 15 | 16 | - name: Re-use the existing service account password 17 | set_fact: 18 | sso_service_password: "{{ password.stdout_lines[0] }}" 19 | when: 'password.stdout != ""' 20 | tags: vars 21 | 22 | - name: Generate a new service account password 23 | set_fact: 24 | sso_service_password: "{{ lookup('password', '/dev/null length=8') }}" 25 | when: 'sso_service_password is not defined' 26 | tags: vars 27 | 28 | - name: Install java-1.8.0-openjdk-headless (required to use 'keytool') 29 | yum: name=java-1.8.0-openjdk-headless state=installed 30 | 31 | - name: Create a temporary directory 32 | tempfile: 33 | state: directory 34 | register: tempfile 35 | 36 | - name: Use a local copy of the jboss-openshift/application-templates GIT repo 37 | set_fact: 38 | jboss_openshift_application_templates_git_repo: 'http://{{ offline_git_route }}/{{ jboss_openshift_application_templates_git_repo|basename|regex_replace(''[.]git$'', '''') }}' 39 | when: 'is_offline|default(false)|bool' 40 | 41 | - name: Check out the jboss-openshift/application-templates GIT repo 42 | git: 43 | repo: '{{ jboss_openshift_application_templates_git_repo }}' 44 | dest: '{{ tempfile.path }}' 45 | version: '{{ jboss_openshift_application_templates_tag|default(''master'') }}' 46 | 47 | - name: Create the Red Hat SSO templates (globally) 48 | command: oc create -n openshift -f '{{ tempfile.path }}/sso/{{ item }}.json' 49 | with_items: "{{ sso_available_templates }}" 50 | register: oc 51 | failed_when: oc.rc > 0 and 'Error from server (AlreadyExists):' not in oc.stderr 52 | changed_when: oc.rc == 0 53 | 54 | - name: Import the Red Hat SSO ImageStream (globally) 55 | command: oc import-image -n openshift redhat-sso71-openshift 56 | when: 'not is_offline|default(false)|bool' 57 | 58 | - name: Get a list of existing projects 59 | command: oc get projects -o name 60 | register: oc_get_projects 61 | changed_when: false 62 | 63 | - name: Create a new project for SSO 64 | command: oc new-project "{{ sso_project }}" 65 | when: '"projects/" ~ sso_project not in oc_get_projects.stdout_lines' 66 | 67 | - name: Query existing deploymentconfigs 68 | command: oc get dc -n "{{ sso_project }}" -o name -l "application={{ sso_application_name }}" 69 | register: oc_get_dc 70 | changed_when: false 71 | 72 | - name: Deploy app if needed 73 | set_fact: 74 | 
deploy_needed: "{{ 'deploymentconfigs/' ~ sso_application_name not in oc_get_dc.stdout_lines }}" 75 | 76 | - name: Create a service account for SSO 77 | command: oc create serviceaccount sso-service-account -n "{{ sso_project }}" 78 | register: oc 79 | failed_when: oc.rc > 0 and 'Error from server (AlreadyExists):' not in oc.stderr 80 | changed_when: oc.rc == 0 81 | 82 | - name: Grant the "view" right to the SSO Service Account 83 | command: oc policy add-role-to-user view -z sso-service-account -n "{{ sso_project }}" 84 | 85 | - name: Generate a keypair for HTTPS 86 | command: creates=keystore.jks keytool -genkey -alias ssl -keypass "{{ sso_keystore_password }}" -storepass "{{ sso_keystore_password }}" -keyalg RSA -keystore keystore.jks -validity 10950 -storetype JKS -dname "CN={{ sso_route_name }}" 87 | 88 | - name: Generate a keypair for Jgroups 89 | command: creates=jgroups.jceks keytool -genseckey -alias jgroups -keypass "{{ sso_keystore_password }}" -storepass "{{ sso_keystore_password }}" -keyalg Blowfish -keysize 56 -keystore jgroups.jceks -storetype JCEKS 90 | 91 | - name: Create a secret combining both keypairs 92 | command: oc secret new sso-app-secret jgroups.jceks keystore.jks -n "{{ sso_project }}" 93 | register: oc 94 | failed_when: oc.rc > 0 and 'Error from server (AlreadyExists):' not in oc.stderr 95 | changed_when: oc.rc == 0 96 | 97 | - name: Link the new Secret to the SSO Service Account 98 | command: oc secrets link sso-service-account sso-app-secret -n "{{ sso_project }}" 99 | 100 | - name: Process the OpenShift Template and create the OpenShift objects 101 | command: oc new-app -n {{ sso_project }} {{ sso_template }} -p "HTTPS_PASSWORD={{ sso_keystore_password }}" -p "JGROUPS_ENCRYPT_PASSWORD={{ sso_keystore_password }}" -p "SSO_REALM={{ sso_realm }}" -p "SSO_ADMIN_USERNAME={{ sso_admin_username }}" -p "APPLICATION_NAME={{ sso_application_name }}" -p "SSO_SERVICE_PASSWORD={{ sso_service_password }}" -p "SSO_SERVICE_USERNAME={{ sso_service_username }}" 102 | when: deploy_needed 103 | 104 | - name: Wait for OpenShift to create all objects 105 | pause: 106 | seconds: '{{ sso_delay }}' 107 | when: deploy_needed 108 | 109 | # Update the secure route to use "Re-encrypt" instead of "Passthrough" 110 | - include: update-route.yml 111 | tags: update-route 112 | 113 | - include: common/wait_for.yml 114 | static: no 115 | vars: 116 | pod_to_wait: 117 | - sso 118 | delay: "{{ sso_delay }}" 119 | retries: "{{ sso_retries }}" 120 | project: "{{ sso_project }}" 121 | tags: status 122 | 123 | - name: Get Admin Username 124 | command: oc get dc {{ sso_application_name }} -n "{{ sso_project }}" -o 'jsonpath={.spec.template.spec.containers[0].env[?(@.name=="SSO_ADMIN_USERNAME")].value}' 125 | register: username 126 | changed_when: false 127 | tags: status 128 | 129 | - name: Get Admin Password 130 | command: oc get dc {{ sso_application_name }} -n "{{ sso_project }}" -o 'jsonpath={.spec.template.spec.containers[0].env[?(@.name=="SSO_ADMIN_PASSWORD")].value}' 131 | register: password 132 | changed_when: false 133 | tags: status 134 | 135 | - name: Get Route URL 136 | command: oc get route secure-{{ sso_application_name }} -n "{{ sso_project }}" -o 'jsonpath={.spec.host}' 137 | register: route 138 | changed_when: false 139 | tags: status 140 | 141 | - set_fact: 142 | sso_route_name: '{{ route.stdout }}' 143 | 144 | - name: SSO is ready ! 
145 | debug: msg="Login on https://{{ sso_route_name }}/auth/admin with username = '{{ username.stdout }}' and password = '{{ password.stdout }}'" 146 | tags: status 147 | 148 | - include: post-install.yml 149 | tags: post-install 150 | -------------------------------------------------------------------------------- /roles/sso/tasks/post-install.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Prepare the OAuth Request to RH-SSO (static params) 3 | set_fact: 4 | oauth_payload: "grant_type=password" 5 | 6 | - name: Prepare the OAuth Request to RH-SSO (urlencode dynamic params) 7 | set_fact: 8 | oauth_payload: '{{ oauth_payload ~ "&" ~ item.key ~ "=" ~ (item.value|urlencode) }}' 9 | with_dict: 10 | client_id: '{{ sso_default_client_id }}' 11 | username: '{{ sso_service_username }}' 12 | password: '{{ sso_service_password }}' 13 | 14 | - name: Authenticate to RH-SSO using the service account 15 | uri: 16 | url: 'https://{{ sso_route_name }}/auth/realms/{{ sso_realm }}/protocol/openid-connect/token' 17 | body: '{{ oauth_payload }}' 18 | method: POST 19 | validate_certs: no 20 | return_content: yes 21 | register: response 22 | changed_when: false 23 | 24 | - name: Extract the access_token 25 | set_fact: 26 | access_token: '{{ response.json |json_query("access_token") }}' 27 | 28 | - debug: msg="access_token = {{ access_token }}" 29 | 30 | - name: Create an Initial Access Token in RH-SSO 31 | uri: 32 | url: 'https://{{ sso_route_name }}/auth/admin/realms/{{ sso_realm }}/clients-initial-access' 33 | validate_certs: no 34 | method: POST 35 | body: '{{ sso_initial_access_token_request }}' 36 | body_format: json 37 | headers: 38 | Authorization: 'Bearer {{ access_token }}' 39 | register: response 40 | 41 | - name: Extract the Initial Access Token from the RH-SSO response 42 | set_fact: 43 | initial_access_token: '{{ response.json |json_query("token") }}' 44 | 45 | - debug: msg="initial_access_token = {{ initial_access_token }}" 46 | 47 | - name: Get the current Realm configuration 48 | uri: 49 | url: 'https://{{ sso_route_name }}/auth/admin/realms/{{ sso_realm }}' 50 | validate_certs: no 51 | headers: 52 | Authorization: 'Bearer {{ access_token }}' 53 | register: response 54 | 55 | - name: Change the Realm configuration to extend the token lifetimes (see variable sso_default_realm_settings) 56 | set_fact: 57 | realm_config: '{{ response.json |combine(sso_default_realm_settings) }}' 58 | 59 | - name: Update the Realm configuration 60 | uri: 61 | url: 'https://{{ sso_route_name }}/auth/admin/realms/{{ sso_realm }}' 62 | validate_certs: no 63 | headers: 64 | Authorization: 'Bearer {{ access_token }}' 65 | method: PUT 66 | body: "{{ realm_config }}" 67 | body_format: json 68 | status_code: 204 69 | 70 | - include: "create-user.yml" 71 | with_items: '{{ sso_users_to_create }}' 72 | loop_control: 73 | loop_var: sso_demo_user 74 | 75 | - include: "create-client.yml" 76 | with_items: '{{ sso_clients_to_create }}' 77 | loop_control: 78 | loop_var: sso_demo_client 79 | -------------------------------------------------------------------------------- /roles/sso/tasks/update-route.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # 3 | # By default, the RH-SSO template creates a secure route that uses "Passthrough". 
4 | # 5 | # This may be useful if you plan to have client certificate authentication but 6 | # it has strong requirements on the client side because of the SNI extension 7 | # (Server Name Indication) that is used by the underlying OpenShift Router to 8 | # direct the TLS flow to the right service. 9 | # 10 | # For a test lab / PoC, it is better to switch to "Re-encrypt", which has fewer 11 | # requirements to get it working. 12 | # 13 | 14 | - name: Extract the CA Cert from the keystore.jks 15 | command: creates=cacert.pem keytool -exportcert -alias ssl -keypass "{{ sso_keystore_password }}" -storepass "{{ sso_keystore_password }}" -keystore keystore.jks -file cacert.pem -rfc 16 | 17 | - name: Convert the CA Cert to a JSON String to be used in a JSON Patch 18 | command: 'perl -pe ''chomp; s/\r//g; print "\\n"'' cacert.pem' 19 | register: cacert 20 | 21 | - name: Update the secure route to use "reencrypt" instead of "passthrough" 22 | command: 'oc patch route secure-{{ sso_application_name }} -n {{ sso_project }} --type=json -p ''[ { "op": "replace", "path": "/spec/tls/termination", "value": "reencrypt" }, { "op": "replace", "path": "/spec/tls/destinationCACertificate", "value": "{{ cacert.stdout }}" } ]'' '
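23 | # For reference, after this patch the route's TLS stanza should look roughly 24 | # like the following (illustrative, certificate value shortened): 25 | # 26 | #   spec: 27 | #     tls: 28 | #       termination: reencrypt 29 | #       destinationCACertificate: "-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----" 30 | --------------------------------------------------------------------------------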