├── roles ├── openshift-pre-reqs │   ├── files │   │   └── docker-storage-setup │   └── tasks │   │   └── main.yml ├── update-virtual-machines │   └── tasks │   │   └── main.yml ├── gluster-node-prereqs │   └── tasks │   │   └── main.yml ├── teardown-virtual-machines │   └── tasks │   │   ├── unregister-vm.yml │   │   └── main.yml ├── add-users │   └── tasks │   │   ├── add-user.yml │   │   └── main.yml ├── gluster-master-prereqs │   └── tasks │   │   └── main.yml ├── disable-service-catalog │   ├── files │   │   └── show-templates.sh │   └── tasks │   │   └── main.yml ├── grafana-install │   └── tasks │   │   └── main.yml ├── teardown-dns │   └── tasks │   │   ├── delete-route.yml │   │   └── main.yml ├── teardown-security-groups │   └── tasks │   │   └── main.yml ├── update-ssl-cockpit │   └── tasks │   │   └── main.yml ├── openshift-install │   ├── files │   │   ├── ansible.cfg │   │   ├── install-openshift.sh │   │   └── openshift_inventory.cfg │   └── tasks │   │   └── main.yml ├── define-derived-vars │   └── tasks │   │   └── main.yml ├── update-ssl-router │   └── tasks │   │   └── main.yml ├── setup-vpc │   └── tasks │   │   └── main.yml ├── teardown-vpc │   └── tasks │   │   └── main.yml ├── setup-host-groups │   └── tasks │   │   └── main.yml ├── register-virtual-machines │   └── tasks │   │   └── main.yml ├── setup-dns │   └── tasks │   │   └── main.yml ├── generate-ssl-certs │   └── tasks │   │   └── main.yml ├── setup-ssh │   └── tasks │   │   └── main.yml ├── setup-security-groups │   └── tasks │   │   └── main.yml └── setup-virtual-machines │   └── tasks │   └── main.yml ├── inventory └── inventory.cfg ├── docs └── network-topology-openshift.jpg ├── .gitignore ├── ansible.cfg ├── openshift-playbook-run.sh ├── openshift-teardown-run.sh ├── amis ├── core │   ├── root │   │   └── usr │   │   │   └── bin │   │   │   ├── rpm-file-permissions │   │   │   ├── fix-permissions │   │   │   └── prepare-yum-repositories │   ├── test │   │   └── run │   └── packer.json ├── README.md └── base │   ├── test │   └── run │   └── packer.json ├── teardown-playbook.yml ├── README.md ├── CONTRIBUTING.md ├── openshift-playbook.yml └── vars └── aws-config.yml
/roles/openshift-pre-reqs/files/docker-storage-setup: -------------------------------------------------------------------------------- 1 | DEVS=/dev/xvdb 2 | VG=docker-vg
-------------------------------------------------------------------------------- /inventory/inventory.cfg: -------------------------------------------------------------------------------- 1 | [local] 2 | localhost ansible_connection=local ansible_become=no ansible_python_interpreter=python 3 |
-------------------------------------------------------------------------------- /docs/network-topology-openshift.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/gnunn1/openshift-aws-setup/HEAD/docs/network-topology-openshift.jpg
-------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # IDE 2 | *.iml 3 | .idea 4 | 5 | # Packer 6 | build.log 7 | vars.json 8 | 9 | # Compiled classes 10 | /target 11 | 12 | # pass 13 | vault_pass 14 | aws-credentials 15 | *.retry 16 | private* 17 |
-------------------------------------------------------------------------------- /roles/update-virtual-machines/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Upgrade every package on the node; retries guard against transient yum/repo failures. 3 | - name: upgrade packages on all nodes 4 |   yum: name=* state=latest 5 |   register: result 6 |   until: result is succeeded 7 |   retries: 3 8 |   delay: 5 9 |   become: true
-------------------------------------------------------------------------------- /ansible.cfg: -------------------------------------------------------------------------------- 1 | [ssh_connection] 2 | pipelining = True 3 | control_path = /tmp/ansible-ssh-%%h-%%p-%%r 4 | ssh_args = -o ControlMaster=auto -o ControlPersist=1800s 5 | 6 | [defaults] 7 | forks = 19 8 | host_key_checking = False 9 | callback_whitelist = profile_tasks 10 | -------------------------------------------------------------------------------- /roles/gluster-node-prereqs/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Add gluster repo to node 3 | shell: 'subscription-manager repos --enable="rh-gluster-3-client-for-rhel-7-server-rpms"' 4 | register: result 5 | until: result is succeeded 6 | retries: 10 7 | delay: 5 8 | become: true 9 | -------------------------------------------------------------------------------- /roles/teardown-virtual-machines/tasks/unregister-vm.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - debug: 3 | msg: "Unsubscribing IP {{public_ip}}" 4 | 5 | - name: Unsubscribe VM 6 | redhat_subscription: 7 | state: absent 8 | delegate_to: "{{public_ip}}" 9 | remote_user: "{{amazon_user}}" 10 | become: true -------------------------------------------------------------------------------- /openshift-playbook-run.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | : ${AWS_ACCESS_KEY_ID?"Need to set AWS_ACCESS_KEY_ID"} 4 | : ${AWS_SECRET_ACCESS_KEY?"Need to set AWS_SECRET_ACCESS_KEY"} 5 | 6 | export ANSIBLE_HOST_KEY_CHECKING=False 7 | 8 | time ansible-playbook openshift-playbook.yml -i inventory/inventory.cfg "$@" -------------------------------------------------------------------------------- /openshift-teardown-run.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | : ${AWS_ACCESS_KEY_ID?"Need to set AWS_ACCESS_KEY_ID"} 4 | : ${AWS_SECRET_ACCESS_KEY?"Need to set AWS_SECRET_ACCESS_KEY"} 5 | 6 | export ANSIBLE_HOST_KEY_CHECKING=False 7 | 8 | time ansible-playbook teardown-playbook.yml -i inventory/inventory.cfg "$@" 9 | -------------------------------------------------------------------------------- /roles/add-users/tasks/add-user.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Add user to htpasswd 3 | command: "htpasswd -b {{htpasswd_path}} {{user.name}} {{user.password}}" 4 | become: true 5 | 6 | - name: Cluster admin rights 7 | command: "oadm policy add-cluster-role-to-user cluster-admin {{user.name}}" 8 | when: user.admin 9 | become: true -------------------------------------------------------------------------------- /roles/gluster-master-prereqs/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Add gluster repo to master 3 | shell: 'subscription-manager repos --enable="rh-gluster-3-for-rhel-7-server-rpms"' 4 | register: result 5 | until: result is succeeded 6 | retries: 10 7 | delay: 5 8 | become: true 9 | 10 | - name: Install packages 11 | yum: 12 | name: 13 | - "heketi-client" 14 | - "cns-deploy" 15 | become: true -------------------------------------------------------------------------------- /roles/disable-service-catalog/files/show-templates.sh: -------------------------------------------------------------------------------- 1 | 
#!/bin/bash 2 | for x in $(oc get templates -n openshift --template='{{range .items}}{{.metadata.name}}{{"\n"}}{{end}}'); do 3 |     tags=$(oc export template $x -n openshift | grep hidden) 4 |     if [[ ${tags} =~ "hidden" ]]; then 5 |         tags=`echo $tags | sed -e "s/,hidden//" -e "s/tags: //"` 6 |         oc annotate template $x tags=${tags} --overwrite -n openshift 7 |     fi 8 | done 9 |
-------------------------------------------------------------------------------- /roles/grafana-install/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install grafana (enterprise) 3 |   command: ansible-playbook -i ~{{amazon_user}}/openshift_inventory.cfg /usr/share/ansible/openshift-ansible/playbooks/openshift-grafana/config.yml 4 |   when: deployment_type == "openshift-enterprise" 5 | 6 | - name: Install grafana (origin) 7 |   command: ansible-playbook -i ~{{amazon_user}}/openshift_inventory.cfg /home/{{amazon_user}}/openshift-ansible/playbooks/openshift-grafana/config.yml 8 |   when: deployment_type == "origin"
-------------------------------------------------------------------------------- /amis/core/root/usr/bin/rpm-file-permissions: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | CHECK_DIRS="/ /opt /etc /usr /usr/bin /usr/lib /usr/lib64 /usr/share /usr/libexec" 4 | 5 | rpm_format="[%{FILESTATES:fstate} %7{FILEMODES:octal} %{FILENAMES:shescape}\n]" 6 | 7 | rpm -q --qf "$rpm_format" filesystem | while read line 8 | do 9 |   eval "set -- $line" 10 | 11 |   case $1 in 12 |   normal) ;; 13 |   *) continue ;; 14 |   esac 15 | 16 |   case " $CHECK_DIRS " in 17 |   *" $3 "*) 18 |     chmod "$(echo $2 | sed 's/.*\(...\)/\1/')" "$3" 19 |     ;; 20 |   esac 21 | done 22 |
-------------------------------------------------------------------------------- /roles/teardown-dns/tasks/delete-route.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Get Route Info 3 |   route53: 4 |     command: get 5 |     zone: "{{ zone }}" 6 |     record: "{{ record }}" 7 |     type: A 8 |     private_zone: "{{private_zone}}" 9 |   register: rec 10 |   ignore_errors: yes 11 | 12 | - name: Delete Route record 13 |   route53: 14 |     command: delete 15 |     zone: "{{ zone }}" 16 |     record: "{{ rec.set.record }}" 17 |     ttl: "{{ rec.set.ttl }}" 18 |     type: "{{ rec.set.type }}" 19 |     value: "{{ rec.set.value }}" 20 |     private_zone: "{{private_zone}}" 21 |   when: not((rec.set.record is undefined) or (rec.set.record is none)) 22 |   ignore_errors: yes
-------------------------------------------------------------------------------- /teardown-playbook.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: Teardown Environment 4 | 5 |   hosts: local 6 | 7 |   vars_files: 8 |   - vars/aws-config.yml 9 | 10 |   pre_tasks: 11 |     - name: Verify Ansible Version 12 |       assert: 13 |         that: 14 |           - "ansible_version.major == 2" 15 |           - "ansible_version.minor >= 4" 16 |         msg: "This playbook requires Ansible 2.4 or later" 17 | 18 |   post_tasks: 19 |     - name: Completed 20 |       debug: 21 |         msg: 'Finished tearing down environment {{namespace}}' 22 | 23 |   roles: 24 |     - define-derived-vars 25 |     - teardown-virtual-machines 26 |     - teardown-security-groups 27 |     - teardown-vpc 28 |     - teardown-dns
-------------------------------------------------------------------------------- /roles/teardown-security-groups/tasks/main.yml:
-------------------------------------------------------------------------------- 1 | --- 2 | - name: Delete openshift-vpc security group 3 | ec2_group: 4 | name: "{{namespace}}-vpc" 5 | region: "{{region}}" 6 | state: "absent" 7 | 8 | - name: Delete openshift-public-ingress security group 9 | ec2_group: 10 | name: "{{namespace}}-public-ingress" 11 | region: "{{region}}" 12 | state: "absent" 13 | 14 | - name: Delete openshift-public-egress security group 15 | ec2_group: 16 | name: "{{namespace}}-public-egress" 17 | region: "{{region}}" 18 | state: "absent" 19 | 20 | - name: Delete openshift-ssh security group 21 | ec2_group: 22 | name: "{{namespace}}-ssh" 23 | region: "{{region}}" 24 | state: "absent" -------------------------------------------------------------------------------- /roles/update-ssl-cockpit/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Create temporary file 3 | tempfile: 4 | state: file 5 | suffix: cert 6 | register: cockpit_cert 7 | delegate_to: localhost 8 | 9 | - name: append cockpit certs 10 | shell: "cat {{item}} >> {{cockpit_cert.path}}" 11 | with_items: 12 | - "{{master_ssl_cert_file}}" 13 | - "{{master_ssl_key_file}}" 14 | delegate_to: localhost 15 | 16 | - name: copy master cert to cockpit 17 | copy: 18 | src: "{{cockpit_cert.path}}" 19 | dest: "/etc/cockpit/ws-certs.d/1-my-cert.cert" 20 | become: true 21 | 22 | - name: restart cockpit 23 | systemd: 24 | name: cockpit 25 | enabled: yes 26 | state: restarted 27 | become: true -------------------------------------------------------------------------------- /roles/openshift-install/files/ansible.cfg: -------------------------------------------------------------------------------- 1 | # cat /etc/ansible/ansible.cfg 2 | # config file for ansible -- http://ansible.com/ 3 | # ============================================== 4 | [defaults] 5 | forks = 20 6 | host_key_checking = False 7 | #remote_user = ec2-user 8 | #roles_path = roles/ 9 | gathering = smart 10 | fact_caching = jsonfile 11 | fact_caching_connection = $HOME/ansible/facts 12 | #fact_caching_timeout = 600 13 | #log_path = $HOME/ansible.log 14 | #nocows = 1 15 | callback_whitelist = profile_tasks 16 | 17 | #[privilege_escalation] 18 | #become = False 19 | 20 | [ssh_connection] 21 | ssh_args = -o ControlMaster=auto -o ControlPersist=600s 22 | control_path = %(directory)s/%%h-%%r 23 | pipelining = True 24 | #timeout = 10 -------------------------------------------------------------------------------- /roles/openshift-install/files/install-openshift.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | export ANSIBLE_HOST_KEY_CHECKING=False 3 | 4 | export OA_PREFIX=/usr/share/ansible 5 | 6 | if [ $1 = "origin" ]; then 7 | OA_PREFIX=/home/{{amazon_user}} 8 | fi 9 | 10 | {% if ocp_version|version_compare('3.7', '<=') %} 11 | ansible-playbook -v -i ~{{amazon_user}}/openshift_inventory.cfg ${OA_PREFIX}/openshift-ansible/playbooks/byo/config.yml 12 | {% else %} 13 | ansible-playbook -v -i ~{{amazon_user}}/openshift_inventory.cfg ${OA_PREFIX}/openshift-ansible/playbooks/prerequisites.yml 14 | ansible-playbook -v -i ~{{amazon_user}}/openshift_inventory.cfg ${OA_PREFIX}/openshift-ansible/playbooks/deploy_cluster.yml 15 | {% if install_cloudforms %} 16 | ansible-playbook -v -i ~{{amazon_user}}/openshift_inventory.cfg ${OA_PREFIX}/openshift-ansible/playbooks/openshift-management/add_container_provider.yml 17 | {% endif %} 18 | {% endif %} 19 | 
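# Note: this file is an Ansible template (see roles/openshift-install/tasks/main.yml);
# the {{ ... }} and {% ... %} markers above are rendered before the script is copied
# to the bastion, where the role then invokes it as: ./install-openshift.sh <deployment_type>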
-------------------------------------------------------------------------------- /amis/core/root/usr/bin/fix-permissions: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # Allow this script to fail without failing a build 4 | set +e 5 | 6 | SYMLINK_OPT=${2:--L} 7 | 8 | # Fix permissions on the given directory or file to allow group read/write of 9 | # regular files and execute of directories. 10 | 11 | [ $(id -u) -ne 0 ] && CHECK_OWNER=" -uid $(id -u)" 12 | 13 | # If argument does not exist, script will still exit with 0, 14 | # but at least we'll see something went wrong in the log 15 | if ! [ -e "$1" ] ; then 16 | echo "ERROR: File or directory $1 does not exist." >&2 17 | # We still want to end successfully 18 | exit 0 19 | fi 20 | 21 | find $SYMLINK_OPT "$1" ${CHECK_OWNER} \! -gid 0 -exec chgrp 0 {} + 22 | find $SYMLINK_OPT "$1" ${CHECK_OWNER} \! -perm -g+rw -exec chmod g+rw {} + 23 | find $SYMLINK_OPT "$1" ${CHECK_OWNER} -perm /u+x -a \! -perm /g+x -exec chmod g+x {} + 24 | find $SYMLINK_OPT "$1" ${CHECK_OWNER} -type d \! -perm /g+x -exec chmod g+x {} + 25 | 26 | # Always end successfully 27 | exit 0 28 | -------------------------------------------------------------------------------- /roles/openshift-pre-reqs/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: install pre-req packages on nodes 3 | yum: 4 | name: 5 | - "wget" 6 | - "git" 7 | - "net-tools" 8 | - "bind-utils" 9 | - "iptables-services" 10 | - "bridge-utils" 11 | - "bash-completion" 12 | - "kexec-tools" 13 | - "sos" 14 | - "psacct" 15 | - "docker-1.13.1" 16 | - "ntp" 17 | - "cockpit" 18 | state: "present" 19 | become: true 20 | 21 | - name: configure docker storage 22 | template: 23 | src: "../files/docker-storage-setup" 24 | dest: "/etc/sysconfig/docker-storage-setup" 25 | become: true 26 | 27 | - name: configure docker storage 28 | command: "docker-storage-setup" 29 | become: true 30 | 31 | - name: Update docker options 32 | shell: "sed -i '/OPTIONS=.*/c\\OPTIONS=\"--selinux-enabled --insecure-registry 172.30.0.0/16\"' /etc/sysconfig/docker" 33 | become: true 34 | 35 | - name: enable docker on master and nodes 36 | systemd: 37 | name: docker 38 | enabled: yes 39 | state: started 40 | become: true 41 | -------------------------------------------------------------------------------- /roles/disable-service-catalog/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: copy update script 3 | copy: 4 | src: "files/show-templates.sh" 5 | dest: "/home/{{amazon_user}}" 6 | 7 | - name: oc login 8 | shell: "oc login -u system:admin" 9 | become: true 10 | 11 | - name: Set execute attribute on script 12 | file: 13 | path: "/home/{{amazon_user}}/show-templates.sh" 14 | state: touch 15 | mode: "o+x" 16 | 17 | - name: Remove hidden attribute from templates 18 | shell: "/home/{{amazon_user}}/show-templates.sh" 19 | become: true 20 | 21 | - name: Remove console extension 22 | lineinfile: 23 | dest: /etc/origin/master/master-config.yaml 24 | state: absent 25 | regexp: "/etc/origin/master/openshift-ansible-catalog-console\\.js" 26 | become: true 27 | 28 | - name: restart master-controller 29 | systemd: 30 | name: atomic-openshift-master-controllers.service 31 | enabled: yes 32 | state: restarted 33 | become: true 34 | 35 | - name: restart master-api 36 | systemd: 37 | name: atomic-openshift-master-api.service 38 | enabled: yes 39 | state: restarted 40 | 
become: true
-------------------------------------------------------------------------------- /roles/define-derived-vars/tasks/main.yml: -------------------------------------------------------------------------------- 1 | # Sets up common derived variables that get re-used across different plays; putting them 2 | # here allows them to be managed centrally. 3 | --- 4 | - name: Set master dns 5 |   set_fact: 6 |     public_master_dns: "{{prefix_master_dns}}.{{public_dns_zone}}" 7 |   when: "prefix_master_dns is defined and prefix_master_dns|length>0" 8 | 9 | - name: Set master dns 10 |   set_fact: 11 |     public_master_dns: "{{public_dns_zone}}" 12 |   when: "prefix_master_dns is not defined or prefix_master_dns|length==0" 13 | 14 | - name: Set master SSL facts for lets encrypt 15 |   set_fact: 16 |     master_ssl_cert_file: certs/letsencrypt/{{public_master_dns}}/fullchain.pem 17 |     master_ssl_key_file: certs/letsencrypt/{{public_master_dns}}/privkey.pem 18 |   when: use_lets_encrypt 19 | 20 | - name: Set wildcard SSL facts 21 |   set_fact: 22 |     wildcard_ssl_cert_file: certs/letsencrypt/{{public_subdomain_prefix}}.{{public_dns_zone}}/cert.pem 23 |     wildcard_ssl_key_file: certs/letsencrypt/{{public_subdomain_prefix}}.{{public_dns_zone}}/privkey.pem 24 |     wildcard_ssl_fullchain_file: certs/letsencrypt/{{public_subdomain_prefix}}.{{public_dns_zone}}/fullchain.pem 25 |   when: use_lets_encrypt 26 | 27 |
-------------------------------------------------------------------------------- /amis/core/root/usr/bin/prepare-yum-repositories: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -ex 4 | 5 | DEFAULT_REPOS=${DEFAULT_REPOS:-"rhel-7-server-rpms rhel-7-server-extras-rpms rhel-7-server-optional-rpms"} 6 | SKIP_REPOS_ENABLE=${SKIP_REPOS_ENABLE:-false} 7 | SKIP_REPOS_DISABLE=${SKIP_REPOS_DISABLE:-false} 8 | 9 | is_subscribed() { 10 |   for f in /run/secrets/etc-pki-entitlement/*.pem ; do 11 |     [ -e "$f" ] && return 0 12 |     break 13 |   done 14 |   return 1 15 | } 16 | 17 | if [ "$SKIP_REPOS_DISABLE" = false ] && is_subscribed; then 18 |   # Disable only repos that might come from subscribed host, because there 19 |   # might be other repos provided by user or build system 20 | 21 |   disable_repos= 22 |   # Lines look like: "Repo-id      : dist-tag-override/x86_64" 23 |   while IFS=' /' read -r _ _ repo_id _; do 24 |     case $repo_id in 25 |       rhel-*) disable_repos+=" $repo_id" ;; 26 |       \* ) echo $repo_id ;; 27 |     esac 28 |   done <<<"$(yum repolist -v 2>/dev/null | grep Repo-id)" 29 | 30 |   if test -n "$disable_repos"; then 31 |     /usr/bin/yum-config-manager --disable $disable_repos &> /dev/null 32 |   fi 33 | fi 34 | 35 | if [ ${SKIP_REPOS_ENABLE} = false ] && [ -n "${DEFAULT_REPOS}" -o $# -gt 0 ] ; then 36 |   /usr/bin/yum-config-manager --enable ${DEFAULT_REPOS} "$@" 37 | fi 38 |
-------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # openshift-aws-setup 2 | 3 | ## Overview 4 | 5 | This is an Ansible automation playbook that provisions a small OpenShift environment (1 master plus a configurable number of app nodes) suitable for demos, POCs and small workshops. The playbook can deploy either OpenShift Origin or OpenShift Container Platform. 6 | 7 | AWS related configuration can be customised by modifying ```vars/aws-config.yml```. The number of application nodes is configurable; the default is 3.
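For example, a minimal configuration might look like the sketch below. The variable names (`namespace`, `region`, `app_nodes`, `deployment_type`, `ocp_version`) come from the roles in this repository; the values shown are placeholders only, so check ```vars/aws-config.yml``` itself for the full set of variables.

```yaml
# Illustrative values only; see vars/aws-config.yml for the complete variable set.
namespace: openshift        # prefix used to name and tag all AWS resources
region: us-east-1           # AWS region to provision into
app_nodes: 3                # number of application nodes (default is 3)
deployment_type: origin     # "origin" or "openshift-enterprise"
ocp_version: "3.9"          # OpenShift version to install
```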
8 | 9 | ## Usage 10 | 11 | Please see the branch that matches the version of OpenShift you want to install for usage instructions and further details. 12 | 13 | ## Network Topology 14 | 15 | ![Network Diagram](./docs/network-topology-openshift.jpg) 16 | 17 | A private VPC and DNS zone are used, and OpenShift is installed using the private IP addresses. This means the IP addresses never change, unlike EC2 public addresses, so the environment can be stopped and started as needed. 18 | 19 | A bastion is created as part of the installation; once the installation is complete it is no longer needed and may be stopped or terminated. Note that it can be handy to keep the bastion around in a stopped state in case you want to re-run the installation later. 20 | 21 | ## References 22 | 23 | - https://www.codeproject.com/Articles/1168687/Get-up-and-running-with-OpenShift-on-AWS 24 | - https://docs.openshift.org/latest/welcome/index.html
-------------------------------------------------------------------------------- /roles/update-ssl-router/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Set cert path 3 |   set_fact: 4 |     cert_path: /home/{{amazon_user}}/certs/{{public_subdomain_prefix}}.{{public_dns_zone}} 5 | 6 | - name: create certs directory on master 7 |   file: path={{cert_path}} state=directory 8 | 9 | - name: copy wildcard certs to master 10 |   copy: 11 |     src: "{{item}}" 12 |     dest: "{{cert_path}}/{{item | basename}}" 13 |   with_items: 14 |     - "{{wildcard_ssl_cert_file}}" 15 |     - "{{wildcard_ssl_key_file}}" 16 | 17 | - name: oc login 18 |   shell: "oc login -u system:admin" 19 |   become: true 20 | 21 | - name: Scale router down to zero 22 |   shell: "oc scale dc/router --replicas=0 -n default && sleep 10" 23 |   become: true 24 |   ignore_errors: true 25 | 26 | - name: Remove DEFAULT_CERTIFICATE_PATH environment variable from router 27 |   shell: "oc env dc/router DEFAULT_CERTIFICATE_PATH-" 28 |   become: true 29 | 30 | - name: Delete router secret 31 |   shell: "oc delete secret router-certs -n default" 32 |   become: true 33 | 34 | - name: Update router secret 35 |   shell: "oc secrets new router-certs tls.crt={{cert_path}}/{{wildcard_ssl_cert_file | basename}} tls.key={{cert_path}}/{{wildcard_ssl_key_file | basename}} --type='kubernetes.io/tls' --confirm -n default" 36 |   become: true 37 | 38 | - name: Scale router back to one 39 |   shell: "oc scale dc/router --replicas=1 -n default && sleep 5" 40 |   become: true 41 |   ignore_errors: true
-------------------------------------------------------------------------------- /roles/setup-vpc/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Provision VPC 3 |   ec2_vpc_net: 4 |     name: "{{namespace}}-vpc" 5 |     cidr_block: "{{ vpc_cidr }}" 6 |     tags: 7 |       Name: "{{namespace}}-vpc" 8 |       namespace: "{{namespace}}" 9 |     region: "{{ region }}" 10 |     state: "present" 11 |   register: vpc_facts 12 | 13 | - name: Provision internet gateway 14 |   ec2_vpc_igw: 15 |     vpc_id: "{{vpc_facts['vpc']['id']}}" 16 |     region: "{{ region }}" 17 |     state: "present" 18 |   register: igw_facts 19 | 20 | - name: Provision subnet 21 |   ec2_vpc_subnet: 22 |     vpc_id: "{{vpc_facts['vpc']['id']}}" 23 |     cidr: "{{subnet_cidr}}" 24 |     tags: 25 |       Name: "{{namespace}}-subnet" 26 |       namespace: "{{namespace}}" 27 |     region: "{{ region }}" 28 |     state: "present" 29 |   register: subnet_facts 30 | 31 | - name: Set up public subnet route table 32 |   ec2_vpc_route_table: 33 |     vpc_id:
"{{vpc_facts['vpc']['id']}}" 34 | tags: 35 | Name: "{{namespace}}-route" 36 | namespace: "{{namespace}}" 37 | subnets: 38 | - "{{subnet_facts['subnet']['id']}}" 39 | routes: 40 | - dest: 0.0.0.0/0 41 | gateway_id: "{{igw_facts['gateway_id']}}" 42 | region: "{{ region }}" 43 | state: "present" 44 | register: route_facts 45 | 46 | - name: register vpc facts 47 | set_fact: 48 | vpc_id: "{{vpc_facts['vpc']['id']}}" 49 | 50 | - name: set availability zone fact 51 | set_fact: 52 | availability_zone: "{{subnet_facts['subnet']['availability_zone']}}" -------------------------------------------------------------------------------- /roles/add-users/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Add users 3 | include: add-user.yml user={{ item }} 4 | with_items: "{{ users }}" 5 | 6 | - name: Create generic users dictionary 7 | set_fact: 8 | #generic_users: '{{generic_users|combine( {"name": generic_user_prefix+item, "admin":false, "password": generic_user_password} ) }}' 9 | generic_users: "{{ generic_users|default([]) + [ {'name': generic_user_prefix + item, 'admin': false, 'password': generic_user_password} ] }}" 10 | with_sequence: count={{generic_user_count}} format="%02d" 11 | when: create_generic_user 12 | 13 | - name: Add generic users 14 | include: add-user.yml user={{ item }} 15 | with_items: "{{ generic_users }}" 16 | when: create_generic_user 17 | 18 | - name: Add password for cockpit 19 | user: 20 | name: "{{amazon_user}}" 21 | password: "{{ cockpit_password | password_hash('sha512') }}" 22 | become: true 23 | when: cockpit_password is defined and cockpit_password|length>0 24 | 25 | # - name: Add admin accounts to master for Cockpit 26 | # shell: "useradd --home /home/{{item.name}} {{item.name}} && echo {{item.name}}:{{item.password}} | chpasswd –crypt-method=SHA512" 27 | # when: item.admin 28 | # with_items: "{{ users }}" 29 | # become: true 30 | 31 | # - name: oc login as admin users for cockpit to work 32 | # shell: "oc login -u {{item.name}} -p {{item.password}} https://localhost:8443 --insecure-skip-tls-verify=true" 33 | # when: item.admin 34 | # with_items: "{{ users }}" 35 | # sudo: true 36 | # sudo_user: "{{item.name}}" -------------------------------------------------------------------------------- /roles/teardown-vpc/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Gather VPC facts 3 | ec2_vpc_net_facts: 4 | region: "{{ region }}" 5 | filters: 6 | "tag:Name": "{{namespace}}-vpc" 7 | register: vpc_facts 8 | 9 | - debug: 10 | msg: "{{vpc_facts}}" 11 | 12 | - block: 13 | - name: Gather route table facts 14 | ec2_vpc_route_table_facts: 15 | region: "{{ region }}" 16 | filters: 17 | vpc-id: "{{ vpc_facts.vpcs[0].vpc_id }}" 18 | register: vpc_route_table_facts 19 | 20 | - name: Remove route tables from VPC 21 | ec2_vpc_route_table: 22 | vpc_id: "{{ item.vpc_id }}" 23 | route_table_id: "{{item.id}}" 24 | purge_routes: true 25 | purge_subnets: true 26 | region: "{{ region }}" 27 | state: absent 28 | lookup: id 29 | with_items: "{{ vpc_route_table_facts.route_tables }}" 30 | ignore_errors: true 31 | 32 | - name: Remove subnets from VPC 33 | ec2_vpc_subnet: 34 | vpc_id: "{{ item.id }}" 35 | cidr: "{{subnet_cidr}}" 36 | region: "{{ region }}" 37 | state: absent 38 | with_items: "{{ vpc_facts.vpcs }}" 39 | 40 | - name: Remove internet gateway 41 | ec2_vpc_igw: 42 | vpc_id: "{{ item.id }}" 43 | region: "{{ region }}" 44 | state: absent 45 | with_items: "{{ 
vpc_facts.vpcs }}" 46 | 47 |   - name: Delete VPC 48 |     ec2_vpc_net: 49 |       name: "{{namespace}}-vpc" 50 |       cidr_block: "{{ vpc_cidr }}" 51 |       region: "{{ region }}" 52 |       state: absent 53 |     with_items: "{{ vpc_facts.vpcs }}" 54 | 55 |   when: vpc_facts.vpcs|length>0 56 |
-------------------------------------------------------------------------------- /amis/README.md: -------------------------------------------------------------------------------- 1 | # AMI Base Images 2 | 3 | ## Setup 4 | 5 | Install the [packer](https://www.packer.io) v1.2.3 release. 6 | 7 | ## Usage 8 | 9 | 10 | First build core, 11 | 12 | ```sh 13 | cd core 14 | packer build -var-file=vars.json -color=false packer.json | tee build.log 15 | ``` 16 | 17 | then build base using the `AMI ID` of core. 18 | 19 | ```sh 20 | cd base 21 | packer build -var-file=vars.json -color=false packer.json | tee build.log 22 | ``` 23 | 24 | Forgot the ID? Try this: 25 | 26 | ```sh 27 | cd core 28 | egrep "${AWS_REGION}\:\sami\-" build.log | cut -d' ' -f2 > ami_id.txt 29 | ``` 30 | 31 | ### Example `vars.json` files 32 | 33 | ```json 34 | { 35 |   "aws_access_key": "myaccesskey", 36 |   "aws_secret_key": "mysecretkey", 37 |   "aws_region": "us-east-1", 38 |   "rhsm_username": "", 39 |   "rhsm_password": "", 40 |   "rhsm_org_id": "myrhsmorgid", 41 |   "rhsm_key_id": "myrhsmkeyid", 42 |   "rhsm_pool_id": "", 43 |   "ami_id": "ami-c998b6b2" 44 | } 45 | ``` 46 | 47 | ```json 48 | { 49 |   "aws_access_key": "myaccesskey", 50 |   "aws_secret_key": "mysecretkey", 51 |   "aws_region": "us-east-1", 52 |   "rhsm_username": "myrhsmaccount", 53 |   "rhsm_password": "myrhsmpassword", 54 |   "rhsm_org_id": "", 55 |   "rhsm_key_id": "", 56 |   "rhsm_pool_id": "", 57 |   "ami_id": "ami-c998b6b2" 58 | } 59 | ``` 60 | 61 | ```json 62 | { 63 |   "aws_access_key": "myaccesskey", 64 |   "aws_secret_key": "mysecretkey", 65 |   "aws_region": "us-east-1", 66 |   "rhsm_username": "myrhsmaccount", 67 |   "rhsm_password": "myrhsmpassword", 68 |   "rhsm_org_id": "", 69 |   "rhsm_key_id": "", 70 |   "rhsm_pool_id": "myrhsmpoolid", 71 |   "ami_id": "ami-c998b6b2" 72 | } 73 | ``` 74 |
-------------------------------------------------------------------------------- /roles/teardown-dns/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: List all hosted zones 3 |   route53_facts: 4 |     query: hosted_zone 5 |   register: hosted_zones 6 | 7 | - name: Delete master and bastion routes 8 |   include: delete-route.yml zone={{ dns_zone }} record="{{item}}.{{ dns_zone }}" private_zone="true" 9 |   with_items: 10 |     - "bastion" 11 |     - "master" 12 | 13 | - name: Delete node routes 14 |   include: delete-route.yml zone={{ dns_zone }} record="node{{item}}.{{ dns_zone }}" private_zone="true" 15 |   with_sequence: start=1 end={{app_nodes|int}} 16 |   ignore_errors: true 17 | 18 | # - name: Delete private DNS zone for openshift 19 | #   route53_zone: 20 | #     zone: "{{dns_zone}}" 21 | #     state: "absent" 22 | #     vpc_region: "{{region}}" 23 | #   ignore_errors: yes 24 | #   register: delete_output 25 | 26 | - name: Get private DNS zone for openshift 27 |   shell: aws route53 list-hosted-zones --query 'HostedZones[*]' --output text | grep '\/hostedzone\/.*{{ dns_zone }}' | sed -e 's/.*\///' -e 's/[^a-zA-Z0-9].*//' 28 |   register: hosted_zone_id 29 | 30 | - debug: var=hosted_zone_id.stdout_lines[0] 31 | 32 | - name: Delete private DNS zone for openshift 33 |   shell: aws route53 delete-hosted-zone --id '{{ hosted_zone_id.stdout_lines[0] }}' 34 |   when: hosted_zone_id.stdout_lines[0] is defined 35 | 36 | - name: Delete Public
Route 37 |   include: delete-route.yml zone={{ public_dns_zone }} record={{ public_master_dns }} private_zone="false" 38 | 39 | - name: Delete Public Wildcard Route 40 |   include: delete-route.yml zone={{ public_dns_zone }} record="*.{{public_subdomain_prefix}}.{{ public_dns_zone }}" private_zone="false" 41 |
-------------------------------------------------------------------------------- /amis/core/test/run: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | TEMPLATE_NAME=${TEMPLATE_NAME-template.json} 4 | SCRIPT_PATH="${BASH_SOURCE[0]}" 5 | SCRIPT_DIRS="$( cd "`dirname "${SCRIPT_PATH}"`" && pwd )" 6 | SCRIPT_FILE="${SCRIPT_PATH##*\/}" 7 | SCRIPT_NAME="${SCRIPT_FILE%.*}" 8 | SCRIPT_TYPE="${SCRIPT_FILE##*\.}" 9 | SCRIPT_RELATIVE_PATH="$( cd "`dirname "${SCRIPT_PATH}"`" && pwd )/${SCRIPT_FILE}" 10 | 11 | pushd "${SCRIPT_RELATIVE_PATH}" &>/dev/null 12 | 13 | if [ -h "$( pwd )" ]; then 14 |   SCRIPT_ABSOLUTE_PATH="$( readlink ${SCRIPT_RELATIVE_PATH} )" 15 | else 16 |   SCRIPT_ABSOLUTE_PATH="${SCRIPT_RELATIVE_PATH}" 17 | fi 18 | 19 | popd &>/dev/null 20 | 21 | test_packer_validate_usage() { 22 |   printf "Testing 'packer validate' usage...\n" 23 |   packer validate "$(dirname $SCRIPT_ABSOLUTE_PATH)/../${TEMPLATE_NAME}" 24 | } 25 | 26 | check_result() { 27 |   local result="$1" 28 |   if [[ "$result" != "0" ]]; then 29 |     printf "Validate '%s' test FAILED (exit code: %s)\n" "${TEMPLATE_NAME}" "${result}" 30 |     return $result 31 |   fi 32 | } 33 | 34 | test_packer_validate_usage 35 | 36 | if ! check_result $?; then 37 |   printf "** Source File '%s' PATHs **\n" "$(basename "`test -L ${BASH_SOURCE[0]} && readlink ${BASH_SOURCE[0]} || echo ${BASH_SOURCE[0]}`")" 38 |   printf "SCRIPT_PATH          \xE2\x9E\xA1  %s\n" "${SCRIPT_PATH}" 39 |   printf "SCRIPT_DIRS          \xE2\x9E\xA1  %s\n" "${SCRIPT_DIRS}" 40 |   printf "SCRIPT_FILE          \xE2\x9E\xA1  %s\n" "${SCRIPT_FILE}" 41 |   printf "SCRIPT_NAME          \xE2\x9E\xA1  %s\n" "${SCRIPT_NAME}" 42 |   printf "SCRIPT_TYPE          \xE2\x9E\xA1  %s\n" "${SCRIPT_TYPE}" 43 |   printf "SCRIPT_RELATIVE_PATH \xE2\x9E\xA1  %s\n" "${SCRIPT_RELATIVE_PATH}" 44 |   printf "SCRIPT_ABSOLUTE_PATH \xE2\x9E\xA1  %s\n" "${SCRIPT_ABSOLUTE_PATH}" 45 | fi 46 |
-------------------------------------------------------------------------------- /amis/base/test/run: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | TEMPLATE_NAME=${TEMPLATE_NAME-template.json} 4 | SCRIPT_PATH="${BASH_SOURCE[0]}" 5 | SCRIPT_DIRS="$( cd "`dirname "${SCRIPT_PATH}"`" && pwd )" 6 | SCRIPT_FILE="${SCRIPT_PATH##*\/}" 7 | SCRIPT_NAME="${SCRIPT_FILE%.*}" 8 | SCRIPT_TYPE="${SCRIPT_FILE##*\.}" 9 | SCRIPT_RELATIVE_PATH="$( cd "`dirname "${SCRIPT_PATH}"`" && pwd )/${SCRIPT_FILE}" 10 | 11 | pushd "${SCRIPT_RELATIVE_PATH}" &>/dev/null 12 | 13 | if [ -h "$( pwd )" ]; then 14 |   SCRIPT_ABSOLUTE_PATH="$( readlink ${SCRIPT_RELATIVE_PATH} )" 15 | else 16 |   SCRIPT_ABSOLUTE_PATH="${SCRIPT_RELATIVE_PATH}" 17 | fi 18 | 19 | popd &>/dev/null 20 | 21 | test_packer_validate_usage() { 22 |   printf "Testing 'packer validate' usage...\n" 23 |   packer validate "$(dirname $SCRIPT_ABSOLUTE_PATH)/../${TEMPLATE_NAME}" 24 | } 25 | 26 | check_result() { 27 |   local result="$1" 28 |   if [[ "$result" != "0" ]]; then 29 |     printf "Validate template '%s' test FAILED (exit code: %s)\n" "${TEMPLATE_NAME}" "${result}" 30 |     return $result 31 |   fi 32 | } 33 | 34 | test_packer_validate_usage 35 | 36 | if !
check_result $?; then 37 | printf "** Source File '%s' PATHs **\n" "$(basename "`test -L ${BASH_SOURCE[0]} && readlink ${BASH_SOURCE[0]} || echo ${BASH_SOURCE[0]}`")" 38 | printf "SCRIPT_PATH \xE2\x9E\xA1 %s\n" "${SCRIPT_PATH}" 39 | printf "SCRIPT_DIRS \xE2\x9E\xA1 %s\n" "${SCRIPT_DIRS}" 40 | printf "SCRIPT_FILE \xE2\x9E\xA1 %s\n" "${SCRIPT_FILE}" 41 | printf "SCRIPT_NAME \xE2\x9E\xA1 %s\n" "${SCRIPT_NAME}" 42 | printf "SCRIPT_TYPE \xE2\x9E\xA1 %s\n" "${SCRIPT_TYPE}" 43 | printf "SCRIPT_RELATIVE_PATH \xE2\x9E\xA1 %s\n" "${SCRIPT_RELATIVE_PATH}" 44 | printf "SCRIPT_ABSOLUTE_PATH \xE2\x9E\xA1 %s\n" "${SCRIPT_ABSOLUTE_PATH}" 45 | fi 46 | -------------------------------------------------------------------------------- /roles/setup-host-groups/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Add master/bastion to created_vms group 3 | add_host: 4 | name: "{{ item.ip }}" 5 | groups: created_vms 6 | instance_name: "{{ item.name }}" 7 | with_items: 8 | - { name: 'master', ip: '{{master_public_ip}}' } 9 | - { name: 'bastion', ip: '{{bastion_public_ip}}' } 10 | 11 | - name: Add nodes to created_vms group 12 | add_host: 13 | name: "{{ item.public_ip }}" 14 | groups: created_vms 15 | instance_name: "node{{ item.index }}" 16 | with_items: "{{ nodes }}" 17 | 18 | - name: Add master to created_nodes group 19 | add_host: 20 | name: "{{ item.ip }}" 21 | groups: created_nodes 22 | instance_name: "{{ item.name }}" 23 | with_items: 24 | - { name: 'master', ip: '{{master_public_ip}}' } 25 | 26 | - name: Add nodes to created_nodes group 27 | add_host: 28 | name: "{{ item.public_ip }}" 29 | groups: created_nodes 30 | instance_name: "node{{ item.index }}" 31 | with_items: "{{ nodes }}" 32 | 33 | - name: Master host 34 | add_host: 35 | name: "{{ master_public_ip}}" 36 | groups: master 37 | instance_name: master 38 | 39 | - name: Bastion host 40 | add_host: 41 | name: "{{ bastion_public_ip}}" 42 | groups: bastion 43 | instance_name: bastion 44 | availability_zone: "{{availability_zone}}" 45 | master_private_dns_name: "{{master_private_dns_name}}" 46 | public_master_dns: "{{public_master_dns}}" 47 | nodes: "{{ nodes }}" 48 | bastion_public_ip: "{{bastion_public_ip}}" 49 | 50 | - name: Gluster hosts 51 | add_host: 52 | name: "{{ item.public_ip }}" 53 | groups: gluster 54 | instance_name: "{{ item.index}}" 55 | with_items: "{{nodes[0:3]}}" 56 | when: install_gluster -------------------------------------------------------------------------------- /roles/register-virtual-machines/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Force unregister before register 3 | redhat_subscription: 4 | state: absent 5 | ignore_errors: true 6 | become: true 7 | 8 | - name: Customer portal registration 9 | redhat_subscription: 10 | state: present 11 | username: "{{rhsm_username}}" 12 | password: "{{rhsm_password}}" 13 | register: result 14 | until: result is succeeded 15 | retries: 10 16 | delay: 5 17 | become: true 18 | when: (rhsm_username is defined) and 19 | (rhsm_password is defined) 20 | 21 | - name: Activation key registration 22 | redhat_subscription: 23 | state: "present" 24 | activationkey: "{{ rhsm_key_id }}" 25 | org_id: "{{ rhsm_org_id }}" 26 | register: result 27 | until: result is succeeded 28 | retries: 10 29 | delay: 5 30 | become: true 31 | when: (rhsm_key_id is defined) and 32 | (rhsm_org_id is defined) 33 | 34 | - name: Subscribe to pool 35 | shell: /usr/bin/subscription-manager 
attach --pool={{ rhsm_pool }} 36 | register: task_result 37 | until: task_result.rc == 0 38 | retries: 10 39 | delay: 5 40 | ignore_errors: no 41 | become: true 42 | when: rhsm_pool is defined 43 | 44 | - name: Disable Repos 45 | shell: 'subscription-manager repos --disable="*"' 46 | register: result 47 | until: result is succeeded 48 | retries: 10 49 | delay: 5 50 | become: true 51 | 52 | - name: Enable required repos 53 | shell: 'subscription-manager repos --enable="rhel-7-server-rpms" --enable="rhel-7-server-extras-rpms" --enable="rhel-7-server-ose-{{ocp_version}}-rpms" --enable="rhel-7-fast-datapath-rpms" --enable="rhel-7-server-ansible-2.4-rpms"' 54 | register: result 55 | until: result is succeeded 56 | retries: 10 57 | delay: 5 58 | become: true -------------------------------------------------------------------------------- /roles/setup-dns/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | - name: create DNS zone for openshift 4 | route53_zone: 5 | zone: "{{dns_zone}}" 6 | state: "present" 7 | vpc_id: "{{vpc_id}}" 8 | vpc_region: "{{region}}" 9 | comment: "Internal zone for Openshift" 10 | register: aws_zone 11 | 12 | - name: add bastion dns 13 | route53: 14 | zone: "{{dns_zone}}" 15 | record: "bastion.{{dns_zone}}" 16 | type: A 17 | ttl: 300 18 | value: "{{bastion_private_ip}}" 19 | wait: yes 20 | vpc_id: "{{vpc_id}}" 21 | private_zone: true 22 | command: "create" 23 | overwrite: yes 24 | 25 | - name: add master dns 26 | route53: 27 | zone: "{{dns_zone}}" 28 | record: "master.{{dns_zone}}" 29 | type: A 30 | ttl: 300 31 | value: "{{master_private_ip}}" 32 | wait: yes 33 | vpc_id: "{{vpc_id}}" 34 | private_zone: true 35 | command: "create" 36 | overwrite: yes 37 | 38 | - name: add node dns 39 | route53: 40 | zone: "{{dns_zone}}" 41 | record: "node{{item.index}}.{{dns_zone}}" 42 | type: A 43 | ttl: 300 44 | value: "{{item.private_ip}}" 45 | wait: yes 46 | vpc_id: "{{vpc_id}}" 47 | private_zone: true 48 | command: "create" 49 | overwrite: yes 50 | with_items: "{{ nodes }}" 51 | 52 | - name: add public dns 53 | route53: 54 | zone: "{{public_dns_zone}}" 55 | record: "{{public_master_dns}}" 56 | type: A 57 | ttl: 300 58 | value: "{{master_public_ip}}" 59 | wait: yes 60 | command: "create" 61 | overwrite: yes 62 | 63 | - name: add public wildcard dns 64 | route53: 65 | zone: "{{public_dns_zone}}" 66 | record: "*.{{public_subdomain_prefix}}.{{public_dns_zone}}" 67 | type: A 68 | ttl: 300 69 | value: "{{master_public_ip}}" 70 | wait: yes 71 | command: "create" 72 | overwrite: yes -------------------------------------------------------------------------------- /roles/openshift-install/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: install pre-req packages on bastion 3 | yum: 4 | name: 5 | - "git" 6 | - "@Development Tools" 7 | - "openssl-devel" 8 | - "python-devel" 9 | - "gcc" 10 | - "libffi-devel" 11 | - "ansible" 12 | - "atomic-openshift-utils" 13 | state: "present" 14 | become: true 15 | when: deployment_type == 'origin' 16 | 17 | - name: install atomic-openshift-utils on bastion 18 | yum: 19 | name: 20 | - "atomic-openshift-utils" 21 | state: "present" 22 | become: true 23 | when: deployment_type != 'origin' and ocp_version is version_compare('3.9', '<=') 24 | 25 | - name: install openshift-ansible on bastion 26 | yum: 27 | name: 28 | - "openshift-ansible" 29 | state: "present" 30 | become: true 31 | when: deployment_type != 'origin' 32 | 33 | - name: copy 
ansible.cfg file 34 |   copy: 35 |     src: ../files/ansible.cfg 36 |     dest: "~{{amazon_user}}/ansible.cfg" 37 | 38 | - name: copy inventory for openshift installation 39 |   template: 40 |     src: "../files/openshift_inventory.cfg" 41 |     dest: "~{{amazon_user}}/openshift_inventory.cfg" 42 | 43 | - name: checkout openshift ansible repo on bastion 44 |   git: 45 |     repo: "https://github.com/openshift/openshift-ansible" 46 |     dest: "~{{amazon_user}}/openshift-ansible" 47 |     version: "{{openshift_branch}}" 48 |   when: deployment_type == 'origin' 49 | 50 | - name: copy script for openshift installation 51 |   template: 52 |     src: "../files/install-openshift.sh" 53 |     dest: "~{{amazon_user}}/install-openshift.sh" 54 |     mode: "0775" 55 |   remote_user: "{{amazon_user}}" 56 | 57 | - name: provide details to check installation 58 |   debug: 59 |     msg: "Check file install-openshift.log on the bastion: 'ssh {{amazon_user}}@{{bastion_public_ip}}'" 60 | 61 | - name: run openshift installation script 62 |   shell: './install-openshift.sh {{deployment_type}} |& tee install-openshift.log'
-------------------------------------------------------------------------------- /roles/teardown-virtual-machines/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Gather EC2 facts 3 |   ec2_remote_facts: 4 |     region: "{{ region }}" 5 |     filters: 6 |       "tag:namespace": "{{namespace}}" 7 |   register: ec2_facts 8 | 9 | - name: Unsubscribe VMs 10 |   include: unregister-vm.yml public_ip={{ item.public_ip_address }} 11 |   with_items: "{{ ec2_facts.instances }}" 12 |   when: ((deployment_type == 'openshift-enterprise') and (item.state == 'running')) 13 |   ignore_errors: yes 14 | 15 | - name: Notify of VMs that could not be unsubscribed 16 |   debug: 17 |     msg: "The VM {{ item.private_dns_name }} could not be unsubscribed because it was stopped."
18 |   with_items: "{{ ec2_facts.instances }}" 19 |   when: ((deployment_type == 'openshift-enterprise') and (item.state == 'stopped')) 20 |   ignore_errors: yes 21 | 22 | - name: Disassociate elastic IP 23 |   ec2_eip: 24 |     device_id: "{{item.id}}" 25 |     ip: "{{item.public_ip_address}}" 26 |     region: "{{region}}" 27 |     state: absent 28 |     release_on_disassociation: yes 29 |   with_items: "{{ ec2_facts.instances }}" 30 |   when: ((item is defined) and (item.tags.Name == 'master-' ~ namespace) and (item.state != 'terminated')) 31 | 32 | - name: Terminate EC2 VMs 33 |   ec2: 34 |     state: absent 35 |     region: "{{ region }}" 36 |     instance_ids: "{{ item.id }}" 37 |     wait: yes 38 |   with_items: "{{ ec2_facts.instances }}" 39 |   when: item.state != 'terminated' 40 | 41 | # In OCP 3.7 volumes are tagged using the kube cluster tag 42 | # so we can be sure we are deleting volumes belonging to the 43 | # cluster we are tearing down 44 | - name: Retrieve dynamic volumes 45 |   ec2_vol_facts: 46 |     region: "{{region}}" 47 |     filters: 48 |       "{'tag:kubernetes.io/cluster/{{namespace}}':'owned','status':'available'}" 49 |   register: aws_volumes 50 | 51 | - debug: 52 |     msg: "{{aws_volumes}}" 53 | 54 | - name: Delete dynamic volumes 55 |   ec2_vol: 56 |     region: "{{region}}" 57 |     state: "absent" 58 |     id: "{{item.id}}" 59 |   with_items: "{{aws_volumes.volumes}}" 60 |
-------------------------------------------------------------------------------- /roles/generate-ssl-certs/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Add extras and optional repo 3 |   shell: "yum-config-manager --enable rhui-REGION-rhel-server-extras rhui-REGION-rhel-server-optional" 4 |   become: true 5 | 6 | - name: install epel repository 7 |   yum: 8 |     name: "{{epel_repo_url}}" 9 |     state: present 10 |   register: result 11 |   until: result is succeeded 12 |   retries: 5 13 |   delay: 10 14 |   become: true 15 | 16 | - name: yum update 17 |   yum: name=* state=latest 18 |   register: result 19 |   until: result is succeeded 20 |   retries: 3 21 |   delay: 5 22 |   become: true 23 | 24 | - name: install pyOpenSSL 16.2 for certbot 25 |   yum: 26 |     name: "{{ pyopenssl_url }}" 27 |     state: present 28 |   register: result 29 |   until: result is succeeded 30 |   retries: 3 31 |   delay: 10 32 |   become: true 33 | 34 | - name: install certbot 35 |   yum: 36 |     name: 37 |       - "certbot" 38 |       - "python2-certbot-dns-route53" 39 |     state: present 40 |   retries: 3 41 |   delay: 5 42 |   become: true 43 | 44 | # Generate master SSL certificate 45 | - name: generate master ssl certificate 46 |   shell: "certbot certonly -d {{public_master_dns}} --standalone -m {{domain_email_address}} -n --agree-tos" 47 |   become: true 48 | 49 | # Generate wildcard SSL certificate 50 | - name: generate wildcard ssl certificate 51 |   shell: "certbot certonly -d *.{{public_subdomain_prefix}}.{{public_dns_zone}} -m {{domain_email_address}} -n --agree-tos --server=https://acme-v02.api.letsencrypt.org/directory --dns-route53" 52 |   environment: 53 |     - AWS_ACCESS_KEY_ID: "{{ lookup('env','AWS_ACCESS_KEY_ID') }}" 54 |     - AWS_SECRET_ACCESS_KEY: "{{ lookup('env','AWS_SECRET_ACCESS_KEY') }}" 55 |   become: true 56 | 57 | - name: disable epel repo 58 |   shell: "yum erase -y epel-release" 59 |   become: true 60 | 61 | - name: fetch master SSL certificates to local machine 62 |   fetch: 63 |     src: /etc/letsencrypt/live/{{public_master_dns}}/{{item}} 64 |     dest: certs/letsencrypt/{{public_master_dns}}/{{item}} 65 |     flat: true 66 |   become: true 67 |   with_items: 68 |     - fullchain.pem 69 |     - privkey.pem 70 |     -
cert.pem 71 |     - chain.pem 72 | 73 | - name: fetch wildcard SSL certificates to local machine 74 |   fetch: 75 |     src: /etc/letsencrypt/live/{{public_subdomain_prefix}}.{{public_dns_zone}}/{{item}} 76 |     dest: certs/letsencrypt/{{public_subdomain_prefix}}.{{public_dns_zone}}/{{item}} 77 |     flat: true 78 |   become: true 79 |   with_items: 80 |     - fullchain.pem 81 |     - privkey.pem 82 |     - cert.pem 83 |     - chain.pem
-------------------------------------------------------------------------------- /roles/setup-ssh/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | # access for master 4 | - name: Generating master key for ec2-user 5 |   user: "name={{amazon_user}} generate_ssh_key=yes" 6 |   delegate_to: "{{master_public_ip}}" 7 |   remote_user: "{{amazon_user}}" 8 | 9 | - name: register master pub key 10 |   shell: "cat ~/.ssh/id_rsa.pub" 11 |   delegate_to: "{{master_public_ip}}" 12 |   remote_user: "{{amazon_user}}" 13 |   register: "master_rsa_pub" 14 | 15 | - name: install master pub key on bastion 16 |   authorized_key: user="{{amazon_user}}" key="{{ master_rsa_pub.stdout }}" 17 |   delegate_to: "{{item}}" 18 |   remote_user: "{{amazon_user}}" 19 |   with_items: 20 |     - "{{bastion_public_ip}}" 21 | 22 | - name: install master pub key on nodes 23 |   authorized_key: user="{{amazon_user}}" key="{{ master_rsa_pub.stdout }}" 24 |   delegate_to: "{{item.public_ip}}" 25 |   remote_user: "{{amazon_user}}" 26 |   with_items: 27 |     - "{{nodes}}" 28 | 29 | # access for bastion 30 | - name: Generating bastion key for ec2-user 31 |   user: "name={{amazon_user}} generate_ssh_key=yes" 32 |   delegate_to: "{{bastion_public_ip}}" 33 |   remote_user: "{{amazon_user}}" 34 | 35 | - name: register bastion pub key 36 |   shell: "cat ~/.ssh/id_rsa.pub" 37 |   delegate_to: "{{bastion_public_ip}}" 38 |   remote_user: "{{amazon_user}}" 39 |   register: "bastion_rsa_pub" 40 | 41 | - name: install bastion pub key on master 42 |   authorized_key: user="{{amazon_user}}" key="{{ bastion_rsa_pub.stdout }}" 43 |   delegate_to: "{{item}}" 44 |   remote_user: "{{amazon_user}}" 45 |   with_items: 46 |     - "{{master_public_ip}}" 47 | 48 | - name: install bastion pub key on nodes 49 |   authorized_key: user="{{amazon_user}}" key="{{ bastion_rsa_pub.stdout }}" 50 |   delegate_to: "{{item.public_ip}}" 51 |   remote_user: "{{amazon_user}}" 52 |   with_items: 53 |     - "{{nodes}}" 54 | 55 | - name: touch host known_hosts file on bastion 56 |   file: 57 |     path: ~/.ssh/known_hosts 58 |     state: touch 59 |     mode: "0644" 60 |   delegate_to: "{{bastion_public_ip}}" 61 |   remote_user: "{{amazon_user}}" 62 | 63 | - name: run ssh-keyscan to add master to known_hosts 64 |   shell: /usr/bin/ssh-keyscan -t rsa {{ master_private_dns_name }} >> ~/.ssh/known_hosts 65 |   delegate_to: "{{bastion_public_ip}}" 66 |   remote_user: "{{amazon_user}}" 67 | 68 | - name: run ssh-keyscan to add nodes to known_hosts 69 |   shell: /usr/bin/ssh-keyscan -t rsa {{ item.private_dns_name }} >> ~/.ssh/known_hosts 70 |   delegate_to: "{{bastion_public_ip}}" 71 |   remote_user: "{{amazon_user}}" 72 |   with_items: 73 |     - "{{nodes}}" 74 | 75 | - name: tighten permissions on known_hosts file on bastion 76 |   file: 77 |     path: ~/.ssh/known_hosts 78 |     state: touch 79 |     mode: "0600" 80 |   delegate_to: "{{bastion_public_ip}}" 81 |   remote_user: "{{amazon_user}}" 82 |
-------------------------------------------------------------------------------- /roles/setup-security-groups/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | #This
security group allows intra-node communication on all ports with all protocols. 4 | - name: create openshift-vpc 5 |   ec2_group: 6 |     name: "{{namespace}}-vpc" 7 |     description: "Default security group that allows all instances in the VPC to talk to each other over any port and protocol." 8 |     vpc_id: "{{vpc_id}}" 9 |     region: "{{region}}" 10 |     state: "present" 11 |     rules: 12 |     - proto: "-1" 13 |       from_port: "0" 14 |       to_port: "0" 15 |       group_name: "{{namespace}}-vpc" 16 |     rules_egress: 17 |     - proto: "-1" 18 |       from_port: "0" 19 |       to_port: "0" 20 |       group_name: "{{namespace}}-vpc" 21 | 22 | # This security group allows public ingress to the instances for HTTP, HTTPS and common HTTP/S proxy ports. 23 | - name: create openshift-public-ingress 24 |   ec2_group: 25 |     name: "{{namespace}}-public-ingress" 26 |     description: "Security group that allows public ingress to instances, HTTP, HTTPS and more." 27 |     vpc_id: "{{vpc_id}}" 28 |     region: "{{region}}" 29 |     state: "present" 30 |     rules: 31 |     # HTTP 32 |     - proto: "tcp" 33 |       from_port: "80" 34 |       to_port: "80" 35 |       cidr_ip: "0.0.0.0/0" 36 |     # HTTP_PROXY 37 |     - proto: "tcp" 38 |       from_port: "8080" 39 |       to_port: "8080" 40 |       cidr_ip: "0.0.0.0/0" 41 |     # HTTPS 42 |     - proto: "tcp" 43 |       from_port: "443" 44 |       to_port: "443" 45 |       cidr_ip: "0.0.0.0/0" 46 |     # HTTPS_PROXY 47 |     - proto: "tcp" 48 |       from_port: "8443" 49 |       to_port: "8443" 50 |       cidr_ip: "0.0.0.0/0" 51 |     # COCKPIT 52 |     - proto: "tcp" 53 |       from_port: "9090" 54 |       to_port: "9090" 55 |       cidr_ip: "0.0.0.0/0" 56 |     rules_egress: [] 57 | 58 | # This security group allows public egress from the instances for HTTP, HTTPS and NTP. 59 | - name: create openshift-public-egress 60 |   ec2_group: 61 |     name: "{{namespace}}-public-egress" 62 |     description: "This security group allows public egress from the instances for HTTP and HTTPS, which is needed for yum updates, git access etc." 63 |     vpc_id: "{{vpc_id}}" 64 |     region: "{{region}}" 65 |     state: "present" 66 |     rules_egress: 67 |     # HTTP 68 |     - proto: "tcp" 69 |       from_port: "80" 70 |       to_port: "80" 71 |       cidr_ip: "0.0.0.0/0" 72 |     # HTTPS 73 |     - proto: "tcp" 74 |       from_port: "443" 75 |       to_port: "443" 76 |       cidr_ip: "0.0.0.0/0" 77 |     - proto: "tcp" 78 |       from_port: "8443" 79 |       to_port: "8443" 80 |       cidr_ip: "0.0.0.0/0" 81 |     # NTP/chrony 82 |     - proto: "udp" 83 |       from_port: "123" 84 |       to_port: "123" 85 |       cidr_ip: "0.0.0.0/0" 86 | # Security group which allows SSH access to a host. Used for the bastion. 87 | - name: create openshift-ssh 88 |   ec2_group: 89 |     name: "{{namespace}}-ssh" 90 |     description: "Security group that allows SSH access to a host. Used for the bastion." 91 |     vpc_id: "{{vpc_id}}" 92 |     region: "{{region}}" 93 |     state: "present" 94 |     rules: 95 |     - proto: "tcp" 96 |       from_port: "22" 97 |       to_port: "22" 98 |       cidr_ip: "0.0.0.0/0" 99 |     rules_egress: []
-------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing 2 | 3 | > **WARNING:** This guide is a work in progress and will continue to evolve over 4 | > time. If you have content to contribute, please refer to this document 5 | > each time as things may have changed since the last time you contributed. 6 | > 7 | > This warning will be removed once we have settled on a reasonable set of 8 | > guidelines for contributions. 9 | 10 | Thank you for contributing to OpenShift AWS Setup.
This document explains how the 11 | repository is organized, and how to submit contributions. 12 | 13 | ### 1 Fork the openshift-aws-setup repo 14 | 15 | Forking openshift-aws-setup is a simple two-step process. 16 | 17 | 1. On GitHub, navigate to the repo. 18 | 2. In the top-right corner of the page, click **Fork**. 19 | 20 | That's it! Now, you have a [fork][git-fork] of the original gnunn1/openshift-aws-setup repo. 21 | 22 | ### 2 Create a local clone of your fork 23 | 24 | Right now, you have a fork of the openshift-aws-setup repo, but you don't have the files in that repo on your computer. Let's create a [clone][git-clone] of your fork locally on your computer. 25 | 26 | ```sh 27 | git clone git@github.com:your-username/openshift-aws-setup 28 | cd openshift-aws-setup 29 | 30 | # Configure git to sync your fork with the original repo 31 | git remote add upstream https://github.com/gnunn1/openshift-aws-setup 32 | 33 | # Never push to upstream repo 34 | git remote set-url --push upstream no_push 35 | ``` 36 | 37 | ### 3 Verify your [remotes][git-remotes] 38 | 39 | To verify the new upstream repo you've specified for your fork, type 40 | `git remote -v`. You should see the URL for your fork as `origin`, and the URL for the original repo as `upstream`. 41 | 42 | ```sh 43 | origin    git@github.com:your-username/openshift-aws-setup (fetch) 44 | origin    git@github.com:your-username/openshift-aws-setup (push) 45 | upstream  https://github.com/gnunn1/openshift-aws-setup (fetch) 46 | upstream  no_push (push) 47 | ``` 48 | 49 | ### 4 Modify your `develop` 50 | 51 | Get your local `develop` [branch][git-branch] up to date: 52 | 53 | ```sh 54 | git checkout develop 55 | git fetch upstream 56 | git merge upstream/develop 57 | ``` 58 | 59 | Then build your local `develop` branch, make changes, etc. 60 | 61 | ### 5 Keep your local clone in sync 62 | 63 | ```sh 64 | git fetch upstream 65 | git merge upstream/master 66 | git merge upstream/develop 67 | ``` 68 | 69 | ### 6 [Commit][git-commit] your `develop` 70 | 71 | ```sh 72 | git commit 73 | ``` 74 | 75 | Likely you'll go back and edit, build, test, etc. 76 | 77 | ### 7 [Push][git-push] your `develop` 78 | 79 | When ready to review (or just to establish an offsite backup of your work), 80 | push your branch to your fork on `github.com`: 81 | 82 | ```sh 83 | git push 84 | ``` 85 | 86 | ### 8 Submit a [pull request][git-pull-request] 87 | 88 | 1. Visit your fork at `https://github.com/your-username/openshift-aws-setup` 89 | 2. Click the `Compare & Pull Request` button next to your `develop` branch. 90 | 91 | At this point you're waiting on us. We may suggest some changes or improvements or alternatives. We'll do our best to review and at least comment within 3 business days (often much sooner). 92 | 93 | _If you have upstream write access_, please refrain from using the GitHub UI for creating PRs, because GitHub will create the PR branch inside the main repo rather than inside your fork.
94 | 95 | [git-fork]: https://help.github.com/articles/fork-a-repo/ 96 | [git-clone]: https://git-scm.com/docs/git-clone 97 | [git-remotes]: https://git-scm.com/book/en/v2/Git-Basics-Working-with-Remotes 98 | [git-branch]: https://git-scm.com/docs/git-branch 99 | [git-commit]: https://git-scm.com/docs/git-commit 100 | [git-push]: https://git-scm.com/docs/git-push 101 | [git-pull-request]: https://github.com/gnunn1/openshift-aws-setup/compare/ 102 | -------------------------------------------------------------------------------- /roles/setup-virtual-machines/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Find latest RHEL AMI when no ami_id is defined 3 | ec2_ami_find: 4 | name: "RHEL-7.3*" 5 | owner: "{{redhat_owner_id}}" 6 | architecture: "x86_64" 7 | virtualization_type: "hvm" 8 | sort: "creationDate" 9 | sort_order: "descending" 10 | sort_end: 1 11 | region: "{{region}}" 12 | register: "ami_find" 13 | when: ami_id is not defined 14 | 15 | - name: Find AMI by the supplied ami_id 16 | ec2_ami_find: 17 | ami_id: "{{ami_id}}" 18 | region: "{{region}}" 19 | register: "ami_find" 20 | when: ami_id is defined 21 | 22 | - name: Provision master 23 | ec2: 24 | instance_type: "{{master_ami_size}}" 25 | image: "{{ami_find['results'][0]['ami_id']}}" 26 | region: "{{region}}" 27 | wait: true 28 | key_name: "{{aws_key_pair}}" 29 | vpc_subnet_id: "{{subnet_facts['subnet']['id']}}" 30 | group: ['{{namespace}}-vpc', '{{namespace}}-public-ingress', '{{namespace}}-public-egress', '{{namespace}}-ssh'] 31 | volumes: 32 | - device_name: "{{ami_find['results'][0]['root_device_name']}}" 33 | volume_size: "{{master_root_volume_size}}" 34 | volume_type: gp2 35 | delete_on_termination: true 36 | - device_name: "/dev/xvdb" 37 | volume_size: "{{docker_volume_size}}" 38 | volume_type: gp2 39 | delete_on_termination: true 40 | count_tag: 41 | Name: "master-{{namespace}}" 42 | instance_tags: 43 | "{'Name':'master-{{namespace}}','namespace':'{{namespace}}','kubernetes.io/cluster/{{namespace}}':'{{namespace}}'}" 44 | exact_count: 1 45 | register: ec2_master 46 | 47 | - name: allocate a new elastic IP and associate it with master 48 | ec2_eip: 49 | device_id: "{{ec2_master['tagged_instances'][0]['id']}}" 50 | region: "{{region}}" 51 | in_vpc: "yes" 52 | release_on_disassociation: true 53 | register: master_elastic_ip 54 | 55 | - name: Provision nodes 56 | ec2: 57 | instance_type: "{{node_ami_size}}" 58 | image: "{{ami_find['results'][0]['ami_id']}}" 59 | region: "{{region}}" 60 | wait: true 61 | key_name: "{{aws_key_pair}}" 62 | vpc_subnet_id: "{{subnet_facts['subnet']['id']}}" 63 | group: ['{{namespace}}-vpc', '{{namespace}}-public-ingress', '{{namespace}}-public-egress', '{{namespace}}-ssh'] 64 | volumes: 65 | - device_name: "{{ami_find['results'][0]['root_device_name']}}" 66 | volume_size: "{{node_root_volume_size}}" 67 | volume_type: gp2 68 | delete_on_termination: true 69 | - device_name: "/dev/xvdb" 70 | volume_size: "{{docker_volume_size}}" 71 | volume_type: gp2 72 | delete_on_termination: true 73 | count_tag: 74 | Name: "node{{item}}-{{namespace}}" 75 | instance_tags: 76 | "{'Name':'node{{item}}-{{namespace}}','namespace':'{{namespace}}','kubernetes.io/cluster/{{namespace}}':'{{namespace}}'}" 77 | exact_count: 1 78 | assign_public_ip: yes 79 | with_sequence: start=1 end={{app_nodes|int}} 80 | register: ec2_nodes 81 | 82 | - debug: 83 | msg: "{{ec2_nodes}}" 84 | 85 | - name: Provision gluster volumes 86 | ec2_vol: 87 | instance: "{{item.tagged_instances[0].id}}" 88 | volume_size: 
"{{gluster_volume_size}}" 89 | volume_type: gp2 90 | device_name: "/dev/xvdc" 91 | delete_on_termination: true 92 | region: "{{region}}" 93 | with_items: "{{ec2_nodes.results[0:3]}}" 94 | when: install_gluster 95 | 96 | - name: Provision bastion 97 | ec2: 98 | instance_type: "{{bastion_ami_size}}" 99 | image: "{{ami_find['results'][0]['ami_id']}}" 100 | region: "{{region}}" 101 | wait: true 102 | key_name: "{{aws_key_pair}}" 103 | vpc_subnet_id: "{{subnet_facts['subnet']['id']}}" 104 | group: ['{{namespace}}-vpc', '{{namespace}}-public-ingress', '{{namespace}}-public-egress', '{{namespace}}-ssh'] 105 | count_tag: 106 | Name: "bastion-{{namespace}}" 107 | instance_tags: 108 | Name: "bastion-{{namespace}}" 109 | namespace: "{{namespace}}" 110 | exact_count: 1 111 | assign_public_ip: yes 112 | register: ec2_bastion 113 | 114 | - name: Register VM facts 115 | set_fact: 116 | master_private_ip: "{{ec2_master['tagged_instances'][0]['private_ip']}}" 117 | master_public_ip: "{{master_elastic_ip['public_ip']}}" 118 | master_private_dns_name: "{{ec2_master['tagged_instances'][0]['private_dns_name']}}" 119 | bastion_private_ip: "{{ec2_bastion['tagged_instances'][0]['private_ip']}}" 120 | bastion_public_ip: "{{ec2_bastion['tagged_instances'][0]['public_ip']}}" 121 | 122 | # Create a dict of node public and private ip addresses 123 | - name: Register Node facts 124 | set_fact: 125 | nodes: "{{ nodes|default([]) + [ {'index': item.item, 'public_ip': item.tagged_instances[0].public_ip, 'private_ip': item.tagged_instances[0].private_ip, 'private_dns_name': item.tagged_instances[0].private_dns_name} ] }}" 126 | with_items: "{{ec2_nodes.results}}" 127 | 128 | - name: Node IP provisioned 129 | debug: 130 | msg: "{{ nodes }}" 131 | 132 | - name: Wait for master and bastion SSH to come up 133 | local_action: wait_for 134 | host={{ item }} 135 | port=22 136 | state=started 137 | with_items: 138 | - "{{bastion_public_ip}}" 139 | - "{{master_public_ip}}" 140 | 141 | - name: Wait for node SSH to come up 142 | local_action: wait_for 143 | host={{ item.public_ip }} 144 | port=22 145 | state=started 146 | with_items: "{{ nodes }}" -------------------------------------------------------------------------------- /amis/base/packer.json: -------------------------------------------------------------------------------- 1 | { 2 | "variables": { 3 | "git_commit": "57c4fd41cc3c2602ef62716e0fb867e8439d29c0", 4 | "aws_access_key": "{{env `AWS_ACCESS_KEY_ID`}}", 5 | "aws_secret_key": "{{env `AWS_SECRET_ACCESS_KEY`}}", 6 | "aws_region": "{{env `AWS_DEFAULT_REGION`}}", 7 | "rhsm_username": null, 8 | "rhsm_password": null, 9 | "rhsm_org_id": "", 10 | "rhsm_key_id": "", 11 | "rhsm_pool_id": null, 12 | "ami_id": null 13 | }, 14 | "builders": [{ 15 | "type": "amazon-ebs", 16 | "access_key": "{{user `aws_access_key`}}", 17 | "secret_key": "{{user `aws_secret_key`}}", 18 | "region": "{{user `aws_region`}}", 19 | "source_ami": "{{user `ami_id`}}", 20 | "instance_type": "t2.micro", 21 | "ssh_username": "ec2-user", 22 | "ami_name": "Red Hat Enterprise Linux 7 AMI {{timestamp | clean_ami_name}}", 23 | "ami_description": "Base image built upon the rhel7-core image, this provides any images layered on top of it with all the tools needed to use common functionality as well as libraries needed for it to serve as a base for other builder images, e.g. 
python or ruby.", 24 | "tags": { 25 | "Component": "rhel-server-packer", 26 | "AMI": "{{ .SourceAMIName }}", 27 | "Vendor": "Red Hat, Inc.", 28 | "URL": "https://access.redhat.com/articles/3135091", 29 | "Summary": "Base image with essential libraries and tools used as a base for builder images like perl, python, ruby, etc.", 30 | "VCSType": "git", 31 | "Name": "rhel7-base", 32 | "VCSRef": "{{user `git_commit`}}", 33 | "Version": "7.4", 34 | "Architecture": "x86_64", 35 | "Release": "1", 36 | "BuildDate": "{{isotime | clean_ami_name}}" 37 | } 38 | }], 39 | "provisioners": [{ 40 | "type": "shell", 41 | "execute_command": "{{ .Vars }} sudo -E sh '{{ .Path }}'", 42 | "inline": [ 43 | "/usr/bin/printf '%s\n' '209.132.183.44 xmlrpc.rhn.redhat.com' >> /etc/hosts", 44 | "/usr/bin/printf '%s\n' '23.204.148.218 content-xmlrpc.rhn.redhat.com' >> /etc/hosts", 45 | "/usr/bin/printf '%s\n' '209.132.183.49 subscription.rhn.redhat.com' >> /etc/hosts", 46 | "/usr/bin/printf '%s\n' '209.132.183.108 subscription.rhsm.redhat.com' >> /etc/hosts", 47 | "/usr/bin/printf '%s\n' '209.132.182.63 registry.access.redhat.com' >> /etc/hosts", 48 | "/usr/bin/printf '%s\n' '209.132.182.33 repository.jboss.org' >> /etc/hosts", 49 | "/usr/bin/sed -i -e 's/^enabled=1/enabled=0/g' /etc/yum/pluginconf.d/amazon-id.conf", 50 | "/usr/bin/sed -i -e 's/^enabled=1/enabled=0/g' /etc/yum/pluginconf.d/rhui-lb.conf", 51 | "/usr/bin/sed -i -e 's/^enabled=1/enabled=0/g' /etc/yum.repos.d/redhat-rhui.repo", 52 | "/usr/bin/sed -i -e 's/^enabled=1/enabled=0/g' /etc/yum.repos.d/redhat-rhui-client-config.repo", 53 | "/usr/bin/sed -i -e 's/^enabled=0/enabled=1/g' /etc/yum/pluginconf.d/product-id.conf", 54 | "/usr/bin/sed -i -e 's/^enabled=0/enabled=1/g' /etc/yum/pluginconf.d/subscription-manager.conf", 55 | "if [ ! -z \"{{user `rhsm_username`}}\" ] && [ ! -z \"{{user `rhsm_password`}}\" ]; then /usr/bin/subscription-manager register --username={{user `rhsm_username`}} --password={{user `rhsm_password`}} --auto-attach; fi", 56 | "if [ ! -z \"{{user `rhsm_org_id`}}\" ] && [ ! -z \"{{user `rhsm_key_id`}}\" ]; then /usr/bin/subscription-manager register --org={{user `rhsm_org_id` }} --activationkey={{user `rhsm_key_id` }}; fi", 57 | "count=0; until /usr/bin/subscription-manager identity >/dev/null 2>&1 || [ $((count+=1)) -gt 5 ]; do /usr/bin/sleep $count; done", 58 | "/usr/bin/subscription-manager refresh", 59 | "if [ ! 
-z \"{{user `rhsm_pool_id`}}\" ]; then /usr/bin/subscription-manager subscribe --pool={{user `rhsm_pool_id` }}; fi" 60 | ] 61 | }, 62 | { 63 | "type": "shell", 64 | "execute_command": "{{ .Vars }} sudo -E sh '{{ .Path }}'", 65 | "inline": [ 66 | "/usr/bin/subscription-manager repos --disable=\"*\"", 67 | "/usr/bin/yum-config-manager --disable \\*", 68 | "/usr/bin/subscription-manager repos --enable=rhel-7-server-rpms --enable=rhel-7-server-extras-rpms --enable=rhel-7-server-optional-rpms", 69 | "/usr/bin/rpmkeys --import /etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-beta /etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release", 70 | "/usr/bin/yum check-update", 71 | "INSTALL_PKGS='autoconf automake bzip2 gcc-c++ gd-devel gdb git libcurl-devel libxml2-devel libxslt-devel lsof make mariadb-devel mariadb-libs openssl-devel patch postgresql-devel procps-ng sqlite-devel unzip wget which zlib-devel'", 72 | "/usr/bin/yum install -y --setopt=tsflags=nodocs $INSTALL_PKGS", 73 | "/usr/bin/rpm -V $INSTALL_PKGS", 74 | "/usr/bin/yum clean all -y", 75 | "/usr/bin/yum update -y", 76 | "/usr/bin/systemctl reboot" 77 | ], 78 | "expect_disconnect": true 79 | }, 80 | { 81 | "type": "shell", 82 | "execute_command": "{{ .Vars }} sudo -E sh '{{ .Path }}'", 83 | "inline": [ 84 | "/usr/bin/subscription-manager remove --all", 85 | "/usr/bin/subscription-manager unregister", 86 | "/usr/bin/subscription-manager clean", 87 | "/usr/bin/sed -i -e 's/^enabled=0/enabled=1/g' /etc/yum/pluginconf.d/amazon-id.conf", 88 | "/usr/bin/sed -i -e 's/^enabled=0/enabled=1/g' /etc/yum/pluginconf.d/rhui-lb.conf", 89 | "/usr/bin/sed -i -e 's/^enabled=0/enabled=1/g' /etc/yum.repos.d/redhat-rhui.repo", 90 | "/usr/bin/sed -i -e 's/^enabled=0/enabled=1/g' /etc/yum.repos.d/redhat-rhui-client-config.repo", 91 | "/usr/bin/sed -i -e 's/^enabled=1/enabled=0/g' /etc/yum/pluginconf.d/product-id.conf", 92 | "/usr/bin/sed -i -e 's/^enabled=1/enabled=0/g' /etc/yum/pluginconf.d/subscription-manager.conf", 93 | "/usr/bin/printf '%s\n' '127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4' > /etc/hosts", 94 | "/usr/bin/printf '%s\n' '::1 localhost localhost.localdomain localhost6 localhost6.localdomain6' >> /etc/hosts", 95 | "/usr/bin/rm -rf /var/cache/yum" 96 | ], 97 | "pause_before": "20s" 98 | } 99 | ] 100 | } 101 | -------------------------------------------------------------------------------- /amis/core/packer.json: -------------------------------------------------------------------------------- 1 | { 2 | "variables": { 3 | "git_commit": "57c4fd41cc3c2602ef62716e0fb867e8439d29c0", 4 | "aws_access_key": "{{env `AWS_ACCESS_KEY_ID`}}", 5 | "aws_secret_key": "{{env `AWS_SECRET_ACCESS_KEY`}}", 6 | "aws_region": "{{env `AWS_DEFAULT_REGION`}}", 7 | "rhsm_username": null, 8 | "rhsm_password": null, 9 | "rhsm_org_id": "", 10 | "rhsm_key_id": "", 11 | "rhsm_pool_id": null, 12 | "ami_id": null 13 | }, 14 | "builders": [{ 15 | "type": "amazon-ebs", 16 | "access_key": "{{user `aws_access_key`}}", 17 | "secret_key": "{{user `aws_secret_key`}}", 18 | "region": "{{user `aws_region`}}", 19 | "source_ami": "{{user `ami_id`}}", 20 | "instance_type": "t2.micro", 21 | "ssh_username": "ec2-user", 22 | "ami_name": "Red Hat Enterprise Linux 7 AMI {{timestamp | clean_ami_name}}", 23 | "ami_description": "This base image provides language runtimes and tools to run, maintain, and troubleshoot applications while keeping the image size small. 
For further information on how this image was built, look at the /root/anaconda-ks.cfg file.", 24 | "tags": { 25 | "Component": "rhel-server-packer", 26 | "AMI": "{{ .SourceAMIName }}", 27 | "Vendor": "Red Hat, Inc.", 28 | "URL": "https://access.redhat.com/articles/3135091", 29 | "Summary": "Provides the latest release of Red Hat Enterprise Linux 7 in a fully featured and supported base image.", 30 | "VCSType": "git", 31 | "Name": "rhel7-core", 32 | "VCSRef": "{{user `git_commit`}}", 33 | "Version": "7.4", 34 | "Architecture": "x86_64", 35 | "Release": "3", 36 | "BuildDate": "{{isotime | clean_ami_name}}" 37 | } 38 | }], 39 | "provisioners": [{ 40 | "type": "file", 41 | "destination": "/tmp/", 42 | "source": "./root" 43 | }, 44 | { 45 | "type": "shell", 46 | "execute_command": "{{ .Vars }} sudo -E sh '{{ .Path }}'", 47 | "inline": [ 48 | "/usr/bin/chown -R root:root /tmp/root/", 49 | "/usr/bin/cp -rf /tmp/root/* /", 50 | "/usr/bin/printf '%s\n' '209.132.183.44 xmlrpc.rhn.redhat.com' >> /etc/hosts", 51 | "/usr/bin/printf '%s\n' '23.204.148.218 content-xmlrpc.rhn.redhat.com' >> /etc/hosts", 52 | "/usr/bin/printf '%s\n' '209.132.183.49 subscription.rhn.redhat.com' >> /etc/hosts", 53 | "/usr/bin/printf '%s\n' '209.132.183.108 subscription.rhsm.redhat.com' >> /etc/hosts", 54 | "/usr/bin/printf '%s\n' '209.132.182.63 registry.access.redhat.com' >> /etc/hosts", 55 | "/usr/bin/printf '%s\n' '209.132.182.33 repository.jboss.org' >> /etc/hosts", 56 | "/usr/bin/sed -i -e 's/^enabled=1/enabled=0/g' /etc/yum/pluginconf.d/amazon-id.conf", 57 | "/usr/bin/sed -i -e 's/^enabled=1/enabled=0/g' /etc/yum/pluginconf.d/rhui-lb.conf", 58 | "/usr/bin/sed -i -e 's/^enabled=1/enabled=0/g' /etc/yum.repos.d/redhat-rhui.repo", 59 | "/usr/bin/sed -i -e 's/^enabled=1/enabled=0/g' /etc/yum.repos.d/redhat-rhui-client-config.repo", 60 | "/usr/bin/sed -i -e 's/^enabled=0/enabled=1/g' /etc/yum/pluginconf.d/product-id.conf", 61 | "/usr/bin/sed -i -e 's/^enabled=0/enabled=1/g' /etc/yum/pluginconf.d/subscription-manager.conf", 62 | "if [ ! -z \"{{user `rhsm_username`}}\" ] && [ ! -z \"{{user `rhsm_password`}}\" ]; then /usr/bin/subscription-manager register --username={{user `rhsm_username`}} --password={{user `rhsm_password`}} --auto-attach; fi", 63 | "if [ ! -z \"{{user `rhsm_org_id`}}\" ] && [ ! -z \"{{user `rhsm_key_id`}}\" ]; then /usr/bin/subscription-manager register --org={{user `rhsm_org_id` }} --activationkey={{user `rhsm_key_id` }}; fi", 64 | "count=0; until /usr/bin/subscription-manager identity >/dev/null 2>&1 || [ $((count+=1)) -gt 5 ]; do /usr/bin/sleep $count; done", 65 | "/usr/bin/subscription-manager refresh", 66 | "if [ ! 
-z \"{{user `rhsm_pool_id`}}\" ]; then /usr/bin/subscription-manager subscribe --pool={{user `rhsm_pool_id` }}; fi" 67 | ] 68 | }, 69 | { 70 | "type": "shell", 71 | "execute_command": "{{ .Vars }} sudo -E sh '{{ .Path }}'", 72 | "inline": [ 73 | "/usr/bin/subscription-manager repos --disable=\"*\"", 74 | "/usr/bin/yum-config-manager --disable \\*", 75 | "/usr/bin/subscription-manager repos --enable=rhel-7-server-rpms --enable=rhel-7-server-extras-rpms --enable=rhel-7-server-optional-rpms", 76 | "/usr/bin/rpmkeys --import /etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-beta /etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release", 77 | "/usr/bin/yum check-update", 78 | "INSTALL_PKGS='bind-utils bridge-utils findutils gettext git httpd-tools iptables-services kexec-tools net-tools psacct sos tar unzip wget yum-utils'", 79 | "/usr/bin/yum install -y --setopt=tsflags=nodocs $INSTALL_PKGS", 80 | "/usr/bin/rpm -V $INSTALL_PKGS", 81 | "/usr/bin/yum clean all -y", 82 | "/usr/bin/yum update -y", 83 | "/usr/bin/systemctl reboot" 84 | ], 85 | "expect_disconnect": true 86 | }, 87 | { 88 | "type": "shell", 89 | "execute_command": "{{ .Vars }} sudo -E sh '{{ .Path }}'", 90 | "inline": [ 91 | "/usr/bin/subscription-manager remove --all", 92 | "/usr/bin/subscription-manager unregister", 93 | "/usr/bin/subscription-manager clean", 94 | "/usr/bin/sed -i -e 's/^enabled=0/enabled=1/g' /etc/yum/pluginconf.d/amazon-id.conf", 95 | "/usr/bin/sed -i -e 's/^enabled=0/enabled=1/g' /etc/yum/pluginconf.d/rhui-lb.conf", 96 | "/usr/bin/sed -i -e 's/^enabled=0/enabled=1/g' /etc/yum.repos.d/redhat-rhui.repo", 97 | "/usr/bin/sed -i -e 's/^enabled=0/enabled=1/g' /etc/yum.repos.d/redhat-rhui-client-config.repo", 98 | "/usr/bin/sed -i -e 's/^enabled=1/enabled=0/g' /etc/yum/pluginconf.d/product-id.conf", 99 | "/usr/bin/sed -i -e 's/^enabled=1/enabled=0/g' /etc/yum/pluginconf.d/subscription-manager.conf", 100 | "/usr/bin/printf '%s\n' '127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4' > /etc/hosts", 101 | "/usr/bin/printf '%s\n' '::1 localhost localhost.localdomain localhost6 localhost6.localdomain6' >> /etc/hosts", 102 | "/usr/bin/rpm-file-permissions", 103 | "/usr/bin/rm -rf /tmp/root", 104 | "/usr/bin/rm -rf /var/cache/yum" 105 | ], 106 | "pause_before": "20s" 107 | } 108 | ] 109 | } 110 | -------------------------------------------------------------------------------- /openshift-playbook.yml: -------------------------------------------------------------------------------- 1 | --- 2 | ############################################################# 3 | # Provision AWS infrastructure 4 | ############################################################# 5 | - name: Create Infrastructure 6 | hosts: local 7 | 8 | vars_files: 9 | - vars/aws-config.yml 10 | 11 | roles: 12 | - define-derived-vars 13 | - setup-vpc 14 | - setup-security-groups 15 | - setup-virtual-machines 16 | - setup-dns 17 | - setup-ssh 18 | - setup-host-groups 19 | 20 | pre_tasks: 21 | - name: Verify Ansible Version 22 | assert: 23 | that: 24 | - "ansible_version.major == 2" 25 | - "ansible_version.minor >= 4" 26 | msg: "This script is only supported with the 2.4.1 version or later of Ansible" 27 | 28 | - name: Verify App Nodes For Gluster 29 | assert: 30 | that: 31 | - "app_nodes >= 3" 32 | msg: "To install gluster you must have at least three app nodes defined, only {{app_nodes}} defined in variables" 33 | when: install_gluster 34 | 35 | 36 | - fail: msg="Variables required to register subscriptions are missing, please confirm that either rhsm_username, 
rhsm_password and rhsm_pool OR rhsm_key_id and rhsm_org_id are defined" 37 | when: (deployment_type == 'openshift-enterprise') and not (((rhsm_username is defined) and (rhsm_password is defined) and (rhsm_pool is defined)) or ((rhsm_key_id is defined) and (rhsm_org_id is defined))) 38 | 39 | ############################################################# 40 | # Register and update virtual machines 41 | ############################################################# 42 | - name: Setup VM hosts 43 | hosts: localhost 44 | 45 | vars_files: 46 | - vars/aws-config.yml 47 | 48 | - name: Register and update virtual machines 49 | hosts: created_vms 50 | remote_user: "{{ amazon_user }}" 51 | 52 | vars_files: 53 | - vars/aws-config.yml 54 | 55 | vars: 56 | state: 'present' 57 | 58 | roles: 59 | 60 | - {role: register-virtual-machines, when: deployment_type == 'openshift-enterprise'} 61 | - update-virtual-machines 62 | 63 | ############################################################# 64 | # Generate SSL certificates using Let's Encrypt, fetch to 65 | # local system and then set ssl vars 66 | ############################################################# 67 | - name: Generate SSL certificates 68 | hosts: master 69 | remote_user: "{{ amazon_user }}" 70 | 71 | vars_files: 72 | - vars/aws-config.yml 73 | 74 | roles: 75 | - define-derived-vars 76 | - {role: generate-ssl-certs, when: use_lets_encrypt} 77 | 78 | ############################################################# 79 | # Copy SSL certificates to bastion 80 | ############################################################# 81 | - name: Copy SSL certificates to bastion 82 | hosts: bastion 83 | remote_user: "{{ amazon_user }}" 84 | 85 | vars_files: 86 | - vars/aws-config.yml 87 | 88 | roles: 89 | - define-derived-vars 90 | 91 | post_tasks: 92 | - name: create directory for master certs 93 | file: path=certs/{{public_master_dns}} state=directory 94 | 95 | - name: create directory for wildcard certs 96 | file: path=certs/{{public_subdomain_prefix}}.{{public_dns_zone}} state=directory 97 | 98 | - name: copy master SSL certificates to bastion 99 | copy: 100 | src: "{{item}}" 101 | dest: certs/{{public_master_dns}}/{{item | basename}} 102 | with_items: 103 | - "{{master_ssl_cert_file}}" 104 | - "{{master_ssl_key_file}}" 105 | when: master_ssl_cert_file is defined 106 | 107 | - name: copy wildcard SSL certificates to bastion 108 | copy: 109 | src: "{{item}}" 110 | dest: certs/{{public_subdomain_prefix}}.{{public_dns_zone}}/{{item | basename}} 111 | with_items: 112 | - "{{wildcard_ssl_cert_file}}" 113 | - "{{wildcard_ssl_key_file}}" 114 | - "{{wildcard_ssl_fullchain_file}}" 115 | when: wildcard_ssl_cert_file is defined 116 | 117 | ############################################################# 118 | # Openshift Prerequisites 119 | ############################################################# 120 | - name: Setup Node hosts 121 | hosts: localhost 122 | 123 | vars_files: 124 | - vars/aws-config.yml 125 | 126 | - name: OpenShift Prerequisites 127 | hosts: created_nodes 128 | remote_user: "{{ amazon_user }}" 129 | 130 | vars_files: 131 | - vars/aws-config.yml 132 | 133 | vars: 134 | state: 'present' 135 | 136 | roles: 137 | - openshift-pre-reqs 138 | 139 | ############################################################# 140 | # Gluster Prerequisites 141 | ############################################################# 142 | - name: Setup gluster master prereqs 143 | hosts: master 144 | remote_user: "{{ amazon_user }}" 145 | 146 | vars_files: 147 | - vars/aws-config.yml 148 | 149 | roles: 150 | - 
{role: gluster-master-prereqs, when: install_gluster} 151 | 152 | - name: Setup gluster node prereqs 153 | hosts: gluster 154 | remote_user: "{{ amazon_user }}" 155 | 156 | vars_files: 157 | - vars/aws-config.yml 158 | 159 | roles: 160 | - {role: gluster-node-prereqs, when: install_gluster} 161 | 162 | ############################################################# 163 | # Install Openshift 164 | ############################################################# 165 | - name: Install OpenShift 166 | hosts: bastion 167 | remote_user: "{{ amazon_user }}" 168 | 169 | vars_files: 170 | - vars/aws-config.yml 171 | 172 | vars: 173 | # nodes: "{{ lookup('file','nodes.json') | from_json }}" 174 | 175 | roles: 176 | - define-derived-vars 177 | - openshift-install 178 | - {role: grafana-install, when: install_grafana} 179 | 180 | ############################################################# 181 | # Post-install tasks 182 | ############################################################# 183 | - name: Post-Install tasks 184 | hosts: master 185 | remote_user: "{{ amazon_user }}" 186 | 187 | vars_files: 188 | - vars/aws-config.yml 189 | 190 | post_tasks: 191 | - name: SSL certificate information 192 | debug: 193 | msg: 194 | - 'SSL certificates have been generated using letsencrypt and backed up locally:' 195 | - '\tcerts/letsencrypt\n' 196 | - 'Note that letsencrypt is rate limited to 20 requests a week and SSL certificates last for 90 days.' 197 | - 'If you need to run the playbook again, consider using the certificates backed up locally by' 198 | - 'setting the following variables in vars/aws-config.yml' 199 | - '\tmaster_ssl_cert_file: "{{master_ssl_cert_file}}"' 200 | - '\tmaster_ssl_key_file: "{{master_ssl_key_file}}"' 201 | - '\twildcard_ssl_cert_file: "{{wildcard_ssl_cert_file}}"' 202 | - '\twildcard_ssl_key_file: "{{wildcard_ssl_key_file}}"' 203 | - '\twildcard_ssl_fullchain_file: "{{wildcard_ssl_fullchain_file}}"' 204 | when: use_lets_encrypt 205 | 206 | roles: 207 | - define-derived-vars 208 | - add-users 209 | - {role: update-ssl-cockpit, when: master_ssl_cert_file is defined} 210 | - {role: disable-service-catalog, when: disable_service_catalog} 211 | 212 | ############################################################# 213 | # Output environment information 214 | ############################################################# 215 | - name: OpenShift Information 216 | hosts: localhost 217 | 218 | vars_files: 219 | - vars/aws-config.yml 220 | 221 | roles: 222 | - define-derived-vars 223 | 224 | post_tasks: 225 | - name: Confirmation message 226 | debug: 227 | msg: 228 | - 'The script has completed successfully.' 229 | - 'Master public IP: {{master_public_ip}}' 230 | - 'OpenShift console: https://{{public_master_dns}}:8443' -------------------------------------------------------------------------------- /vars/aws-config.yml: -------------------------------------------------------------------------------- 1 | ##################################################################### 2 | # Variables you should update for your installation 3 | ##################################################################### 4 | 5 | # AWS region to install environment into 6 | region: "ca-central-1" 7 | 8 | # The public DNS zone for the cluster; you must have a Route 53 zone configured for this 9 | public_dns_zone: "ocplab.com" 10 | # Prefix for the master, i.e. if public zone is 'ocplab.com' and the master prefix is 'master' 11 | # then the OpenShift console will be available at 'master.ocplab.com'. 
If you leave this as an empty 12 | # string it will use the public_dns_zone as the A record 13 | prefix_master_dns: "" 14 | # Prefix for subdomain for wildcard DNS, i.e. *.apps.ocplab.com 15 | public_subdomain_prefix: "apps" 16 | 17 | # Amazon key pair to use, must exist before running script. Please generate in AWS management console. 18 | aws_key_pair: "mykeypair" 19 | 20 | # You have a choice: if you know the specific AMI you want to use, reference it by ami_id. 21 | # If you do not know the ami_id, comment out the ami_id variable and uncomment the redhat_owner_id 22 | 23 | # Use the owner tag if you want to look up the AMI automatically 24 | #redhat_owner_id: "309956199498" 25 | 26 | # Use ami_id if you know the specific ami you want to use. Note the ami 27 | # below refers to an ami in ca-central-1; it's unlikely you have access 28 | # to it, so change this variable to something appropriate. Use the AWS console 29 | # to locate a suitable image for RHEL 7.5 (for OCP) or Centos 7.5 (for Origin) 30 | ami_id: "ami-fc20ad98" 31 | 32 | # Whether to install cloudforms 33 | install_cloudforms: false 34 | # Whether to install metrics 35 | install_metrics: false 36 | # Whether to install prometheus 37 | install_prometheus: false 38 | # Whether to install logging 39 | install_logging: false 40 | # Whether to install grafana; make sure to install prometheus too 41 | # Note the grafana playbook is currently broken in OCP, do not set this to true 42 | # See https://github.com/openshift/openshift-ansible/issues/7375 43 | install_grafana: false 44 | 45 | # Uncomment and set a password for ec2-user, 46 | # which can then be used to log in to cockpit. Please be 47 | # careful and do not check this file into github. In fact 48 | # just use the -e option when running ansible to set this 49 | # password. 50 | # cockpit_password: "changeme!" 51 | 52 | # Users to create in OpenShift; we highly recommend changing the password 53 | # Set admin to true for users you want to be cluster administrators 54 | # Any admin users are also added to master linux VM for cockpit 55 | users: 56 | - {name: admin, password: changeme!, admin: true} 57 | 58 | # Generates a set of non-admin users that can be used for workshops, 59 | # demos, etc. Users are created with the prefix plus a number using 60 | # the password specified 61 | create_generic_user: true 62 | generic_user_prefix: developer 63 | generic_user_count: 5 64 | generic_user_password: changeme! 
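# Example (hypothetical values): rather than committing real passwords to this
# file, you can override any of the variables above at run time with ansible's
# -e option, which the wrapper script passes straight through, e.g.:
#   ./openshift-playbook-run.sh -e cockpit_password='S3cret!' -e generic_user_password='S3cret!'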
65 | 66 | ##################################################################### 67 | # SSL configuration - Optional 68 | ##################################################################### 69 | 70 | # Option 1: Install SSL certificates for the master that you provide or 71 | # were previously generated by Option 2 72 | # master_ssl_cert_file: "certs/letsencrypt/ocplab.com/fullchain.pem" 73 | # master_ssl_key_file: "certs/letsencrypt/ocplab.com/privkey.pem" 74 | # wildcard_ssl_fullchain_file: "certs/letsencrypt/apps.ocplab.com/fullchain.pem" 75 | # wildcard_ssl_cert_file: "certs/letsencrypt/apps.ocplab.com/fullchain.pem" 76 | # wildcard_ssl_key_file: "certs/letsencrypt/apps.ocplab.com/privkey.pem" 77 | 78 | 79 | # Option 2: Use let's encrypt to generate certs for master and hawkular 80 | # note certs will be fetched to the location where the script is run to back 81 | # them up for possible re-use 82 | use_lets_encrypt: false 83 | # e-mail address that owns the certs generated by letsencrypt 84 | domain_email_address: "me@me.com" 85 | 86 | ##################################################################### 87 | # Variables you may optionally change for your installation 88 | ##################################################################### 89 | 90 | # Variables for node configuration 91 | bastion_ami_size: "t2.small" 92 | master_ami_size: "m4.xlarge" 93 | node_ami_size: "t2.large" 94 | master_root_volume_size: 50 95 | node_root_volume_size: 30 96 | docker_volume_size: 50 97 | 98 | # The number of application nodes in the cluster 99 | app_nodes: 3 100 | 101 | # The type of OpenShift deployment, must be either origin or openshift-enterprise. 102 | # To install OpenShift Enterprise, you must have a valid OpenShift subscription 103 | # with Red Hat and provide your subscription login and pool 104 | deployment_type: "openshift-enterprise" 105 | 106 | # Only needed for Origin 107 | # openshift_branch: "release-3.6" 108 | 109 | # Only needed for Container Platform 110 | ocp_version: "3.10" 111 | 112 | # If true, openshift will be installed with a default node selector set to region=primary 113 | # to prevent applications from running on the master 114 | install_node_selector: true 115 | 116 | # The network plugin to use; defaults to the flat subnet plugin 117 | os_sdn_network_plugin_name: "redhat/openshift-ovs-subnet" 118 | # os_sdn_network_plugin_name: "redhat/openshift-ovs-multitenant" 119 | # os_sdn_network_plugin_name: "redhat/openshift-ovs-networkpolicy" 120 | 121 | ##################################################################### 122 | # This playbook can optionally install CNS (Gluster) for applications. 123 | # It will use the first three app nodes as the gluster nodes; all 124 | # infra components requiring storage will use gluster instead of 125 | # AWS. 126 | # 127 | # Also note that the AWS cloud provider will be disabled if installing 128 | # gluster. 129 | # 130 | # Note some changes were made to support 3.9; these may not be usable 131 | # in 3.7. 132 | # 133 | # Note when using gluster make sure to increase node_ami_size so the app 134 | # nodes have 4 vCPUs and 16 GB memory at a minimum, say a t2.xlarge or m4.xlarge. 
135 | 136 | ##################################################################### 137 | install_gluster: false 138 | # Total storage available to OpenShift from gluster 139 | gluster_volume_size: 300 140 | # The block host volume size; it should be less than gluster_volume_size but big 141 | # enough to host the metrics and logging blocks 142 | gluster_block_host_volume_size: 150 143 | 144 | ##################################################################### 145 | # Variables you do not want to change unless you are sure you know 146 | # what you are doing 147 | ##################################################################### 148 | 149 | # metrics volume size; if using gluster and you change this, make sure it fits 150 | # in the block host volume size 151 | metrics_volume_size: 50 152 | 153 | # logging volume size; if using gluster and you change this, make sure it fits 154 | # in the block host volume size 155 | logging_volume_size: 50 156 | 157 | # Registry volume size, only used when gluster is used 158 | registry_volume_size: 50 159 | 160 | # namespace is used to uniquely differentiate the stack 161 | namespace: "openshift" 162 | # dns_zone is private zone, created and deleted by scripts 163 | dns_zone: "ose.local" 164 | 165 | vpc_cidr: "10.0.0.0/16" 166 | subnet_cidr: "10.0.1.0/24" 167 | 168 | amazon_user: "ec2-user" 169 | 170 | # DEPRECATED: This is no longer configurable in 3.10. 171 | htpasswd_path: "/etc/origin/master/htpasswd" 172 | 173 | # epel repo url, needed for certbot and lets encrypt 174 | epel_repo_url: "https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm" 175 | 176 | pyopenssl_url: "http://cbs.centos.org/kojifiles/packages/pyOpenSSL/16.2.0/3.el7/noarch/python2-pyOpenSSL-16.2.0-3.el7.noarch.rpm" 177 | 178 | # If true, the service catalog will be disabled 179 | disable_service_catalog: false 180 | -------------------------------------------------------------------------------- /roles/openshift-install/files/openshift_inventory.cfg: -------------------------------------------------------------------------------- 1 | # Create an OSEv3 group that contains the masters and nodes groups 2 | [OSEv3:children] 3 | masters 4 | nodes 5 | {% if install_gluster %} 6 | glusterfs 7 | {% endif %} 8 | 9 | # Set variables common for all OSEv3 hosts 10 | [OSEv3:vars] 11 | 12 | # SSH user; this user should allow ssh-based auth without requiring a password 13 | ansible_ssh_user=ec2-user 14 | 15 | # If ansible_ssh_user is not root, ansible_become must be set to true 16 | ansible_become=true 17 | 18 | deployment_type={{deployment_type}} 19 | # Enable ntp 20 | openshift_clock_enabled=true 21 | # Network plugin 22 | os_sdn_network_plugin_name='{{os_sdn_network_plugin_name}}' 23 | 24 | # Enable cockpit 25 | osm_use_cockpit=true 26 | # Set cockpit plugins 27 | osm_cockpit_plugins=['cockpit-kubernetes','cockpit-dashboard'] 28 | 29 | # To fix bug: https://bugzilla.redhat.com/show_bug.cgi?id=1588435 30 | oreg_url=registry.access.redhat.com/openshift3/ose-${component}:${version} 31 | 32 | {% if install_gluster %} 33 | # Install CNS 34 | openshift_storage_glusterfs_namespace=glusterfs 35 | openshift_storage_glusterfs_name=storage 36 | openshift_storage_glusterfs_storageclass_default=true 37 | openshift_storage_glusterfs_block_storageclass=true 38 | openshift_storage_glusterfs_block_host_vol_size={{gluster_block_host_volume_size}} 39 | openshift_storage_glusterfs_heketi_wipe=true 40 | openshift_storage_glusterfs_wipe=true 41 | 42 | # Automatically deploy the registry using glusterfs 43 | openshift_hosted_manage_registry=true 
44 | openshift_hosted_registry_storage_kind=glusterfs 45 | openshift_hosted_registry_storage_volume_size={{registry_volume_size}}Gi 46 | # openshift_hosted_registry_selector='region=infra' 47 | 48 | # Don't set AWS as default storage provider when using gluster 49 | openshift_storageclass_default=false 50 | {% endif %} 51 | 52 | openshift_master_dynamic_provisioning_enabled=true 53 | dynamic_volumes_check=False 54 | 55 | # Set cloud provider to AWS 56 | openshift_cloudprovider_kind=aws 57 | openshift_clusterid={{namespace}} 58 | openshift_cloudprovider_aws_access_key={{ lookup('env','AWS_ACCESS_KEY_ID') }} 59 | openshift_cloudprovider_aws_secret_key={{ lookup('env','AWS_SECRET_ACCESS_KEY') }} 60 | openshift_storageclass_parameters={'type': 'gp2', 'encrypted': 'false', 'zone': '{{availability_zone}}'} 61 | 62 | # We need a wildcard DNS setup for our public access to services 63 | openshift_master_default_subdomain={{public_subdomain_prefix}}.{{public_dns_zone}} 64 | 65 | # Enable htpasswd authentication; comment out the following to fall back to the default DenyAllPasswordIdentityProvider 66 | openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider'}] 67 | 68 | {% if install_cloudforms %} 69 | # CloudForms, see: https://docs.openshift.com/container-platform/3.9/install_config/cfme/role_variables.html 70 | openshift_management_install_management=true 71 | openshift_management_install_beta=true 72 | openshift_management_app_template=cfme-template 73 | openshift_management_template_parameters={'APPLICATION_MEM_REQ': '3000Mi', 'POSTGRESQL_MEM_REQ': '1Gi', 'ANSIBLE_MEM_REQ': '512Mi', 'APPLICATION_DOMAIN': 'cloudforms.{{public_subdomain_prefix}}.{{public_dns_zone}}'} 74 | openshift_management_storage_class=cloudprovider 75 | {% endif %} 76 | 77 | {% if install_metrics %} 78 | # Metrics, see: https://docs.openshift.com/enterprise/latest/install_config/cluster_metrics.html 79 | openshift_metrics_install_metrics=true 80 | openshift_metrics_cassandra_storage_type=dynamic 81 | openshift_metrics_cassandra_pvc_size={{metrics_volume_size}}Gi 82 | {% if ocp_version|version_compare('3.9', '<=') %} 83 | openshift_metrics_hawkular_nodeselector={'region':'infra'} 84 | openshift_metrics_heapster_nodeselector={'region':'infra'} 85 | openshift_metrics_cassandra_nodeselector={'region':'infra'} 86 | {% else %} 87 | openshift_metrics_hawkular_nodeselector={'node-role.kubernetes.io/infra':'true'} 88 | openshift_metrics_heapster_nodeselector={'node-role.kubernetes.io/infra':'true'} 89 | openshift_metrics_cassandra_nodeselector={'node-role.kubernetes.io/infra':'true'} 90 | {% endif %} 91 | {% if install_gluster %} 92 | openshift_metrics_cassandra_pvc_storage_class_name=glusterfs-storage-block 93 | {% endif %} 94 | {% endif %} 95 | 96 | {% if install_logging %} 97 | # Logging, see: https://docs.openshift.com/enterprise/latest/install_config/aggregate_logging.html 98 | openshift_logging_install_logging=true 99 | openshift_logging_es_pvc_dynamic=true 100 | openshift_logging_es_pvc_size={{logging_volume_size}}Gi 101 | {% if ocp_version|version_compare('3.9', '<=') %} 102 | openshift_logging_curator_nodeselector={'region':'infra'} 103 | openshift_logging_es_nodeselector={'region':'infra'} 104 | openshift_logging_kibana_nodeselector={'region':'infra'} 105 | {% else %} 106 | openshift_logging_curator_nodeselector={'node-role.kubernetes.io/infra':'true'} 107 | openshift_logging_es_nodeselector={'node-role.kubernetes.io/infra':'true'} 108 | 
openshift_logging_kibana_nodeselector={'node-role.kubernetes.io/infra':'true'} 109 | {% endif %} 110 | openshift_logging_es_memory_limit=4G 111 | {% if install_gluster %} 112 | openshift_logging_es_pvc_storage_class_name=glusterfs-storage-block 113 | {% endif %} 114 | {% endif %} 115 | 116 | {% if install_prometheus %} 117 | # Prometheus 118 | openshift_hosted_prometheus_deploy=true 119 | openshift_prometheus_node_selector={'region':'infra'} 120 | # Workaround for BZ https://bugzilla.redhat.com/show_bug.cgi?id=1549936 121 | openshift_prometheus_node_exporter_image_version=v{{ocp_version}} 122 | # Workaround for BZ https://bugzilla.redhat.com/show_bug.cgi?id=1563888 123 | openshift_node_open_ports=[{"service":"Prometheus Node Exporter", "port":"9100/tcp"},{"service":"Prometheus Router Stats", "port":"1936/tcp"}] 124 | 125 | # Prometheus storage 126 | openshift_prometheus_storage_access_modes=['ReadWriteOnce'] 127 | openshift_prometheus_storage_type=pvc 128 | openshift_prometheus_storage_volume_name=prometheus 129 | openshift_prometheus_storage_volume_size=10Gi 130 | 131 | # Prometheus AlertManager storage 132 | openshift_prometheus_alertmanager_storage_access_modes=['ReadWriteOnce'] 133 | openshift_prometheus_alertmanager_storage_volume_name=prometheus-alertmanager 134 | openshift_prometheus_alertmanager_storage_volume_size=10Gi 135 | openshift_prometheus_alertmanager_storage_type='pvc' 136 | 137 | # Prometheus AlertBuffer storage 138 | openshift_prometheus_alertbuffer_storage_access_modes=['ReadWriteOnce'] 139 | openshift_prometheus_alertbuffer_storage_volume_name=prometheus-alertbuffer 140 | openshift_prometheus_alertbuffer_storage_volume_size=10Gi 141 | openshift_prometheus_alertbuffer_storage_type='pvc' 142 | 143 | openshift_prometheus_sc_name="gp2" 144 | openshift_prometheus_alertmanager_sc_name="gp2" 145 | openshift_prometheus_alertbuffer_sc_name="gp2" 146 | {% endif %} 147 | 148 | {% if install_grafana %} 149 | openshift_grafana_storage_type=dynamic 150 | openshift_grafana_dashboards={'openshift-cluster-monitoring.json','node-exporter-full-dashboard.json'} 151 | {% endif %} 152 | 153 | {% if master_ssl_key_file is defined and master_ssl_cert_file is defined %} 154 | openshift_master_named_certificates=[{"certfile":"/home/{{amazon_user}}/certs/{{public_master_dns}}/{{ master_ssl_cert_file | basename }}","keyfile":"/home/{{amazon_user}}/certs/{{public_master_dns}}/{{master_ssl_key_file | basename }}"}] 155 | openshift_master_overwrite_named_certificates=true 156 | {% endif %} 157 | 158 | {% if wildcard_ssl_key_file is defined and wildcard_ssl_cert_file is defined and wildcard_ssl_fullchain_file is defined %} 159 | openshift_hosted_router_certificate={"certfile":"/home/{{amazon_user}}/certs/{{public_subdomain_prefix}}.{{public_dns_zone}}/{{ wildcard_ssl_cert_file | basename }}","keyfile":"/home/{{amazon_user}}/certs/{{public_subdomain_prefix}}.{{public_dns_zone}}/{{wildcard_ssl_key_file | basename }}","cafile": "/home/{{amazon_user}}/certs/{{public_subdomain_prefix}}.{{public_dns_zone}}/{{ wildcard_ssl_fullchain_file | basename }}"} 160 | {% endif %} 161 | 162 | {% if install_node_selector and ocp_version|version_compare('3.9', '<=') %} 163 | # Do not run apps on master, note selector is set by default in 3.10 164 | osm_default_node_selector="region=primary" 165 | {% endif %} 166 | 167 | {% if disable_service_catalog %} 168 | openshift_enable_service_catalog=false 169 | {% endif %} 170 | 171 | # Disable checks 172 | 
openshift_disable_check=docker_storage,docker_storage_driver,memory_availability,package_version 173 | 174 | # Create the masters host group 175 | [masters] 176 | {{master_private_dns_name}} openshift_public_hostname={{public_master_dns}} openshift_schedulable=true 177 | 178 | # Host group for etcd 179 | [etcd] 180 | {{master_private_dns_name}} 181 | 182 | # Host group for nodes on 3.9 and earlier; includes region info 183 | {% if ocp_version|version_compare('3.9', '<=') %} 184 | [nodes] 185 | {{master_private_dns_name}} openshift_node_labels="{'region': 'infra', 'zone': 'default'}" 186 | {% for node in nodes %} 187 | {{node.private_dns_name}} openshift_node_labels="{'region': 'primary', 'zone': '{{ 'west' if loop.index is divisibleby 2 else 'east'}}'}" 188 | {% endfor %} 189 | {% else %} 190 | [nodes] 191 | {{master_private_dns_name}} openshift_node_group_name='node-config-master-infra' 192 | {% for node in nodes %} 193 | {{node.private_dns_name}} openshift_node_group_name='node-config-compute' 194 | {% endfor %} 195 | {% endif %} 196 | 197 | {% if install_gluster %} 198 | [glusterfs] 199 | {% for node in nodes %} 200 | {% if loop.index < 4 %} 201 | {{node.private_dns_name}} glusterfs_ip={{node.private_ip}} glusterfs_zone={{loop.index}} glusterfs_devices="[ '/dev/xvdc' ]" 202 | {% endif %} 203 | {% endfor %} 204 | {% endif %} --------------------------------------------------------------------------------