├── LICENSE ├── README.md ├── adhoc-convert-from-puppet.yml ├── adhoc-convert-rhel.yml ├── adhoc-convert-stream.yml ├── adhoc-create-keytab-secret.yml ├── adhoc-deploy-ci-seamicro.yml ├── adhoc-deploy-kvm-guest.yml ├── adhoc-deploy-ocp-aws.yml ├── adhoc-deploy-one-vm.yml ├── adhoc-deploy-qa-kvm-guest.yml ├── adhoc-deprovision-ci-seamicro.yml ├── adhoc-deprovision-ec2-ci.yml ├── adhoc-deprovision-onevm-ci.yml ├── adhoc-grant-access.yml ├── adhoc-host-getvars.yml ├── adhoc-init-node.yml ├── adhoc-ipmi-poweroff.yml ├── adhoc-ipmi-poweron.yml ├── adhoc-ipmi-reset-pxe.yml ├── adhoc-ipmi-reset.yml ├── adhoc-kojid-upgrade.yml ├── adhoc-node-pdns-modify.yml ├── adhoc-ocp-create-pv.yml ├── adhoc-ocp-deploy-jenkins-for-ci-tenant.yml ├── adhoc-ocp-deploy-jenkins.yml ├── adhoc-openshift-auth.yml ├── adhoc-openshift-pv.yml ├── adhoc-openshift-resources.yaml ├── adhoc-provision-ec2-ci.yml ├── adhoc-provision-node-qa.yml ├── adhoc-provision-node.yml ├── adhoc-provision-ocp4-kvm-guest.yml ├── adhoc-provision-ocp4-node.yml ├── adhoc-provision-onevm-ci.yml ├── adhoc-refresh-facts.yml ├── adhoc-remove-mirror-stream-role.yml ├── adhoc-remove-node.yml ├── adhoc-reset-vm.yml ├── adhoc-seamicro-poweron.yml ├── adhoc-sshd_sign_host_key.yml ├── adhoc-update-phpbb.yml ├── adhoc-update-reboot.yml ├── adhoc-update-zabbix.yml ├── adhoc-upgrade-jenkins.yml ├── adhoc-upgrade-wordpress.yml ├── adhoc_install_rhcos_stg.yml ├── collections-ci.in ├── collections-dev.in ├── collections-duffy.in ├── collections-production.in ├── collections-staging.in ├── collections-stream.in ├── deploy-pagure-repospanner.yml ├── deploy-reimzul.yml ├── files └── file ├── handlers └── main.yml ├── requirements-ci.yml ├── requirements-dev.yml ├── requirements-duffy.yml ├── requirements-production.yml ├── requirements-staging.yml ├── requirements-stream.yml ├── role-all.yml ├── role-ansible-host.yml ├── role-artifacts-srv.yml ├── role-baseline.yml ├── role-bind.yml ├── role-boot-server.yml ├── role-cachethq.yml ├── 
role-centbot.yml ├── role-centos-backup.yml ├── role-certbot.yml ├── role-debuginfod.yml ├── role-distgit-lookaside.yml ├── role-duffy.yml ├── role-fedmsg.yml ├── role-fedora-messaging-client.yml ├── role-geoip2.yml ├── role-gitea.yml ├── role-gitlab-runner.yml ├── role-haproxy.yml ├── role-httpd-armv7.yml ├── role-httpd-centosproject.yml ├── role-httpd-compose.yml ├── role-httpd-console-qa.yml ├── role-httpd-docs-infra.yml ├── role-httpd-docs-sigs.yml ├── role-httpd-docs.yml ├── role-httpd-feeds.yml ├── role-httpd-git-websitent-content.yml ├── role-httpd-internal-mirrorlist.yml ├── role-httpd-people.yml ├── role-httpd-reposnap.yml ├── role-httpd-shared-dir.yml ├── role-httpd-www-staging.yml ├── role-httpd-www.yml ├── role-httpd.yml ├── role-ipa-client.yml ├── role-ipsilon.yml ├── role-iptables.yml ├── role-iscsid-target.yml ├── role-jenkins-server.yml ├── role-kanboard.yml ├── role-keepalived.yml ├── role-koji-client.yml ├── role-kojibot.yml ├── role-kojid.yml ├── role-kojifiles.yml ├── role-kojihub.yml ├── role-krb5-client.yml ├── role-kvm-host.yml ├── role-lsyncd.yml ├── role-mailman.yml ├── role-mailman3.yml ├── role-mantisbt.yml ├── role-mbs.yml ├── role-mirmon.yml ├── role-mirror-buildlogs.yml ├── role-mirror-cloud.yml ├── role-mirror-debuginfo.yml ├── role-mirror-master.yml ├── role-mirror-qa.yml ├── role-mirror-vault.yml ├── role-mirror.yml ├── role-mirrorlist.yml ├── role-moin.yml ├── role-mqtt.yml ├── role-mysql.yml ├── role-nfs-server.yml ├── role-ocp-admin-node.yml ├── role-odcs-backend.yml ├── role-odcs-frontend.yml ├── role-opennebula-frontend.yml ├── role-opennebula-kvm-host.yml ├── role-opentracker.yml ├── role-pagure.yml ├── role-pdns-pipe.yml ├── role-phpbb.yml ├── role-planet.yml ├── role-podman-host.yml ├── role-postfix.yml ├── role-postgresql.yml ├── role-redis.yml ├── role-reimzul-bstore.yml ├── role-reimzul-builder.yml ├── role-reimzul-controller.yml ├── role-repospanner.yml ├── role-restic.yml ├── role-robosignatory.yml ├── 
role-rsnapshot.yml ├── role-rsyncd.yml ├── role-smtp-relay.yml ├── role-sshd.yml ├── role-stikked.yml ├── role-stylo.yml ├── role-sync2git.yml ├── role-sync2s3.yml ├── role-tinyproxy.yml ├── role-ucarp.yml ├── role-unbound.yml ├── role-vbox-host.yml ├── role-vdo-host.yml ├── role-vsftpd.yml ├── role-wp.yml ├── role-zabbix-agent.yml ├── role-zabbix-proxy.yml ├── role-zabbix-server.yml ├── tasks └── main.yml ├── templates ├── ansible-hostvars.j2 ├── ansible-virt-install-ocp.j2 ├── ansible-virt-install.j2 ├── convert-stream-8 ├── convert-to-rhel ├── empty.cfg.j2 ├── install-config.yaml.j2 ├── kickstarts │ ├── centos-10-stream-ks.cfg.j2 │ ├── centos-7-ks.cfg.j2 │ ├── centos-8-ks.cfg.j2 │ ├── centos-8-stream-ks.cfg.j2 │ ├── centos-9-stream-ks.cfg.j2 │ ├── ci-centos-7-ks.j2 │ ├── ci-centos-8-ks.j2 │ ├── ci-centos-8-stream-ks.j2 │ ├── ci-centos-9-stream-ks.j2 │ ├── kvm-guest-c10-stream-ks.j2 │ ├── kvm-guest-c7-ks.j2 │ ├── kvm-guest-c8-ks.j2 │ ├── kvm-guest-c8-stream-ks.j2 │ ├── kvm-guest-c9-stream-ks.j2 │ ├── kvm-guest-rhel8-ks.j2 │ ├── kvm-guest-rhel9-ks.j2 │ ├── rhel-8-ks.cfg.j2 │ └── rhel-9-ks.cfg.j2 ├── ocp-treeinfo.j2 ├── ocp4.3_install-config.yaml.j2 ├── ocp_pxeboot.j2 ├── openshift-idp.yml ├── openshift-pv-storage │ ├── persistent-volume.json.j2 │ └── pv.yml.j2 ├── openshift-resources.j2 ├── openshift │ ├── jenkins-ci-workspace.env.j2 │ ├── jenkins-ci-workspace.yml │ └── jenkins-workspace.yml ├── puppet-cron-compare.j2 ├── pxe-grub.cfg.j2 ├── pxeboot.j2 └── sudofile.j2 └── vars └── main.yml /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2019 CentOS Project 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | 
copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | 23 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # CentOS.org Ansible Infra playbooks 2 | 3 | Just a placeholder for the Ansible playbooks used in the CentOS Infrastructure. 4 | Mainly divided into : 5 | 6 | * playbooks only including role (and so applied based on group membership) 7 | * ad-hoc tasks playbooks, called on demand when needed 8 | 9 | ## Contributing to Ansible infra (playbooks or roles) 10 | When you want to contribute to playbooks or roles, you should always open a merge request (PR) against `staging` branch and not `master` branch. 11 | One reviewer from the correct org will then get notification and will discuss/review your PR and eventually guide you. 12 | Ideally just look at the common way roles are organised, to reuse other roles and convention. 
13 | Always have default variables for *Everything*, with safe default values (of course never the ones deployed for staging/prod) 14 | When proposing a change in the behaviour, always make that change an opt-in, that defaults to "no" (safest) so that only that change would be applied on other nodes *if* variable used to include that task would be turned on. Of course we can have on real needs a default to `True` if we know that such change would need to be replicated by default on all nodes controlled by Ansible and using that role. 15 | 16 | ## Naming convention 17 | ### Roles (regular playbooks used at regular interval) 18 | The playbooks that will be played for roles will start with `role-` 19 | An all-in-one roles-all.yml will just include all the role-.yml when we want to just ensure the whole infra is configured the way it should. 20 | Each playbook for a role targets a group called `hostgroup-role-`. 21 | 22 | There are small exceptions where some role- playbooks will be small variants of a role, so also with other tasks to call specific tasks for an existing role (so when for example a vhost for httpd is a variant of the httpd role) 23 | 24 | #### "pre-flight" check 25 | For each playbook configuring a role, there is an option (in case of) to end the play if we have to. 26 | Basically touching /etc/no-ansible on a managed node would ensure that the playbook is ended. That permits to have (in emergency for example) someone having a look at a node and ensuring that ansible doesn't modify the node at the same time. After each role configuration, a file is also created (monitored by Zabbix) to ensure that nodes are always configured as they have to 27 | 28 | 29 | ### Deploy (on demand/triggered) 30 | Deploy playbooks (can combine also other playbooks) can be named `deploy-` 31 | 32 | 33 | ### Ad-Hoc tasks (on demand/triggered) 34 | Simple ad-hoc playbooks can just be named/start with `adhoc-`. 
35 | Those specific playbooks can need some tasks/vars/handlers, so for those special ones (as each role has it own set) we'll include those in the same repository, but it's up to the process deploying those for the ansible-host role to setup correctly the needed symlinks for the normal hierarchy. 36 | 37 | ## Complete needed structure (needed on ansible mgmt node) 38 | The "on-disk" ansible directory should then look like this : 39 | 40 | ``` 41 | . 42 | ├── ansible.cfg 43 | ├── files -> playbooks/files 44 | ├── handlers -> playbooks/handlers 45 | ├── filestore 46 | ├── inventory 47 | ├── pkistore 48 | ├── playbooks 49 | │   ├── files 50 | │   ├── handlers 51 | │   ├── requirements.yml 52 | │   └── vars 53 | │   └── templates 54 | ├── roles 55 | │   ├── 56 | └── templates -> playbooks/templates 57 | └── vars -> playbooks/vars 58 | 59 | ``` 60 | 61 | ## Ansible roles setup 62 | All roles will be deployed for a list of individual git repositories, each one being its own role. 63 | A requirements.yml file will be used to declare which roles (and from where to get them) and so downloaded on the ansible host through ansible-galaxy 64 | 65 | ## Inventory and encrypted files 66 | Inventory is itself a different git repository, git-crypted and that will be checked-out on the ansible host 67 | Same for the two following git (crypted) repositories: 68 | * pkistore (holding some PKI key/certs) 69 | * filestore (holding some other files/secrets that aren't templates but that should be crypted/non public, so not in roles either) 70 | 71 | ## License 72 | MIT (see [LICENSE file](LICENSE) ) 73 | 74 | -------------------------------------------------------------------------------- /adhoc-convert-from-puppet.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - import_playbook: adhoc-sshd_sign_host_key.yml 3 | - import_playbook: adhoc-host-getvars.yml 4 | 5 | - hosts: all 6 | become: True 7 | 8 | tasks: 9 | - name: Stopping puppet 10 | 
service: 11 | name: puppet 12 | state: stopped 13 | enabled: no 14 | 15 | - name: Clearing puppet classes.txt (was checked by Zabbix in default template) 16 | file: 17 | path: /var/lib/puppet/classes.txt 18 | state: absent 19 | 20 | - name: Deleting old wrapper scripts used by puppet 21 | file: 22 | path: "/usr/local/bin/{{ item }}" 23 | state: absent 24 | with_items: 25 | - areca-cli 26 | - centos-updates 27 | - build_firewall 28 | - build_firewall6 29 | - zabbix-check-cloud-status.sh 30 | - zabbix-check-debuginfo-status.sh 31 | - zabbix-check-eth-settings.sh 32 | - zabbix-check-iptables.sh 33 | - zabbix-check-msync-status.sh 34 | - zabbix-check-ro.sh 35 | - zabbix-check-vault-status.sh 36 | - zabbix-hw-raid-check.sh 37 | - zabbix-mdstat-check.sh 38 | - zabbix-mirmon-check.sh 39 | - zabbix-os-stats.sh 40 | - zabbix-pdns-stats.sh 41 | - zabbix-raid-areca-check.sh 42 | 43 | - name: Deleting old iptables setup from puppet 44 | file: 45 | path: "/etc/sysconfig/{{ item }}" 46 | state: absent 47 | with_items: 48 | - iptables.d 49 | - ip6tables.d 50 | 51 | - name: Distributing cron compare script 52 | template: 53 | src: puppet-cron-compare.j2 54 | dest: /var/tmp/puppet-cron-compare 55 | mode: 0750 56 | owner: root 57 | 58 | - name: Merging cron users list if needed 59 | set_fact: 60 | puppet_cron_users: "{{ puppet_cron_users + [ 'centos' ] }}" 61 | when: "'msync-nodes' in group_names or 'cloud-nodes' in group_names or 'vault-nodes' in group_names or 'debuginfo-nodes' in group_names or 'buildlogs-nodes' in group_names" 62 | 63 | - name: Saving previous users crontab 64 | shell: "egrep -v '^#' /var/spool/cron/{{ item }} > /root/backup-crontab-{{ item }}" 65 | args: 66 | creates: "/root/backup-crontab-{{ item }}" 67 | with_items: "{{ puppet_cron_users }}" 68 | 69 | - name: Clearing now puppet cron jobs 70 | shell: "test -e /var/spool/cron/{{ item }} && su -c 'crontab -r' - {{ item }} || /bin/true" 71 | with_items: "{{ puppet_cron_users }}" 72 | 73 | # Just the new baseline 
everywhere 74 | - import_playbook: role-baseline.yml 75 | # And now the roles based on inventory match / group membership 76 | - import_playbook: role-all.yml 77 | 78 | # Now that Ansible ran, let's verify if we have a diff in cron jobs removed/applied back (in case of manual undocumented jobs) 79 | - hosts: all 80 | become: True 81 | 82 | tasks: 83 | - name: Verifying if we have same number of cron jobs 84 | shell: "/var/tmp/puppet-cron-compare {{ item }}" 85 | with_items: 86 | - root 87 | - centos 88 | register: "cron_output" 89 | 90 | - name: Sending mail report if issues 91 | mail: 92 | from: "{{ ansible_mail_from }}" 93 | to: "{{ ansible_mail_to }}" 94 | subject: "[CentOS Ansible] Issues found when converting {{ inventory_hostname }}" 95 | body: "Cron jobs number is different for user {{ item.item }}. Please check backup files (/root/backup-crontab-{{ item.item }})" 96 | delegate_to: "{{ ansible_mail_srv }}" 97 | with_items: "{{ cron_output.results }}" 98 | when: "'Difference' in item.stdout" 99 | loop_control: 100 | label: "{{ item.item }}" 101 | -------------------------------------------------------------------------------- /adhoc-convert-rhel.yml: -------------------------------------------------------------------------------- 1 | # Convert CentOS Stream 8 nodes to RHEL 8 2 | --- 3 | - hosts: "{{ target }}" 4 | vars_prompt: 5 | - name: "target" 6 | prompt: "Host[s]/Group[s] to convert from Stream to RHEL ? 
=> " 7 | private: no 8 | become: True 9 | gather_facts: True 10 | 11 | 12 | tasks: 13 | 14 | - import_role: 15 | name: baseline 16 | tasks_from: rhel 17 | when: 18 | - ansible_distribution_major_version == '8' 19 | - ansible_distribution == 'CentOS' 20 | 21 | - block: 22 | - name: Ensuring we can drop some TLS cdn files 23 | file: 24 | path: "{{ item }}" 25 | state: directory 26 | with_items: 27 | - /etc/rhsm/ca 28 | - /etc/pki/entitlement 29 | 30 | - name: Ensuring we have correct RHEL gpg pub key 31 | copy: 32 | src: "{{ item.file }}" 33 | dest: "{{ item.dest }}" 34 | loop: 35 | - { file: '{{ pkistore }}/rpm/redhat-uep.pem', dest: '/etc/rhsm/ca/redhat-uep.pem' } 36 | - { file: '{{ pkistore }}/rpm/RPM-GPG-KEY-redhat-release', dest: '/etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release' } 37 | 38 | - name: Importing convert shell script 39 | template: 40 | src: convert-to-rhel 41 | dest: /var/tmp/convert-to-rhel 42 | mode: 0750 43 | owner: root 44 | 45 | - name: Converting to RHEL 8 46 | command: 47 | cmd: /var/tmp/convert-to-rhel 48 | register: result 49 | changed_when: result.rc == 2 50 | failed_when: result.rc == 1 51 | when: 52 | - ansible_distribution == 'CentOS' 53 | - ansible_distribution_major_version == '8' 54 | -------------------------------------------------------------------------------- /adhoc-convert-stream.yml: -------------------------------------------------------------------------------- 1 | # Convert CentOS 8 nodes to CentOS Stream 8 2 | --- 3 | - hosts: all 4 | become: True 5 | 6 | tasks: 7 | - block: 8 | - name: Importing convert shell script 9 | template: 10 | src: convert-stream-8 11 | dest: /var/tmp/convert-stream-8 12 | mode: 0750 13 | owner: root 14 | 15 | - name: Converting to stream 8 16 | command: 17 | cmd: /var/tmp/convert-stream-8 18 | register: result 19 | changed_when: result.rc == 2 20 | failed_when: result.rc == 1 21 | when: ansible_distribution_major_version == '8' 22 | 
-------------------------------------------------------------------------------- /adhoc-create-keytab-secret.yml: -------------------------------------------------------------------------------- 1 | --- 2 | ############## 3 | # 4 | # Extra variables need to be passed to this playbook at runtime to create the 5 | # secret which will contain the keytab for the group sync authoprization operator 6 | # 7 | # centosci_ocp_api_token_stg 8 | # centosci_ocp_api_host_stg e.g. https://api.ocpci.os.stg.ci.centos.org:6443 9 | # 10 | ############## 11 | 12 | 13 | - name: Create openshift secret containing keytab 14 | hosts: localhost 15 | 16 | tasks: 17 | 18 | - name: Create the CentosCIAuthorization operator keytab k8s Secret staging 19 | community.okd.k8s: 20 | api_key: "{{ centosci_ocp_api_token_stg }}" 21 | host: "{{ centosci_ocp_api_host_stg }}" 22 | validate_certs: no 23 | state: present 24 | definition: 25 | apiVersion: v1 26 | kind: Secret 27 | metadata: 28 | name: "centos-ci-keytab-secret" 29 | namespace: "centos-ci-authorization-operator" 30 | data: 31 | centos-ci-authorization-keytab: 32 | "{{ centosci_authorization_keytab_file | b64encode }}" 33 | vars: 34 | centosci_authorization_keytab_file: 35 | "{{ lookup('file', 36 | '../pkistore/aws-ocp/centos-ci-authorization-operator-stg-keytab.kt') 37 | }}" 38 | -------------------------------------------------------------------------------- /adhoc-deploy-ci-seamicro.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This ad-hoc playbook is written only to support Seamicro Chassis (now dead product) 3 | # It uses some variables like user/pass to access Chassis rest API and configure/reset compute nodes 4 | # By default, for CI purposes we are ensuring that we add a second path to chassis/fabric to present multipath and so increase available bandwdith during tests 5 | 6 | - hosts: "{{ node_name }}" 7 | gather_facts: False 8 | become: False 9 | 10 | # Some pre-defined list that 
will be added for 'uri' module and converted to json for rest api 11 | vars: 12 | seamicro_add_disk_body: 13 | value: "{{ seamicro_diskid }}" 14 | readonly: false 15 | username: "{{ seamicro_chassis_user }}" 16 | password: "{{ seamicro_chassis_pass }}" 17 | seamicro_reset_body: 18 | action: reset 19 | using-pxe: "true" 20 | username: "{{ seamicro_chassis_user }}" 21 | password: "{{ seamicro_chassis_pass }}" 22 | seamicro_poweroff_body: 23 | action: power-off 24 | force: "true" 25 | username: "{{ seamicro_chassis_user }}" 26 | password: "{{ seamicro_chassis_pass }}" 27 | 28 | 29 | tasks: 30 | - block: 31 | - name: "Generate tftp config [{{ centos_dist }}] for Seamicro node" 32 | template: 33 | src: pxeboot.j2 34 | dest: "/var/lib/tftpboot/pxelinux.cfg/01-{{ mac_address | lower | replace(':','-') }}" 35 | mode: 0755 36 | tags: 37 | - pxe 38 | - name: "Generate kickstart file [{{centos_dist }}] for Seamicro node[s]" 39 | template: 40 | src: "{{ item }}" 41 | dest: "/var/www/html/ks/{{ inventory_hostname }}-ks.cfg" 42 | mode: 0755 43 | with_first_found: 44 | - "../templates/kickstarts/ci-centos-{{ centos_dist }}-ks.j2" 45 | tags: 46 | - pxe 47 | - ks 48 | delegate_to: "{{ deploy_node }}" 49 | 50 | - block: 51 | - name: Deleting additional path to the Seamicro node[s] 52 | uri: 53 | url: "https://{{ seamicro_chassis }}.ci.centos.org/v2.0/server/{{ seamicro_srvid }}/vdisk/{{ seamicro_vdisk_slot }}?username={{ seamicro_chassis_user }}&password={{ seamicro_chassis_pass }}" 54 | validate_certs: no 55 | method: DELETE 56 | status_code: 200,400 57 | timeout: 120 58 | 59 | - name: Wait for the disk to be removed 60 | uri: 61 | url: "https://{{ seamicro_chassis }}.ci.centos.org/v2.0/server/{{ seamicro_srvid }}/vdisk/{{ seamicro_vdisk_slot }}?username={{ seamicro_chassis_user }}&password={{ seamicro_chassis_pass }}" 62 | validate_certs: no 63 | method: GET 64 | timeout: 120 65 | status_code: 200,201,404 66 | register: http_result 67 | until: http_result['status'] == 404 68 | 
retries: 5 69 | delay: 10 70 | 71 | - name: Adding additional path to vdisk to the Seamicro node[s] 72 | uri: 73 | url: "https://{{ seamicro_chassis }}.ci.centos.org/v2.0/server/{{ seamicro_srvid }}/vdisk/{{ seamicro_vdisk_slot }}" 74 | validate_certs: no 75 | method: PUT 76 | body_format: json 77 | body: "{{ seamicro_add_disk_body | to_json }}" 78 | status_code: 201 79 | timeout: 120 80 | 81 | - name: Resetting the Seamicro node[s] 82 | uri: 83 | url: "https://{{ seamicro_chassis }}.ci.centos.org/v2.0/server/{{ seamicro_srvid }}" 84 | validate_certs: no 85 | method: POST 86 | body_format: json 87 | body: "{{ seamicro_reset_body | to_json }}" 88 | timeout: 180 89 | tags: 90 | - reset 91 | 92 | - name: Waiting for Seamicro node[s] to be available through ssh 93 | wait_for: 94 | port: 22 95 | host: "{{ ip }}" 96 | timeout: 1200 97 | 98 | - name: Sending back useful information for duffy provisioner 99 | set_fact: 100 | duffy_out: 101 | nodes: 102 | - hostname: "{{ node_name }}" 103 | ipaddr: "{{ ip }}" 104 | 105 | 106 | delegate_to: localhost 107 | 108 | 109 | -------------------------------------------------------------------------------- /adhoc-deploy-kvm-guest.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This Ad Hoc task will deploy a KVM virtual machine on existing KVM host 3 | # If you define a bridge network, it has to exist first of course on the kvm host, defined by the ansible-role-kvm-host role 4 | # Needed variables (per host or group inheritance): 5 | # kvm_host: The kvm host on which to deploy the guest (string) 6 | # kvm_guest_vcpus: number or cores to add to define the vm (string) 7 | # kvm_guest_memory: available RAM for the guest (string, in MB) 8 | # kvm_guest_disk_size: the qcow2 image size for the guest (string, in GB) 9 | # kvm_guest_luks_encrypted: if we want to encrypt disk (boolean : True|False) 10 | # kvm_guest_luks_passphrase: if we enabled it (see above) : the passphrase to crypt the 
disk 11 | # Account: 12 | # kvm_guest_root_pass: the root account password to define by default 13 | # Network settings: 14 | # kvm_guest_bridge: (boolean, True|False) : using an existing bridge on the kvm host 15 | # kvm_host_bridge: the bridge name to use if above is enabled (string, like br0) 16 | # kvm_guest_default_net: if kvm_guest_bridge is False, which other network we want to use, like 'default' (string) 17 | # kvm_guest_arch: the architecture for the guest (string, like x86_64, aarch64, ppc64le, etc) 18 | # kvm_guest_ip: ip address for the guest (string) 19 | # kvm_guest_netmask: netmask for the guest (string) 20 | # kvm_guest_gateway: gateway ip address for the guest (string) 21 | # kvm_guest_nameserver: resolver ip address to use for the guest (string) 22 | # 23 | # Operating Systems 24 | # 25 | # rhel_version: if defined at host/group level, use RHEL and that version (string) and not centos at all 26 | # centos_version: which centos os version to deploy (string, like 8, 8-stream, etc) 27 | 28 | 29 | - hosts: "{{ init_kvm_guests }}" 30 | become: true 31 | gather_facts: false 32 | vars_prompt: 33 | - name: "init_kvm_guests" 34 | prompt: "[WARNING] KVM guests to be deployed with CentOS => " 35 | private: no 36 | 37 | tasks: 38 | - name: Checking Virtual Guest exists 39 | virt: 40 | name: "{{ inventory_hostname }}" 41 | state: running 42 | ignore_errors: yes 43 | no_log: True 44 | delegate_to: "{{ kvm_host }}" 45 | register: vm_exists 46 | 47 | - name: Creating kickstart to provision VM[s] 48 | template: 49 | src: "../templates/kickstarts/kvm-guest-rhel{{ rhel_version }}-ks.j2" 50 | dest: "/var/lib/libvirt/local-kickstarts/{{ inventory_hostname }}.cfg" 51 | delegate_to: "{{ kvm_host }}" 52 | when: rhel_version is defined 53 | tags: 54 | - ks 55 | 56 | - name: Creating kickstart to provision VM[s] 57 | template: 58 | src: "../templates/kickstarts/kvm-guest-c{{ kvm_guest_distro_release }}-ks.j2" 59 | dest: "/var/lib/libvirt/local-kickstarts/{{ 
inventory_hostname }}.cfg" 60 | delegate_to: "{{ kvm_host }}" 61 | when: rhel_version is not defined 62 | tags: 63 | - ks 64 | 65 | - name: Creating temporary virt-install command 66 | template: 67 | src: ../templates/ansible-virt-install.j2 68 | dest: "/var/lib/libvirt/virt-install-{{ inventory_hostname }}" 69 | mode: 0750 70 | owner: root 71 | group: root 72 | delegate_to: "{{ kvm_host }}" 73 | tags: 74 | - ks 75 | 76 | - name: Provisining the Virtual Guest[s] 77 | command: "/var/lib/libvirt/virt-install-{{ inventory_hostname }}" 78 | when: vm_exists is failed 79 | delegate_to: "{{ kvm_host }}" 80 | 81 | - name: Waiting for sshd to be available on the deployed node 82 | wait_for: 83 | port: 22 84 | host: "{{ kvm_guest_ip }}" 85 | timeout: 1200 86 | delegate_to: "{{ kvm_host }}" 87 | when: vm_exists is failed 88 | 89 | - name: Ensuring we remove the templatized virt-install script 90 | file: 91 | path: "/var/lib/libvirt/virt-install-{{ inventory_hostname }}" 92 | state: absent 93 | delegate_to: "{{ kvm_host }}" 94 | -------------------------------------------------------------------------------- /adhoc-deploy-ocp-aws.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | # This playbook requires a file provided to it for vars 4 | # An example of the file exists in inventory/group_vars/openshift-ci-stg 5 | # Needed variables: 6 | # openshift_cluster_base_domain: cloud.stg.ci.centos.org # This is the Route53 delegated zone 7 | # openshift_cluster_name: ocp # sub-domain under base domain that will be the openshift cluster name 8 | # openshift_control_node_instance_type: "m5.xlarge" # EC2 instance type 9 | # openshift_control_node_replicas: 3 # number of control plane nodes 10 | # openshift_control_node_ebs_size: 200 # size in Gigabytes 11 | # openshift_control_node_ebs_type: gp3 # Better throughput than gp2 but less than io1/io2 12 | # openshift_worker_node_instance_type: "m5.xlarge" # ec2 instance type for 
workers/compute nodes 13 | # openshift_worker_node_replicas: 3 # number of compute nodes 14 | # openshift_worker_node_ebs_size: 200 # size in Gigabytes 15 | # openshift_worker_node_ebs_type: gp3 # Better throughput than gp2 but less than io1/io2 16 | # openshift_install_pull_secret: '' # To be retrieved from https://console.redhat.com/openshift/downloads (limited validity cert) 17 | # openshift_node_ssh_public_key: "ssh-rsa ..." # ssh pub key to login into rhcos nodes 18 | # openshift_ci_access_key: #IAM access key needed to create/deploy resources 19 | # openshift_ci_secret_key: #IAM secret key for access key above 20 | # openshift_build_path: "{{ ansible_env.HOME }}/tmp/ocp-aws-ci-stg" # Tmp and local directory created by openshift-install 21 | # openshift_pvc_machine_network: 172.25.0.0/16 # The large subnet that will be split in new VPC. Ensure it doesn't overlap for peering/routing/vpn (it will be internal/private in vpc anyway) 22 | # 23 | 24 | # Pre-checks: 25 | # It also assumes that an empty Route53 zone exists for {{ openshift_cluster_base_domain }} (to be created first) 26 | 27 | # it creates some files locally which should be pushed to the filestore as 28 | # they contain install info needed for the cluster 29 | 30 | # how to call it : ansible-playbook playbook/adhoc-deploy-ocp-aws.yml -e '@/path/to/inventory/group_vars/cluster_name' 31 | 32 | - hosts: localhost 33 | 34 | tasks: 35 | - name: create build directory for deployment artifacts 36 | file: 37 | path: "{{ openshift_build_path }}" 38 | state: directory 39 | mode: 0700 40 | 41 | - name: download and extract openshift installer (if needed) 42 | unarchive: 43 | src: https://mirror.openshift.com/pub/openshift-v4/clients/ocp/stable/openshift-install-linux.tar.gz 44 | dest: "{{ openshift_build_path }}" 45 | remote_src: yes 46 | creates: "{{ openshift_build_path }}/openshift-install" 47 | 48 | - name: create openshift install-config.yaml file 49 | template: 50 | src: "install-config.yaml.j2" 51 | dest: 
"{{ openshift_build_path }}/install-config.yaml" 52 | 53 | - name: backup install-config.yaml file as install-config.yaml.backup 54 | copy: 55 | src: "{{ openshift_build_path }}/install-config.yaml" 56 | dest: "{{ openshift_build_path }}/install-config.yaml.backup" 57 | 58 | - name: Run openshift-install cli to deploy cluster 59 | command: "{{ openshift_build_path }}/openshift-install create cluster --dir={{ openshift_build_path }}" 60 | environment: 61 | AWS_ACCESS_KEY: "{{openshift_ci_access_key}}" 62 | AWS_SECRET_ACCESS_KEY: "{{openshift_ci_secret_key}}" 63 | -------------------------------------------------------------------------------- /adhoc-deploy-one-vm.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This playbook can be called on demand to provision an Opennebula template/instance 3 | # It needs to be called with some variables (see below) and will only deploy one instance, and wait for ip from the opennebula network/pool to be available , including sshd port 4 | # 5 | # You can call it like this: 6 | # ansible-playbook playbooks/adhoc-deploy-one-vm.yml --extra-vars "one_controller_host=controller.domain.com one_api_username=duffy one_api_password=NiceTry one_api_url=http://localhost:2633/RPC2 one_template_id=14" 7 | # 8 | # Variables: 9 | # 10 | # one_controller_host: the opennebule frontend node (for delegation) 11 | # one_api_username: username to use auth for opennebula 12 | # one_api_password: password for that user 13 | # one_api_url: opennebula RPC url, http://localhost:2633/RPC2 is the default one 14 | # one_template_id: the 'existing' template ID (containing image, network, etc) that we want to provision 15 | 16 | 17 | - hosts: "{{ one_controller_host }}" 18 | vars: 19 | - one_api_username: "" 20 | - one_api_password: "" 21 | - one_api_url: "" 22 | - one_template_id: "" 23 | 24 | tasks: 25 | - name: Deploying Opennebula template 26 | one_vm: 27 | template_id: "{{ one_template_id }}" 28 | 
api_username: "{{ one_api_username }}" 29 | api_password: "{{ one_api_password }}" 30 | api_url: "{{ one_api_url }}" 31 | count: 1 32 | attributes: 33 | name: 'cico-###' 34 | register: onevm_result 35 | 36 | - debug: 37 | var: onevm_result.instances[0].networks[0].ip 38 | 39 | - name: Waiting for OneVM to be available on network 40 | wait_for: 41 | host: '{{ onevm_result.instances[0].networks[0].ip }}' 42 | port: 22 43 | delay: 5 44 | -------------------------------------------------------------------------------- /adhoc-deploy-qa-kvm-guest.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This Ad Hoc task will deploy a KVM virtual machine on existing KVM host 3 | # If you define a bridge network, it has to exist first of course on the kvm host, defined by the ansible-role-kvm-host role 4 | # Needed variables (per host or group inheritance): 5 | # kvm_host: The kvm host on which to deploy the guest (string) 6 | # kvm_guest_vcpus: number or cores to add to define the vm (string) 7 | # kvm_guest_memory: available RAM for the guest (string, in MB) 8 | # kvm_guest_disk_size: the qcow2 image size for the guest (string, in GB) 9 | # kvm_guest_luks_encrypted: if we want to encrypt disk (boolean : True|False) 10 | # kvm_guest_luks_passphrase: if we enabled it (see above) : the passphrase to crypt the disk 11 | # Account: 12 | # kvm_guest_root_pass: the root account password to define by default 13 | # Network settings: 14 | # kvm_guest_bridge: (boolean, True|False) : using an existing bridge on the kvm host 15 | # kvm_host_bridge: the bridge name to use if above is enabled (string, like br0) 16 | # kvm_guest_default_net: if kvm_guest_bridge is False, which other network we want to use, like 'default' (string) 17 | # kvm_guest_arch: the architecture for the guest (string, like x86_64, aarch64, ppc64le, etc) 18 | # kvm_guest_ip: ip address for the guest (string) 19 | # kvm_guest_netmask: netmask for the guest (string) 20 | # 
kvm_guest_gateway: gateway ip address for the guest (string) 21 | # kvm_guest_nameserver: resolver ip address to use for the guest (string) 22 | # 23 | # Operating Systems 24 | # 25 | # rhel_version: if defined at host/group level, use RHEL and that version (string) and not centos at all 26 | # centos_version: which centos os version to deploy (string, like 8, 8-stream, etc) 27 | 28 | 29 | - hosts: kvm-guests 30 | become: true 31 | gather_facts: false 32 | 33 | tasks: 34 | - name: Destroying Virtual Guest (if running) 35 | virt: 36 | name: "{{ inventory_hostname }}" 37 | state: destroyed 38 | ignore_errors: yes 39 | no_log: True 40 | delegate_to: "{{ kvm_host }}" 41 | 42 | - name: Undefining Virtual Guest 43 | virt: 44 | name: "{{ inventory_hostname }}" 45 | command: undefine 46 | ignore_errors: yes 47 | delegate_to: "{{ kvm_host }}" 48 | 49 | - name: Undefining UEFI VMs with nvram 50 | shell: "virsh undefine --nvram {{ inventory_hostname }}" 51 | when: 52 | - kvm_guest_arch == "aarch64" 53 | ignore_errors: yes 54 | delegate_to: "{{ kvm_host }}" 55 | 56 | - name: Creating kickstart to provision VM[s] 57 | template: 58 | src: "../templates/kickstarts/kvm-guest-rhel{{ rhel_version }}-ks.j2" 59 | dest: "/var/lib/libvirt/local-kickstarts/{{ inventory_hostname }}.cfg" 60 | delegate_to: "{{ kvm_host }}" 61 | when: rhel_version is defined 62 | tags: 63 | - ks 64 | 65 | - name: Creating kickstart to provision VM[s] 66 | template: 67 | src: "../templates/kickstarts/kvm-guest-c{{ kvm_guest_distro_release }}-ks.j2" 68 | dest: "/var/lib/libvirt/local-kickstarts/{{ inventory_hostname }}.cfg" 69 | delegate_to: "{{ kvm_host }}" 70 | when: rhel_version is not defined 71 | tags: 72 | - ks 73 | 74 | - name: Creating temporary virt-install command 75 | template: 76 | src: ../templates/ansible-virt-install.j2 77 | dest: "/var/lib/libvirt/virt-install-{{ inventory_hostname }}" 78 | mode: 0750 79 | delegate_to: "{{ kvm_host }}" 80 | tags: 81 | - ks 82 | 83 | - name: Provisining the 
Virtual Guest[s] 84 | command: 85 | cmd: "/var/lib/libvirt/virt-install-{{ inventory_hostname }}" 86 | timeout: 1200 87 | delegate_to: "{{ kvm_host }}" 88 | 89 | - name: Waiting for sshd to be available on the deployed node 90 | wait_for: 91 | port: 22 92 | host: "{{ kvm_guest_ip }}" 93 | timeout: 1200 94 | delegate_to: "{{ kvm_host }}" 95 | 96 | - name: Ensuring we remove the templatized virt-install script 97 | file: 98 | path: "/var/lib/libvirt/virt-install-{{ inventory_hostname }}" 99 | state: absent 100 | delegate_to: "{{ kvm_host }}" 101 | -------------------------------------------------------------------------------- /adhoc-deprovision-ci-seamicro.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This ad-hoc playbook is written only to support Seamicro Chassis (now dead product) 3 | # It uses some variables like user/pass to access Chassis rest API and configure/reset compute nodes 4 | # By default, for CI purposes we are ensuring that we add a second path to chassis/fabric to present multipath and so increase available bandwidth during tests 5 | 6 | - hosts: "{{ node_name }}" 7 | gather_facts: False 8 | become: False 9 | 10 | # Some pre-defined list that will be added for 'uri' module and converted to json for rest api 11 | vars: 12 | seamicro_add_disk_body: 13 | value: "{{ seamicro_diskid }}" 14 | readonly: false 15 | username: "{{ seamicro_chassis_user }}" 16 | password: "{{ seamicro_chassis_pass }}" 17 | seamicro_reset_body: 18 | action: reset 19 | using-pxe: "true" 20 | username: "{{ seamicro_chassis_user }}" 21 | password: "{{ seamicro_chassis_pass }}" 22 | seamicro_poweroff_body: 23 | action: power-off 24 | force: "true" 25 | username: "{{ seamicro_chassis_user }}" 26 | password: "{{ seamicro_chassis_pass }}" 27 | 28 | 29 | tasks: 30 | - block: 31 | - name: Shutting down Seamicro node[s] 32 | uri: 33 | url: "https://{{ seamicro_chassis }}.ci.centos.org/v2.0/server/{{ seamicro_srvid }}" 34 | 
validate_certs: no 35 | method: POST 36 | body_format: json 37 | body: "{{ seamicro_poweroff_body | to_json }}" 38 | timeout: 180 39 | tags: 40 | - reset 41 | 42 | - name: Sending back useful information for duffy provisioner 43 | set_fact: 44 | duffy_out: 45 | nodes: 46 | - hostname: "{{ node_name }}" 47 | ipaddr: "{{ ip }}" 48 | 49 | 50 | delegate_to: localhost 51 | 52 | 53 | -------------------------------------------------------------------------------- /adhoc-deprovision-ec2-ci.yml: -------------------------------------------------------------------------------- 1 | # This playbook will need some variables to terminate instances, passed as variables 2 | # needed variables: 3 | # aws_ec2_region: us-east-2 4 | # aws_access_key: 5 | # aws_secret_key: 6 | # aws_ec2_instance_ids: 7 | # - i-longinstanceid 8 | # - i-otherlonginstid 9 | # 10 | --- 11 | - hosts: localhost 12 | gather_facts: False 13 | become: False 14 | 15 | tasks: 16 | - name: Delete EC2 instance[s] 17 | amazon.aws.ec2_instance: 18 | aws_access_key: "{{ aws_access_key }}" 19 | aws_secret_key: "{{ aws_secret_key }}" 20 | region: "{{ aws_ec2_region }}" 21 | instance_ids: >- 22 | {{ duffy_in.nodes | json_query(nodes_id_query) }} 23 | state: absent 24 | wait: False 25 | register: ec2_delete_result 26 | vars: 27 | nodes_id_query: "[*].data.provision.ec2_instance_id" 28 | 29 | - name: Deleting route53 record[s] 30 | community.aws.route53: 31 | aws_access_key: "{{ aws_access_key }}" 32 | aws_secret_key: "{{ aws_secret_key }}" 33 | hosted_zone_id: "{{ aws_route53_zoneid }}" 34 | overwrite: True 35 | record: "{{ item.hostname }}" 36 | state: absent 37 | type: A 38 | wait: no 39 | with_items: "{{ duffy_in.nodes | json_query(nodes_name_query ) }}" 40 | vars: 41 | nodes_name_query: "[*].data.provision" 42 | loop_control: 43 | label: "{{ item.hostname }}" 44 | 45 | - name: Sending data back to Duffy 46 | set_fact: 47 | duffy_out: 48 | nodes: "{{ duffy_in.nodes | json_query(ec2_delete_result_query) }}" 49 | vars: 
50 | ec2_delete_result_query: "[*].data.provision" 51 | -------------------------------------------------------------------------------- /adhoc-deprovision-onevm-ci.yml: -------------------------------------------------------------------------------- 1 | - hosts: localhost 2 | become: no 3 | gather_facts: no 4 | vars: 5 | - one_api_url: http://localhost:2633 6 | 7 | tasks: 8 | # The playbook consumes structures of this form passed in by the mechanism 9 | # in the Ansible variable `duffy_in`: 10 | # 11 | # duffy_in -> 12 | # { 13 | # "nodes": [ 14 | # { 15 | # "id": 1, 16 | # "hostname": "...", 17 | # "ipaddr": "...", 18 | # "data": {"provision": {"id": 1, ...}} 19 | # }, 20 | # { 21 | # "id": 2, 22 | # "hostname": "...", 23 | # "ipaddr": "...", 24 | # "data": {"provision": {"id": 2, ...}} 25 | # }, 26 | # ... 27 | # ] 28 | # } 29 | # 30 | # The data.provision field of a node contains the result returned for that 31 | # node from the provisioning playbook and should contain all necessary 32 | # information to perform the deprovisioning (e.g. a machine id specific to 33 | # the used cloud management software). 34 | 35 | # This task emulates deprovisioning the nodes, its result can be arbitrary 36 | # in principle, but ... 37 | - name: "Deprovision the things!" 38 | delegate_to: "{{ one_controller_host }}" 39 | one_vm: 40 | api_username: "{{ one_api_username }}" 41 | api_password: "{{ one_api_password }}" 42 | api_url: "{{ one_api_url }}" 43 | hard: yes 44 | state: absent 45 | instance_ids: >- 46 | {{ duffy_in.nodes | json_query(nodes_id_query) }} 47 | register: one_vm_result 48 | vars: 49 | nodes_id_query: "[*].data.provision.opennebula.id" 50 | 51 | # ... this (mandatory) task has to be able to transform it into the 52 | # expected output format, i.e. 
set a fact `duffy_out` which repeats enough 53 | # of data.provision passed into the playbook for each successfully 54 | # deprovisioned node to clearly correlate results with node objects so 55 | # Duffy can chalk up nodes for reuse, or mark them as retired or failed 56 | # appropriately. 57 | # 58 | # duffy_out -> 59 | # { 60 | # "nodes": [{"id": 1, ...}, {"id": 2, ...}] 61 | # } 62 | - name: "Summarize the things!" 63 | set_fact: 64 | duffy_out: 65 | nodes: "{{ duffy_in.nodes | json_query(one_vm_result_query) }}" 66 | vars: 67 | one_vm_result_query: "[*].data.provision" 68 | -------------------------------------------------------------------------------- /adhoc-grant-access.yml: -------------------------------------------------------------------------------- 1 | # This playbook can be used just to grant access to the node for ssh 2 | # Useful for newly provisioned node/donated node on which we just have root with pass auth (before reinstall) 3 | # So it can be used before init-node and so with also -u $user -k (if needed) 4 | 5 | - hosts: all 6 | become: True 7 | gather_facts: False 8 | tasks: 9 | 10 | - name: Adding admin group 11 | group: 12 | name: admins 13 | state: present 14 | 15 | - name: Adding admin user[s] 16 | user: 17 | name: "{{ item.login_name }}" 18 | state: present 19 | comment: "{{ item.full_name }}" 20 | shell: /bin/bash 21 | append: yes 22 | groups: admins 23 | with_items: "{{ admins_list }}" 24 | when: admins_list is defined 25 | loop_control: 26 | label: "{{ item.login_name }}" 27 | - name: Importing admin user[s] ssh pub keys 28 | authorized_key: 29 | user: "{{ item.0.login_name }}" 30 | key: "{{ item.1 }}" 31 | with_subelements: 32 | - "{{ admins_list }}" 33 | - ssh_pub_key 34 | when: admins_list is defined 35 | loop_control: 36 | label: "{{ item.0.login_name }}" 37 | tags: 38 | - users 39 | 40 | - name: Giving sudo access for admin users when needed 41 | template: 42 | src: templates/sudofile.j2 43 | dest: /etc/sudoers.d/{{ 
item.login_name }} 44 | mode: 0440 45 | owner: root 46 | group: root 47 | with_items: "{{ admins_list }}" 48 | loop_control: 49 | label: "{{ item.login_name }}" 50 | when: admins_list is defined and item.sudo_right 51 | tags: 52 | - users 53 | 54 | -------------------------------------------------------------------------------- /adhoc-host-getvars.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: all 3 | become: True 4 | 5 | tasks: 6 | - name: Find our public ip 7 | uri: 8 | url: http://ipv4.icanhazip.com 9 | return_content: yes 10 | register: public_ip 11 | when: 12 | - dnf_use_proxy is defined and not dnf_use_proxy 13 | - ansible_bios_vendor == 'Amazon EC2' 14 | - set_fact: 15 | pub_ip: "{{ public_ip.content | replace('\n', '') }}" 16 | when: 17 | - ansible_bios_vendor == 'Amazon EC2' 18 | - dnf_use_proxy is defined and not dnf_use_proxy 19 | 20 | - name: Generate initial host_vars/{{ inventory_hostname }} in ../out 21 | template: 22 | src: ansible-hostvars.j2 23 | dest: "{{ out_dir }}/{{ inventory_hostname }}" 24 | delegate_to: localhost 25 | become: False 26 | -------------------------------------------------------------------------------- /adhoc-init-node.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: all 3 | vars_prompt: 4 | - name: "warning_message" 5 | prompt: "[WARNING] : have you called this script with --limit to apply only to some nodes/groups ? 
CTRL+C if not the case" 6 | private: no 7 | become: True 8 | gather_facts: True 9 | 10 | tasks: 11 | - block: 12 | - name: Find our public ip 13 | uri: 14 | url: http://ipv4.icanhazip.com 15 | return_content: yes 16 | register: public_ip 17 | - set_fact: 18 | pub_ip: "{{ public_ip.content | replace('\n', '') }}" 19 | 20 | - name: Granting IP access in firewall for the new nodes to infra nodes 21 | shell: "/usr/local/bin/authorize_ip_host {{ pub_ip }}" 22 | delegate_to: "{{ item }}" 23 | with_items: "{{ infra_admin_hosts }}" 24 | when: 25 | - infra_admin_hosts is defined 26 | - dnf_use_proxy is defined and not dnf_use_proxy 27 | 28 | - import_playbook: adhoc-sshd_sign_host_key.yml 29 | when: ssh_ca_sign is defined and ssh_ca_sign 30 | - import_playbook: adhoc-host-getvars.yml 31 | 32 | # Just the new baseline everywhere 33 | - import_playbook: role-baseline.yml 34 | # And now the roles based on inventory match / group membership 35 | - import_playbook: role-all.yml 36 | 37 | -------------------------------------------------------------------------------- /adhoc-ipmi-poweroff.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: "{{ ipmi_target }}" 3 | vars_prompt: 4 | - name: "ipmi_target" 5 | prompt: "Host[s] to power off through ipmi ? " 6 | private: no 7 | gather_facts: False 8 | 9 | tasks: 10 | - name: Powering off node[s] through ipmi 11 | shell: ipmitool -H "{{ ipmi_ip }}" -U "{{ ipmi_user }}" -P "{{ ipmi_pass }}" -I lanplus chassis power off 12 | delegate_to: "{{ ipmitool_host }}" 13 | 14 | -------------------------------------------------------------------------------- /adhoc-ipmi-poweron.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: "{{ ipmi_target }}" 3 | vars_prompt: 4 | - name: "ipmi_target" 5 | prompt: "Host[s] to power on through ipmi ? 
" 6 | private: no 7 | gather_facts: False 8 | 9 | tasks: 10 | - name: Powering on node[s] through ipmi 11 | shell: ipmitool -H "{{ ipmi_ip }}" -U "{{ ipmi_user }}" -P "{{ ipmi_pass }}" -I lanplus chassis power on 12 | delegate_to: "{{ ipmitool_host }}" 13 | 14 | -------------------------------------------------------------------------------- /adhoc-ipmi-reset-pxe.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: "{{ ipmi_target }}" 3 | vars_prompt: 4 | - name: "ipmi_target" 5 | prompt: "Host to reset and boot over pxe ? " 6 | private: no 7 | gather_facts: False 8 | 9 | tasks: 10 | - name: == Hardware provisioning == configuring pxe boot 11 | command: ipmitool -I lanplus -U "{{ ipmi_user }}" -P "{{ ipmi_pass }}" -H "{{ ipmi_ip }}" chassis bootdev pxe 12 | delegate_to: "{{ ipmitool_host }}" 13 | 14 | - name: Resetting node through ipmi (in case it's already powered on) 15 | shell: ipmitool -H "{{ ipmi_ip }}" -U "{{ ipmi_user }}" -P "{{ ipmi_pass }}" -I lanplus chassis power reset 16 | delegate_to: "{{ ipmitool_host }}" 17 | 18 | - name: Powering on node[s] through ipmi (in case it's powered off) 19 | shell: ipmitool -H "{{ ipmi_ip }}" -U "{{ ipmi_user }}" -P "{{ ipmi_pass }}" -I lanplus chassis power on 20 | delegate_to: "{{ ipmitool_host }}" 21 | 22 | -------------------------------------------------------------------------------- /adhoc-ipmi-reset.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: "{{ ipmi_target }}" 3 | vars_prompt: 4 | - name: "ipmi_target" 5 | prompt: "Host to reset through ipmi ? 
" 6 | private: no 7 | gather_facts: False 8 | 9 | tasks: 10 | - name: Resetting node through ipmi 11 | shell: ipmitool -H "{{ ipmi_ip }}" -U "{{ ipmi_user }}" -P "{{ ipmi_pass }}" -I lanplus chassis power reset 12 | delegate_to: "{{ ipmitool_host }}" 13 | 14 | -------------------------------------------------------------------------------- /adhoc-kojid-upgrade.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: koji-builders 3 | become: True 4 | gather_facts: False 5 | 6 | tasks: 7 | - name: kojid is running 8 | service: 9 | name: kojid 10 | state: restarted 11 | 12 | 13 | -------------------------------------------------------------------------------- /adhoc-node-pdns-modify.yml: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env ansible-playbook 2 | # This playbook can be used to disable/enable a msync/vault/other role that is delegated to Pdns(PowerDNS) through our GeoIP backend 3 | --- 4 | - hosts: "{{ target }}" 5 | vars_prompt: 6 | - name: "target" 7 | prompt: "Host to modify in PowerDNS ? => " 8 | private: no 9 | - name: "pdns_action" 10 | prompt: "Action (enable|disable) ? 
=> " 11 | private: no 12 | become: True 13 | strategy: linear 14 | gather_facts: False 15 | 16 | tasks: 17 | - name: Enable/Disable msync node in PowerDNS geoip backend 18 | shell: 19 | cmd: /var/lib/centos-infra/host-modify -a "{{ pdns_action }}" -n "{{ target }}" 20 | chdir: /var/lib/centos-infra 21 | delegate_to: "{{ pdns_db_host }}" 22 | 23 | - name: Reloading directly powerdns zone on pdns nodes 24 | command: 25 | cmd: /var/lib/centos-pdns/backend_download 26 | delegate_to: "{{ item }}" 27 | with_items: "{{ groups['pdns-nodes'] }}" 28 | tags: 29 | - pdns 30 | 31 | -------------------------------------------------------------------------------- /adhoc-ocp-create-pv.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This playbook is to create a persistent volume for openshift CI namespace 3 | # The node on which we'll run this should have the exported NFS volume mounted locally 4 | # using variables from inventory: 5 | # ocp_nfs_server: (like node.domain) 6 | # ocp_nfs_export: ocp-staging 7 | 8 | - hosts: localhost 9 | vars_prompt: 10 | - name: "ocp_env" 11 | prompt: | 12 | Select OCP env: 13 | - stream-rdu2 14 | - ci-aws 15 | - ci-aws-stg 16 | 17 | private: False 18 | vars: 19 | ocp_groups: 20 | stream-rdu2: ocp-admin-nodes 21 | ci-aws-stg: ocp-stg-ci-management 22 | ci-aws: aws-ocp-ci-management 23 | 24 | tasks: 25 | - set_fact: 26 | mgmt_hosts: "{{ ocp_groups[ocp_env] }}" 27 | - add_host: 28 | name: "{{ item }}" 29 | groups: ocp_target_host 30 | with_items: "{{ groups[mgmt_hosts] }}" 31 | 32 | - hosts: ocp_target_host 33 | become: true 34 | become_user: "{{ ocp_service_account }}" 35 | vars_prompt: 36 | - name: "ocp_project" 37 | prompt: "Existing project for which we'll just create a PV " 38 | private: no 39 | - name: "pv_name" 40 | prompt: "Reason for PV (computed into uuid) " 41 | private: no 42 | - name: "pv_claimref" 43 | prompt: "PV Reference Name for the Claim " 44 | private: no 45 | - name: "pv_size" 46 
| prompt: "Persistent Volume size (example 10Gi) " 47 | private: no 48 | 49 | 50 | tasks: 51 | - name: Generate a UUID 52 | set_fact: 53 | pv_uuid: "{{ (ocp_project + '-' + pv_claimref|default('noclaimref', true)) + '-' + pv_name | to_uuid }}" 54 | tags: 55 | - pv 56 | - nfs 57 | 58 | - name: UUID Generated 59 | debug: 60 | var: pv_uuid 61 | tags: 62 | - pv 63 | - nfs 64 | 65 | - name: Make a pv name 66 | set_fact: 67 | pv_name: "pv-{{ pv_size | lower }}-{{ pv_uuid }}" 68 | tags: 69 | - pv 70 | - nfs 71 | 72 | - name: UUID Generated 73 | debug: 74 | var: pv_name 75 | tags: 76 | - pv 77 | - nfs 78 | 79 | - name: See if the PV already exists 80 | command: 81 | cmd: "bin/oc get pv/{{ pv_name }}" 82 | chdir: "/home/{{ ocp_service_account }}" 83 | register: results 84 | changed_when: false 85 | failed_when: 86 | - results.rc == 0 87 | tags: 88 | - pv 89 | 90 | - block: 91 | - name: Ensuring we have local mount point 92 | file: 93 | path: /mnt/ocp_store 94 | state: directory 95 | 96 | - name: Ensuring nfs export is mounted on mgmt station 97 | mount: 98 | fstype: nfs 99 | src: "{{ ocp_nfs_server }}:{{ ocp_nfs_export }}" 100 | path: /mnt/ocp_store 101 | state: mounted 102 | 103 | - name: make directories for each PV 104 | file: 105 | path: "/mnt/ocp_store/{{ pv_name }}" 106 | owner: nobody 107 | group: nobody 108 | mode: 0777 109 | state: directory 110 | become_user: root 111 | tags: 112 | - pv 113 | - nfs 114 | 115 | - name: Ensuring we can store pv config files to apply 116 | file: 117 | path: "/home/{{ ocp_service_account }}/pv_configs/" 118 | state: directory 119 | 120 | - name: create json files for PV 121 | template: 122 | src: "templates/openshift-pv-storage/pv.yml.j2" 123 | dest: "/home/{{ ocp_service_account }}/pv_configs/{{ pv_name }}.yml" 124 | register: pv_init 125 | tags: 126 | - pv 127 | 128 | - name: apply the transformation 129 | command: 130 | cmd: "bin/oc create -f pv_configs/{{ pv_name }}.yml" 131 | chdir: "/home/{{ ocp_service_account }}" 132 | when: 
pv_init is changed 133 | tags: 134 | - pv 135 | 136 | 137 | 138 | -------------------------------------------------------------------------------- /adhoc-ocp-deploy-jenkins.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This playbook is to create a persistent volume for openshift CI namespace 3 | # The node on which we'll run this should have the exported NFS volume mounted locally 4 | # using variables from inventory: 5 | # ocp_nfs_server: (like node.domain) 6 | # ocp_nfs_export: ocp-staging 7 | 8 | 9 | - hosts: ocp-admin-nodes 10 | become: true 11 | become_user: "{{ ocp_service_account }}" 12 | vars_prompt: 13 | - name: "ocp_project" 14 | prompt: "Existing project/namespace in ocp we'll deploy jenkins to/for (has to exist before !) " 15 | private: no 16 | - name: "pv_size" 17 | prompt: "Persistent Volume size (example 10Gi) " 18 | private: no 19 | vars: 20 | pv_claimref: jenkins 21 | pv_name: jenkins 22 | 23 | tasks: 24 | - name: Generate a UUID 25 | set_fact: 26 | pv_uuid: "{{ (ocp_project + '-' + pv_claimref|default('noclaimref', true)) + '-' + pv_name | to_uuid }}" 27 | tags: 28 | - pv 29 | - nfs 30 | 31 | - name: UUID Generated 32 | debug: 33 | var: pv_uuid 34 | tags: 35 | - pv 36 | - nfs 37 | 38 | - name: Make a pv name 39 | set_fact: 40 | pv_name: "pv-{{ pv_size | lower }}-{{ pv_uuid }}" 41 | tags: 42 | - pv 43 | - nfs 44 | 45 | - name: UUID Generated 46 | debug: 47 | var: pv_name 48 | tags: 49 | - pv 50 | - nfs 51 | 52 | - block: 53 | - name: Ensuring we have local mount point 54 | file: 55 | path: /mnt/ocp_store 56 | state: directory 57 | 58 | - name: Ensuring nfs export is mounted on mgmt station 59 | mount: 60 | fstype: nfs 61 | src: "{{ ocp_nfs_server }}:{{ ocp_nfs_export }}" 62 | path: /mnt/ocp_store 63 | state: mounted 64 | 65 | - name: make directories for each PV 66 | file: 67 | path: "/mnt/ocp_store/{{ pv_name }}" 68 | owner: nobody 69 | group: nobody 70 | mode: 0777 71 | state: directory 72 | 
become_user: root 73 | tags: 74 | - pv 75 | - nfs 76 | 77 | - name: create json files for PV 78 | template: 79 | src: "templates/openshift-pv-storage/pv.yml.j2" 80 | dest: "/home/{{ ocp_service_account }}/pv_configs/{{ pv_name }}.yml" 81 | register: pv_init 82 | tags: 83 | - pv 84 | 85 | - name: apply the transformation 86 | command: 87 | cmd: "bin/oc create -f pv_configs/{{ pv_name }}.yml" 88 | chdir: "/home/{{ ocp_service_account }}" 89 | when: pv_init is changed 90 | tags: 91 | - pv 92 | 93 | - name: Importing basic jenkins-ci-workspace template 94 | template: 95 | src: openshift/jenkins-workspace.yml 96 | dest: "/home/{{ ocp_service_account }}/ocp_configs/{{ ocp_project }}-jenkins.yml" 97 | tags: 98 | - template 99 | 100 | - name: "Deploy jenkins under namespace {{ ocp_project }}" 101 | shell: 102 | cmd: "bin/oc process -f ocp_configs/{{ ocp_project }}-jenkins.yml | bin/oc create -n {{ ocp_project }} -f -" 103 | chdir: "/home/{{ ocp_service_account }}" 104 | tags: 105 | - deploy 106 | 107 | 108 | -------------------------------------------------------------------------------- /adhoc-openshift-auth.yml: -------------------------------------------------------------------------------- 1 | --- 2 | ################################################# 3 | # 4 | # This creates and applies config to allow OAuth through an OpenID provider 5 | # A vars file needs to be passed at run time with the correct config defined 6 | # This playbook assumes a logged in admin oc user locally 7 | # 8 | ################################################# 9 | 10 | - hosts: localhost 11 | name: Idp config setup 12 | 13 | 14 | tasks: 15 | 16 | - name: Create secret for openid 17 | command: /usr/bin/oc create secret generic {{idp_secret_name}} --from-literal=clientSecret={{idp_secret}} -n openshift-config 18 | 19 | - name: Try toGenerate and apply config 20 | block: 21 | - name: generate config 22 | template: 23 | src: templates/openshift-idp.yml 24 | dest: templates/openshift-idp-complete.yml 25
| 26 | - name: Apply oauth config 27 | command: /usr/bin/oc apply -f templates/openshift-idp-complete.yml 28 | 29 | always: 30 | - name: remove temp config file 31 | file: 32 | path: templates/openshift-idp-complete.yml 33 | state: absent 34 | 35 | -------------------------------------------------------------------------------- /adhoc-openshift-pv.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This playbook is to create a persistent volume for openshift CI namespace 3 | # Needed variables as extra-vars: 4 | # - pv_size (like 100Gi) 5 | # - cico_project_name (registry) 6 | # - host (where to execute command, like ocp-admin.ci.centos.org) 7 | # Example to run the playbook 8 | # ansible-playbook-ci playbooks/adhoc-openshift-pv.yml -e "host=n4-136.cloud.ci.centos.org" -e "pv_size=10Gi" -e "cico_project_name=test" 9 | # The node on which we'll run this should have the exported NFS volume mounted locally 10 | # using variables from inventory: 11 | # ocp_nfs_server: (like node.domain) 12 | # ocp_nfs_export: ocp-staging 13 | 14 | 15 | - hosts: "{{ host }}" 16 | become: true 17 | become_user: "{{ ocp_service_account }}" 18 | 19 | tasks: 20 | - name: Generate a UUID 21 | set_fact: 22 | pv_uuid: "{{ (cico_project_name + pv_claimref|default('noclaimref', true)) | to_uuid }}" 23 | tags: 24 | - pv 25 | - nfs 26 | 27 | - name: UUID Generated 28 | debug: 29 | var: pv_uuid 30 | tags: 31 | - pv 32 | - nfs 33 | 34 | - name: Make a pv name 35 | set_fact: 36 | pv_name: "pv-{{ pv_size | lower }}-{{ pv_uuid }}" 37 | tags: 38 | - pv 39 | - nfs 40 | 41 | - name: UUID Generated 42 | debug: 43 | var: pv_name 44 | tags: 45 | - pv 46 | - nfs 47 | 48 | - name: See if the PV already exists 49 | command: "/home/{{ ocp_service_account }}/bin/oc get pv/{{ pv_name }}" 50 | register: results 51 | changed_when: false 52 | failed_when: 53 | - results.rc == 0 54 | tags: 55 | - pv 56 | 57 | - block: 58 | - name: Ensuring we have local mount point 59 | 
file: 60 | path: /mnt/ocp_store 61 | state: directory 62 | 63 | - name: Ensuring nfs export is mounted on mgmt station 64 | mount: 65 | fstype: nfs 66 | src: "{{ ocp_nfs_server }}:{{ ocp_nfs_export }}" 67 | path: /mnt/ocp_store 68 | state: mounted 69 | 70 | - name: make directories for each PV 71 | file: 72 | path: "/mnt/ocp_store/{{ pv_name }}" 73 | owner: nobody 74 | group: nobody 75 | mode: 0777 76 | state: directory 77 | become_user: root 78 | tags: 79 | - pv 80 | - nfs 81 | 82 | - name: create json files for PV 83 | template: 84 | src: "templates/openshift-pv-storage/persistent-volume.json.j2" 85 | dest: "/home/{{ ocp_service_account }}/{{ pv_name }}.json" 86 | tags: 87 | - pv 88 | 89 | - name: apply the transformation 90 | command: "/home/{{ ocp_service_account }}/bin/oc create -f /home/{{ ocp_service_account }}/{{ pv_name }}.json" 91 | tags: 92 | - pv 93 | 94 | -------------------------------------------------------------------------------- /adhoc-openshift-resources.yaml: -------------------------------------------------------------------------------- 1 | # This playbook is to use templates/openshift-resources.j2 and extra vars 2 | # (project details) and apply the resources created from the template. 
3 | # ansible-playbook playbooks/adhoc-openshift-resources.yaml -e "host=ocp-ci-management" -e "@projects_vars.yaml" 4 | # 5 | # Example of project_vars.yaml 6 | # ``` 7 | # project_name: billionDollarProject 8 | # project_admins: 9 | # - admin1 10 | # - admin2 11 | # 12 | # bug_id: '000000' #bug id is the place/id where the project is requested 13 | # ``` 14 | 15 | 16 | - hosts: "{{ host }}" 17 | become: true 18 | become_user: "{{ ocp_service_account }}" 19 | 20 | tasks: 21 | # generate the template for project to be created 22 | - name: apply the template 23 | template: 24 | src: openshift-resources.j2 25 | dest: "/home/{{ ocp_service_account }}/ocp_configs/{{ project_name }}.yaml" 26 | 27 | # apply created openshift resources 28 | - name: oc apply resources 29 | command: "/home/{{ ocp_service_account }}/bin/oc apply -f /home/{{ ocp_service_account }}/ocp_configs/{{ project_name }}.yaml" 30 | -------------------------------------------------------------------------------- /adhoc-provision-ocp4-node.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This playbook is an ad-hoc task used to deploy bare-metal nodes for OCP4 3 | # It used the following variables, declared through inventory (group/host vars): 4 | # rhcos_version: 4.4.3 # default rhcos version deployed 5 | # arch: x86_64 # default architecture 6 | # md-raid-dev: # mdadm device eg: /dev/md126 7 | # deploy_host: # node hosting pxelinux.cfg configurations 8 | # ipmitool_host: # node with ipmitool and mgmt vlan access 9 | # pxe_bootdev: eth0 # device used as bootdev and first interface configured 10 | # rhcos_install_img_url: # Where to grab rhcos install http:////rhcos/rhcos-4.3.8-x86_64-metal.x86_64.raw.gz 11 | # rhcos_ignition_file_url: # where to find ignition file http:///config/rhcos/bootstrap.ign #to be modified based on group or host level, for bootstrap, master, etc 12 | # 13 | # 14 | - hosts: "{{ ocp_init_hosts }}" 15 | become: true 16 | 
gather_facts: false 17 | vars_prompt: 18 | - name: "ocp_init_hosts" 19 | prompt: "[WARNING] Nodes to be fully wiped/reinstalled with OCP => " 20 | private: no 21 | 22 | tasks: 23 | 24 | - name: == Hardware provisioning == Generating the tftp configuration boot file 25 | template: 26 | src: ../templates/ocp_pxeboot.j2 27 | dest: /var/lib/tftpboot/pxelinux.cfg/01-{{ macaddress | lower | replace(":","-") }} 28 | mode: 0755 29 | delegate_to: "{{ deploy_host }}" 30 | tags: 31 | - pxe 32 | 33 | - name: == Hardware provisioning == Importing generated Ignition files 34 | template: 35 | src: "{{ filestore }}/rhcos/{{ item }}" 36 | dest: "/var/www/html/repo/rhcos/{{ item }}" 37 | mode: 0755 38 | with_items: 39 | - bootstrap.ign 40 | - master.ign 41 | - compute.ign 42 | delegate_to: "{{ deploy_host }}" 43 | tags: 44 | - ignition 45 | 46 | - name: == Hardware provisioning == Ensuring node is powered off 47 | command: ipmitool -I lanplus -U {{ ipmi_user }} -P {{ ipmi_pass }} -H {{ ipmi_ip }} chassis power off 48 | delegate_to: "{{ ipmitool_host }}" 49 | tags: 50 | - ipmi 51 | 52 | - name: == Hardware provisioning == Pausing ... 53 | pause: 54 | seconds: 8 55 | tags: 56 | - ipmi 57 | 58 | - name: == Hardware provisioning == configuring pxe boot 59 | command: ipmitool -I lanplus -U {{ ipmi_user }} -P {{ ipmi_pass }} -H {{ ipmi_ip }} chassis bootdev pxe 60 | delegate_to: "{{ ipmitool_host }}" 61 | tags: 62 | - ipmi 63 | 64 | - name: == Hardware provisioning == Pausing ... 
65 | pause: 66 | seconds: 2 67 | 68 | - name: == Hardware provisioning == Ensuring node is powered on 69 | command: ipmitool -I lanplus -U {{ ipmi_user }} -P {{ ipmi_pass }} -H {{ ipmi_ip }} chassis power on 70 | delegate_to: "{{ ipmitool_host }}" 71 | tags: 72 | - ipmi 73 | 74 | - name: == Hardware provisioning == Waiting for host to be reachable 75 | wait_for: 76 | host: "{{ ip }}" 77 | port: 22 78 | timeout: 1200 79 | delegate_to: "{{ deploy_host }}" 80 | 81 | -------------------------------------------------------------------------------- /adhoc-provision-onevm-ci.yml: -------------------------------------------------------------------------------- 1 | # This playbook will be used by Duffy CI provisioner to deploy Opennebula VMs 2 | # Duffy will call this playbook with quantity and also opennebula template to use (defined in nodepools) 3 | # Needed variables (either through inventory or --extra-vars): 4 | # - one_api_username: opennebula user to interact with one api 5 | # - one_api_password: opennebula password 6 | # - one_api_url: can default to localhost on controller but still a variable in case of 7 | # - one_template_name: existing vm template (in opennebula) to instantiate 8 | # - one_vm_quantity: number of VMs to instantiate with that template 9 | # 10 | 11 | - hosts: localhost 12 | gather_facts: False 13 | become: False 14 | vars: 15 | - one_api_url: http://localhost:2633 16 | tasks: 17 | - name: "Instantiating Opennebula template [{{ one_template_name }}]" 18 | one_vm: 19 | template_name: "{{ one_template_name }}" 20 | api_username: "{{ one_api_username }}" 21 | api_password: "{{ one_api_password }}" 22 | api_url: "{{ one_api_url }}" 23 | count: "{{ one_vm_quantity | int }}" 24 | attributes: 25 | name: 'duffy-###' 26 | register: opennebula_result 27 | delegate_to: "{{ one_controller_host }}" 28 | 29 | - name: Opennebula instances IP addresses 30 | debug: 31 | msg: "One VM IP address is {{ item }}" 32 | with_items: "{{ opennebula_result | 
json_query('instances[*].networks[0].ip') }}" 33 | 34 | - name: Waiting for OneVM to be available on network 35 | wait_for: 36 | host: "{{ item }}" 37 | port: 22 38 | delay: 5 39 | with_items: "{{ opennebula_result | json_query('instances[*].networks[0].ip') }}" 40 | 41 | - name: Sending data back to Duffy provisioner 42 | set_fact: 43 | duffy_out: 44 | nodes: "{{ opennebula_result | json_query(nodes_query) }}" 45 | vars: 46 | nodes_query: >- 47 | instances[*].{ 48 | opennebula: {id: vm_id}, ipaddr: networks[0].ip 49 | } 50 | -------------------------------------------------------------------------------- /adhoc-refresh-facts.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: all 3 | become: True 4 | tasks: 5 | - name: Forcing a facts refresh to have those locally available in cache 6 | setup: 7 | gather_timeout: 30 8 | 9 | -------------------------------------------------------------------------------- /adhoc-remove-mirror-stream-role.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This ad-hoc playbook can be used to "revert" the ansible-mirror role that would fetch by default stream9 content 3 | # Some sponsored mirrors don't have enough space for both, so we'll just remove the Stream 9 content (that is huge as it contains also src and debuginfo packages 4 | # Before launching this playbook you'd need to ensure that mirror_stream_content is set to `False` in ansible inventory (and pushed) : we don't want to have automated ansible replay the settings the other way around 5 | # 6 | 7 | - hosts: "{{ mirror_host }}" 8 | become: true 9 | strategy: linear 10 | gather_facts: false 11 | vars_prompt: 12 | - name: "mirror_host" 13 | prompt: "[WARNING] Nodes to remove from mirror.stream.centos.org pool => " 14 | private: no 15 | handlers: 16 | - import_tasks: handlers/main.yml 17 | 18 | 19 | tasks: 20 | - assert: 21 | that: 22 | - not mirror_stream_content 23 | 
fail_msg: "{{ mirror_host }} seems to inherit mirror_stream_content boolean to True !" 24 | 25 | - block: 26 | - name: Updating PDNS GeoIP backend to remove stream roles 27 | shell: 28 | cmd: echo "update nodes set \"{{ item }}\"='false' where fqdn='{{ mirror_host }}';" |sqlite3 nodes.db 29 | chdir: /var/lib/centos-infra/ 30 | with_items: 31 | - mirror.stream 32 | - rsync.stream 33 | delegate_to: "{{ pdns_db_host }}" 34 | 35 | - name: Regenerating .json backend for powerdns infra 36 | shell: 37 | cmd: ./gen_backend 38 | chdir: /var/lib/centos-infra 39 | delegate_to: "{{ pdns_db_host }}" 40 | 41 | - name: Reloading directly powerdns zone on pdns nodes 42 | command: 43 | cmd: /var/lib/centos-pdns/backend_download 44 | delegate_to: "{{ item }}" 45 | with_items: "{{ groups['pdns-nodes'] }}" 46 | tags: 47 | - pdns 48 | 49 | - name: Ensuring we don't actually try to fetch Stream content 50 | become_user: centos 51 | shell: 52 | cmd: pkill -9 rsync ; /bin/true 53 | tags: 54 | - centos 55 | 56 | - name: Cron job to rsync Stream 9 content from upstream node 57 | cron: 58 | name: "CentOS mirror.stream rsync job" 59 | job: "/home/{{ mirror_local_user }}-scripts/rsync-stream-full {{ mirror_rsync_from }} > /dev/null 2>&1" 60 | minute: "*/7" 61 | user: "{{ mirror_local_user }}" 62 | state: absent 63 | tags: 64 | - cron 65 | - name: Cleaning up some files coming from role (not needed anymore) 66 | file: 67 | path: "{{ item }}" 68 | state: absent 69 | with_items: 70 | - /etc/httpd/conf.d/11_mirror_stream.conf 71 | - /etc/rsyncd.d/02_CentOS-Stream.conf 72 | - /etc/rsyncd.d/03_centosstream.conf 73 | notify: 74 | - reload_httpd 75 | - rebuild_rsyncd 76 | 77 | - name: Reloading rsync/httpd if needed 78 | meta: flush_handlers 79 | 80 | - name: Removing stream content 81 | file: 82 | path: /home/centos-stream 83 | state: absent 84 | register: stream_content 85 | tags: 86 | - cleanup 87 | 88 | - name: Reporting zabbix value 89 | shell: zabbix_sender -c /etc/zabbix/zabbix_agentd.conf -k 
msync.stream.diff -o 5 90 | when: stream_content is changed 91 | tags: 92 | - cleanup 93 | -------------------------------------------------------------------------------- /adhoc-remove-node.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: all 3 | become: True 4 | gather_facts: False 5 | 6 | tasks: 7 | - name: Removing IP from zabbix firewall 8 | shell: "/usr/local/bin/remove_ip_host {{ ip }}" 9 | delegate_to: "{{ zabbix_api_srv }}" 10 | when: zabbix_api_srv is defined 11 | 12 | - name: Removing node from zabbix 13 | shell: "/bin/zabbix-cli -C \"remove_host {{ inventory_hostname }}\" ; /bin/true " 14 | delegate_to: "{{ zabbix_api_srv }}" 15 | when: zabbix_api_srv is defined 16 | 17 | 18 | 19 | -------------------------------------------------------------------------------- /adhoc-reset-vm.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: "{{ vm_to_reset }}" 3 | become: True 4 | vars_prompt: 5 | - name: "vm_to_reset" 6 | prompt: "[WARNING] Virtual Machine to hard-reset ? 
" 7 | private: no 8 | gather_facts: False 9 | 10 | tasks: 11 | - name: Resetting VM {{ vm_to_reset }} through virsh 12 | shell: virsh destroy {{ vm_to_reset }} ; virsh start {{ vm_to_reset }} 13 | delegate_to: "{{ kvm_host }}" 14 | 15 | -------------------------------------------------------------------------------- /adhoc-seamicro-poweron.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This ad-hoc playbook is written only to support Seamicro Chassis (now dead product) 3 | # It uses some variables like user/pass to access Chassis rest API and configure/reset compute nodes 4 | # By default, for CI purposes we are ensuring that we add a second path to chassis/fabric to present multipath and so increase available bandwdith during tests 5 | 6 | - hosts: seamicro-nodes 7 | gather_facts: False 8 | become: True 9 | 10 | # Some pre-defined list that will be added for 'uri' module and converted to json for rest api 11 | vars: 12 | seamicro_add_disk_body: 13 | value: "{{ seamicro_diskid }}" 14 | readonly: false 15 | username: "{{ seamicro_chassis_user }}" 16 | password: "{{ seamicro_chassis_pass }}" 17 | seamicro_reset_body: 18 | action: reset 19 | using-pxe: "false" 20 | username: "{{ seamicro_chassis_user }}" 21 | password: "{{ seamicro_chassis_pass }}" 22 | seamicro_poweroff_body: 23 | action: power-off 24 | force: "true" 25 | username: "{{ seamicro_chassis_user }}" 26 | password: "{{ seamicro_chassis_pass }}" 27 | seamicro_poweron_body: 28 | action: power-on 29 | force: "true" 30 | username: "{{ seamicro_chassis_user }}" 31 | password: "{{ seamicro_chassis_pass }}" 32 | 33 | 34 | tasks: 35 | 36 | - block: 37 | 38 | - name: Powering on the Seamicro node[s] 39 | uri: 40 | url: "https://{{ seamicro_chassis }}.ci.centos.org/v2.0/server/{{ seamicro_srvid }}" 41 | validate_certs: no 42 | method: POST 43 | body_format: json 44 | body: "{{ seamicro_reset_body | to_json }}" 45 | timeout: 180 46 | tags: 47 | - reset 48 | 49 | - 
name: Waiting for Seamicro node[s] to be available through ssh 50 | wait_for: 51 | port: 22 52 | host: "{{ inventory_hostname }}" 53 | timeout: 1200 54 | 55 | delegate_to: "{{ deploy_host }}" 56 | 57 | 58 | -------------------------------------------------------------------------------- /adhoc-sshd_sign_host_key.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # This adhoc playbook can be used to boostrap a newly deployed node (included in another one) 3 | # It will retrieve the sshd host pub keys (so first user manually connecting will have to validate it 4 | # and then it will submit those .pub files for signing by the central CA 5 | # Once done, it will put back in place the signed files 6 | # Standard baseline role can then really configure sshd_config, using the /etc/ssh/ssh_host_rsa_key-cert.pub 7 | # From that point, any host using the deployed known_hosts file will be able to connect without any warning 8 | # Purposes: being able to use ssh certificats (see https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/6/html/deployment_guide/sec-signing_ssh_certificates) 9 | 10 | # Needed variables (in your inventory) 11 | # ssh_ca_host: which host (from your inventory) is the signing host 12 | # ssh_ca_keystore: file on disk on the CA host that will contain the nodes files 13 | # ssh_ca_host_key: the ssh private key that will be used to sign all the .pub files 14 | 15 | - hosts: all 16 | become: True 17 | tasks: 18 | - meta: clear_facts 19 | - name: Ad-hoc task, ensuring we delete first 20 | file: 21 | path: "{{ ssh_ca_keystore }}/{{ inventory_hostname }}" 22 | state: absent 23 | delegate_to: "{{ ssh_ca_host }}" 24 | 25 | - name: Ensuring we have directory to store nodes .pub files on CA host 26 | file: 27 | path: "{{ ssh_ca_keystore }}/{{ inventory_hostname }}" 28 | state: directory 29 | owner: root 30 | group: root 31 | mode: 0700 32 | delegate_to: "{{ ssh_ca_host }}" 33 | 34 | - name: Finding 
generated host ssh key on node 35 | find: 36 | paths: /etc/ssh 37 | file_type: file 38 | patterns: "ssh_host_*_key.pub" 39 | register: ssh_host_pubkeys 40 | 41 | - name: Copy ssh pub keys from node locally 42 | fetch: 43 | src: "{{ item.path }}" 44 | dest: /var/tmp/ssh-pub-keys-{{ inventory_hostname }}/ 45 | flat: yes 46 | with_items: "{{ ssh_host_pubkeys.files }}" 47 | loop_control: 48 | label: '{{ item.path }}' 49 | 50 | - meta: clear_facts 51 | - name: Ensuring we have needed .pub files on CA host 52 | copy: 53 | src: "/var/tmp/ssh-pub-keys-{{ inventory_hostname }}/" 54 | dest: "{{ ssh_ca_keystore }}/{{ inventory_hostname }}/" 55 | delegate_to: "{{ ssh_ca_host }}" 56 | register: files_to_sign 57 | 58 | - name: Signing host pub keys with CA key 59 | shell: "ssh-keygen -s {{ ssh_ca_host_key }} -I {{ inventory_hostname }} -h ssh_host*_key.pub" 60 | args: 61 | chdir: "{{ ssh_ca_keystore }}/{{ inventory_hostname }}/" 62 | when: files_to_sign is changed 63 | register: ssh_signing 64 | delegate_to: "{{ ssh_ca_host }}" 65 | 66 | - name: Find new -cert.pub files on CA host 67 | find: 68 | paths: "{{ ssh_ca_keystore }}/{{ inventory_hostname }}/" 69 | file_type: file 70 | patterns: "ssh_host*-cert.pub" 71 | register: ssh_signed_certs 72 | delegate_to: "{{ ssh_ca_host }}" 73 | when: ssh_signing is changed 74 | 75 | - name: Copy -cert.pub files back from CA host 76 | fetch: 77 | src: "{{ item.path }}" 78 | dest: "/var/tmp/ssh-pub-keys-{{ inventory_hostname }}/" 79 | flat: yes 80 | with_items: "{{ ssh_signed_certs.files }}" 81 | delegate_to: "{{ ssh_ca_host }}" 82 | loop_control: 83 | label: "{{ item.path }}" 84 | when: ssh_signing is changed 85 | 86 | - name: Find new -cert.pub files locally 87 | find: 88 | paths: "/var/tmp/ssh-pub-keys-{{ inventory_hostname }}/" 89 | file_type: file 90 | patterns: "ssh_host*-cert.pub" 91 | register: ssh_signed_certs 92 | delegate_to: localhost 93 | become: False 94 | when: ssh_signing is changed 95 | 96 | - name: Pushing back signed 
-cert.pub files back on the node 97 | copy: 98 | src: "{{ item.path }}" 99 | dest: /etc/ssh/ 100 | with_items: "{{ ssh_signed_certs.files }}" 101 | when: ssh_signing is changed 102 | loop_control: 103 | label: "{{ item.path }}" 104 | notify: restart_sshd 105 | 106 | handlers: 107 | - import_tasks: handlers/main.yml 108 | -------------------------------------------------------------------------------- /adhoc-update-phpbb.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: hostgroup-role-phpbb 3 | become: True 4 | vars_prompt: 5 | - name: "phpbb_target_version" 6 | prompt: "[WARNING] phpbb target version (like 3.2.10) that you want to bump to (has to be in same major release) : " 7 | private: no 8 | pre_tasks: 9 | - name: Checking if no-ansible file is there 10 | stat: 11 | path: /etc/no-ansible 12 | register: no_ansible 13 | 14 | - name: Verifying if we can run ansible or not 15 | assert: 16 | that: 17 | - "not no_ansible.stat.exists" 18 | msg: "/etc/no-ansible file exists so skipping ansible run on this node" 19 | 20 | tasks: 21 | - import_role: 22 | name: phpbb 23 | tasks_from: update 24 | 25 | post_tasks: 26 | - name: Touching ansible-run (monitored by Zabbix) 27 | file: 28 | path: /var/log/ansible.run 29 | state: touch 30 | 31 | -------------------------------------------------------------------------------- /adhoc-update-reboot.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: all 3 | become: True 4 | gather_facts: False 5 | 6 | tasks: 7 | - name: Update Operating System pkgs 8 | yum: 9 | name: "*" 10 | state: latest 11 | update_cache: True 12 | 13 | - name: Reboot the machine to ensure latest kernel is booted 14 | reboot: 15 | 16 | -------------------------------------------------------------------------------- /adhoc-update-zabbix.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: 
hostgroup-role-zabbix-server:hostgroup-role-zabbix-proxy 3 | become: True 4 | 5 | tasks: 6 | - name: Ensuring metadata cache is updated 7 | yum: 8 | name: glibc 9 | state: present 10 | update_cache: True 11 | 12 | - import_playbook: role-zabbix-server.yml 13 | - import_playbook: role-zabbix-proxy.yml 14 | -------------------------------------------------------------------------------- /adhoc-upgrade-jenkins.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: "{{ target }}" 3 | become: True 4 | vars_prompt: 5 | - name: "target" 6 | prompt: "Host to upgrade to latest Jenkins TLS version => " 7 | private: no 8 | 9 | tasks: 10 | - name: Upgrading Jenkins to latest version on "{{ target }}" 11 | yum: 12 | name: jenkins 13 | state: latest 14 | -------------------------------------------------------------------------------- /adhoc-upgrade-wordpress.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: hostgroup-role-wp 3 | become: True 4 | tasks: 5 | - name: Upgrading wordpress to version {{ wp_release }} 6 | unarchive: 7 | src: "https://wordpress.org/wordpress-{{ wp_release | default('latest') }}.tar.gz" 8 | remote_src: yes 9 | dest: "{{ wp_local_path | default('/var/www/wordpress') }}" 10 | extra_opts: 11 | - --strip-components=1 12 | tags: 13 | - core 14 | 15 | - name: Upgrading wordpress plugins if needed 16 | unarchive: 17 | src: "https://downloads.wordpress.org/plugin/{{ item.name }}.{{ item.version }}.zip" 18 | remote_src: yes 19 | dest: "{{ wp_local_path | default('/var/www/wordpress') }}/wp-content/plugins/" 20 | with_items: "{{ wp_plugins }}" 21 | tags: 22 | - plugins 23 | -------------------------------------------------------------------------------- /adhoc_install_rhcos_stg.yml: -------------------------------------------------------------------------------- 1 | - hosts: "{{ ocp_init_hosts }}" 2 | become: true 3 | gather_facts: false 4 | vars_prompt: 5 
| - name: "ocp_init_hosts" 6 | prompt: "[WARNING] Nodes to be fully wiped/reinstalled with RHCOS=> " 7 | private: no 8 | 9 | 10 | tasks: 11 | - name: Deleting additional path to the Seamicro node[s] 12 | uri: 13 | url="https://{{ seamicro_chassis }}.ci.centos.org/v2.0/server/{{ seamicro_srvid }}/vdisk/{{ seamicro_vdisk_slot }}?username={{ seamicro_user_login }}&password={{ seamicro_user_pass }}" 14 | validate_certs=no 15 | method=DELETE 16 | status_code=200,400 17 | timeout=120 18 | delegate_to: "{{ deploy_host }}" 19 | tags: 20 | - test 21 | 22 | - name: Wait for the disk to be removed 23 | uri: 24 | url=https://{{ seamicro_chassis }}.ci.centos.org/v2.0/server/{{ seamicro_srvid }}/vdisk/{{ seamicro_vdisk_slot }}?username={{ seamicro_user_login }}&password={{ seamicro_user_pass }} 25 | validate_certs=no 26 | method=GET 27 | timeout=120 28 | status_code=200,201,404 29 | register: http_result 30 | until: http_result['status'] == 404 31 | retries: 5 32 | delay: 10 33 | delegate_to: "{{ deploy_host }}" 34 | 35 | 36 | - name: == Hardware provisioning == Importing generated Ignition files 37 | template: 38 | src: "{{ filestore }}/rhcos/staging/{{ item }}" 39 | dest: "/var/www/html/repo/rhcos/staging/{{ item }}" 40 | mode: 0755 41 | with_items: 42 | - bootstrap.ign 43 | - master.ign 44 | - compute.ign 45 | delegate_to: "{{ deploy_host }}" 46 | tags: 47 | - ignition 48 | 49 | - name: Generating the tftp configuration boot file 50 | template: src={{ item }} dest=/var/lib/tftpboot/pxelinux.cfg/01-{{ mac_address | lower | replace(":","-") }} mode=0755 51 | with_first_found: 52 | - "templates/ocp_pxeboot.j2" 53 | delegate_to: "{{ deploy_host }}" 54 | 55 | - name: Resetting the Seamicro node[s] 56 | uri: 57 | url: https://{{ seamicro_chassis }}.ci.centos.org/v2.0/server/{{ seamicro_srvid }} 58 | validate_certs: no 59 | method: POST 60 | headers: 61 | Content-Type: "application/json" 62 | body: '{{ seamicro_reset_body | to_json }}' 63 | timeout: 180 64 | delegate_to: "{{ 
deploy_host }}" 65 | tags: 66 | - reset 67 | 68 | - name: Waiting for Seamicro node[s] to be available through ssh ... 69 | local_action: wait_for port=22 host={{ inventory_hostname }} timeout=1200 70 | tags: 71 | - wait 72 | -------------------------------------------------------------------------------- /collections-ci.in: -------------------------------------------------------------------------------- 1 | collections: 2 | - name: ansible.posix 3 | type: git 4 | source: https://github.com/ansible-collections/ansible.posix.git 5 | version: 1.5.1 6 | - name: ansible.netcommon 7 | type: git 8 | source: https://github.com/ansible-collections/ansible.netcommon.git 9 | version: 2.0.0 10 | - name: community.general 11 | type: git 12 | source: https://github.com/ansible-collections/community.general.git 13 | version: 4.6.1 14 | - name: community.libvirt 15 | type: git 16 | source: https://github.com/ansible-collections/community.libvirt.git 17 | version: 1.0.2 18 | - name: community.mysql 19 | type: git 20 | source: https://github.com/ansible-collections/community.mysql.git 21 | version: 3.5.1 22 | - name: community.postgresql 23 | type: git 24 | source: https://github.com/ansible-collections/community.postgresql.git 25 | version: 2.1.2 26 | - name: containers.podman 27 | type: git 28 | source: https://github.com/containers/ansible-podman-collections.git 29 | version: 1.9.2 30 | - name: amazon.aws 31 | type: git 32 | source: https://github.com/ansible-collections/amazon.aws.git 33 | version: 3.2.0 34 | - name: community.aws 35 | type: git 36 | source: https://github.com/ansible-collections/community.aws.git 37 | version: 3.2.1 38 | - name: community.rabbitmq 39 | type: git 40 | source: https://github.com/ansible-collections/community.rabbitmq.git 41 | version: 1.2.3 42 | -------------------------------------------------------------------------------- /collections-dev.in: -------------------------------------------------------------------------------- 1 | collections: 2 | 
- name: ansible.posix 3 | type: git 4 | source: https://github.com/ansible-collections/ansible.posix.git 5 | version: 1.5.1 6 | - name: ansible.netcommon 7 | type: git 8 | source: https://github.com/ansible-collections/ansible.netcommon.git 9 | version: 2.0.0 10 | - name: community.general 11 | type: git 12 | source: https://github.com/ansible-collections/community.general.git 13 | version: 4.6.1 14 | - name: community.libvirt 15 | type: git 16 | source: https://github.com/ansible-collections/community.libvirt.git 17 | version: 1.0.2 18 | - name: community.mysql 19 | type: git 20 | source: https://github.com/ansible-collections/community.mysql.git 21 | version: 3.5.1 22 | - name: community.postgresql 23 | type: git 24 | source: https://github.com/ansible-collections/community.postgresql.git 25 | version: 2.1.2 26 | - name: containers.podman 27 | type: git 28 | source: https://github.com/containers/ansible-podman-collections.git 29 | version: 1.9.2 30 | - name: amazon.aws 31 | type: git 32 | source: https://github.com/ansible-collections/amazon.aws.git 33 | version: 3.2.0 34 | - name: community.aws 35 | type: git 36 | source: https://github.com/ansible-collections/community.aws.git 37 | version: 3.2.1 38 | - name: community.rabbitmq 39 | type: git 40 | source: https://github.com/ansible-collections/community.rabbitmq.git 41 | version: 1.2.3 42 | -------------------------------------------------------------------------------- /collections-duffy.in: -------------------------------------------------------------------------------- 1 | collections: 2 | - name: ansible.posix 3 | type: git 4 | source: https://github.com/ansible-collections/ansible.posix.git 5 | version: 1.3.0 6 | - name: ansible.netcommon 7 | type: git 8 | source: https://github.com/ansible-collections/ansible.netcommon.git 9 | version: 2.0.0 10 | - name: community.general 11 | type: git 12 | source: https://github.com/ansible-collections/community.general.git 13 | version: 4.6.1 14 | - name: 
community.libvirt 15 | type: git 16 | source: https://github.com/ansible-collections/community.libvirt.git 17 | version: 1.0.2 18 | - name: community.mysql 19 | type: git 20 | source: https://github.com/ansible-collections/community.mysql.git 21 | version: 3.5.1 22 | - name: community.postgresql 23 | type: git 24 | source: https://github.com/ansible-collections/community.postgresql.git 25 | version: 2.1.2 26 | - name: containers.podman 27 | type: git 28 | source: https://github.com/containers/ansible-podman-collections.git 29 | version: 1.9.2 30 | - name: amazon.aws 31 | type: git 32 | source: https://github.com/ansible-collections/amazon.aws.git 33 | version: 3.2.0 34 | - name: community.aws 35 | type: git 36 | source: https://github.com/ansible-collections/community.aws.git 37 | version: 3.2.1 38 | -------------------------------------------------------------------------------- /collections-production.in: -------------------------------------------------------------------------------- 1 | collections: 2 | - name: ansible.posix 3 | type: git 4 | source: https://github.com/ansible-collections/ansible.posix.git 5 | version: 1.5.1 6 | - name: ansible.netcommon 7 | type: git 8 | source: https://github.com/ansible-collections/ansible.netcommon.git 9 | version: 2.0.0 10 | - name: community.general 11 | type: git 12 | source: https://github.com/ansible-collections/community.general.git 13 | version: 4.6.1 14 | - name: community.libvirt 15 | type: git 16 | source: https://github.com/ansible-collections/community.libvirt.git 17 | version: 1.0.2 18 | - name: community.mysql 19 | type: git 20 | source: https://github.com/ansible-collections/community.mysql.git 21 | version: 3.5.1 22 | - name: community.postgresql 23 | type: git 24 | source: https://github.com/ansible-collections/community.postgresql.git 25 | version: 2.1.2 26 | - name: containers.podman 27 | type: git 28 | source: https://github.com/containers/ansible-podman-collections.git 29 | version: 1.9.2 30 | - name: 
amazon.aws 31 | type: git 32 | source: https://github.com/ansible-collections/amazon.aws.git 33 | version: 3.2.0 34 | - name: community.aws 35 | type: git 36 | source: https://github.com/ansible-collections/community.aws.git 37 | version: 3.2.1 38 | - name: community.rabbitmq 39 | type: git 40 | source: https://github.com/ansible-collections/community.rabbitmq.git 41 | version: 1.2.3 42 | -------------------------------------------------------------------------------- /collections-staging.in: -------------------------------------------------------------------------------- 1 | collections: 2 | - name: ansible.posix 3 | type: git 4 | source: https://github.com/ansible-collections/ansible.posix.git 5 | version: 1.5.1 6 | - name: ansible.netcommon 7 | type: git 8 | source: https://github.com/ansible-collections/ansible.netcommon.git 9 | version: 2.0.0 10 | - name: community.general 11 | type: git 12 | source: https://github.com/ansible-collections/community.general.git 13 | version: 4.6.1 14 | - name: community.libvirt 15 | type: git 16 | source: https://github.com/ansible-collections/community.libvirt.git 17 | version: 1.0.2 18 | - name: community.mysql 19 | type: git 20 | source: https://github.com/ansible-collections/community.mysql.git 21 | version: 3.5.1 22 | - name: community.postgresql 23 | type: git 24 | source: https://github.com/ansible-collections/community.postgresql.git 25 | version: 2.1.2 26 | - name: containers.podman 27 | type: git 28 | source: https://github.com/containers/ansible-podman-collections.git 29 | version: 1.9.2 30 | - name: amazon.aws 31 | type: git 32 | source: https://github.com/ansible-collections/amazon.aws.git 33 | version: 3.2.0 34 | - name: community.aws 35 | type: git 36 | source: https://github.com/ansible-collections/community.aws.git 37 | version: 3.2.1 38 | - name: community.rabbitmq 39 | type: git 40 | source: https://github.com/ansible-collections/community.rabbitmq.git 41 | version: 1.2.3 42 | 
-------------------------------------------------------------------------------- /collections-stream.in: -------------------------------------------------------------------------------- 1 | collections: 2 | - name: ansible.posix 3 | type: git 4 | source: https://github.com/ansible-collections/ansible.posix.git 5 | version: 1.5.1 6 | - name: ansible.netcommon 7 | type: git 8 | source: https://github.com/ansible-collections/ansible.netcommon.git 9 | version: 2.0.0 10 | - name: community.general 11 | type: git 12 | source: https://github.com/ansible-collections/community.general.git 13 | version: 4.6.1 14 | - name: community.libvirt 15 | type: git 16 | source: https://github.com/ansible-collections/community.libvirt.git 17 | version: 1.0.2 18 | - name: community.mysql 19 | type: git 20 | source: https://github.com/ansible-collections/community.mysql.git 21 | version: 3.5.1 22 | - name: community.postgresql 23 | type: git 24 | source: https://github.com/ansible-collections/community.postgresql.git 25 | version: 2.1.2 26 | - name: containers.podman 27 | type: git 28 | source: https://github.com/containers/ansible-podman-collections.git 29 | version: 1.9.2 30 | - name: amazon.aws 31 | type: git 32 | source: https://github.com/ansible-collections/amazon.aws.git 33 | version: 3.2.0 34 | - name: community.aws 35 | type: git 36 | source: https://github.com/ansible-collections/community.aws.git 37 | version: 3.2.1 38 | - name: community.rabbitmq 39 | type: git 40 | source: https://github.com/ansible-collections/community.rabbitmq.git 41 | version: 1.2.3 42 | -------------------------------------------------------------------------------- /deploy-pagure-repospanner.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - import_playbook: role-repospanner.yml 3 | - import_playbook: role-pagure.yml 4 | -------------------------------------------------------------------------------- /deploy-reimzul.yml: 
-------------------------------------------------------------------------------- 1 | --- 2 | - import_playbook: role-reimzul-controller.yml 3 | - import_playbook: role-reimzul-bstore.yml 4 | - import_playbook: role-reimzul-builder.yml 5 | 6 | -------------------------------------------------------------------------------- /files/file: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CentOS/ansible-infra-playbooks/4cc51c8ea578641d3ada2c0f39b17d0e85e7c27d/files/file -------------------------------------------------------------------------------- /handlers/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: restart_sshd 3 | service: 4 | name: sshd 5 | state: restarted 6 | 7 | - name: restart_rsyncd 8 | service: 9 | name: rsyncd 10 | state: restarted 11 | 12 | - name: rebuild_rsyncd 13 | assemble: 14 | src: /etc/rsyncd.d/ 15 | dest: /etc/rsyncd.conf 16 | notify: restart_rsyncd 17 | 18 | - name: restart_httpd 19 | service: 20 | name: httpd 21 | state: restarted 22 | 23 | - name: reload_httpd 24 | service: 25 | name: httpd 26 | state: reloaded 27 | -------------------------------------------------------------------------------- /requirements-duffy.yml: -------------------------------------------------------------------------------- 1 | collections-duffy.in -------------------------------------------------------------------------------- /requirements-stream.yml: -------------------------------------------------------------------------------- 1 | requirements-production.yml -------------------------------------------------------------------------------- /role-all.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - import_playbook: role-ansible-host.yml 3 | - import_playbook: role-artifacts-srv.yml 4 | - import_playbook: role-bind.yml 5 | - import_playbook: role-boot-server.yml 6 | - import_playbook: 
role-cachethq.yml 7 | - import_playbook: role-centbot.yml 8 | - import_playbook: role-centos-backup.yml 9 | - import_playbook: role-certbot.yml 10 | - import_playbook: role-debuginfod.yml 11 | - import_playbook: role-distgit-lookaside.yml 12 | - import_playbook: role-duffy.yml 13 | - import_playbook: role-fedmsg.yml 14 | - import_playbook: role-fedora-messaging-client.yml 15 | - import_playbook: role-geoip2.yml 16 | - import_playbook: role-gitea.yml 17 | - import_playbook: role-gitlab-runner.yml 18 | - import_playbook: role-haproxy.yml 19 | - import_playbook: role-httpd.yml 20 | - import_playbook: role-httpd-compose.yml 21 | - import_playbook: role-httpd-feeds.yml 22 | - import_playbook: role-ipa-client.yml 23 | - import_playbook: role-ipsilon.yml 24 | - import_playbook: role-iptables.yml 25 | - import_playbook: role-iscsid-target.yml 26 | - import_playbook: role-jenkins-server.yml 27 | - import_playbook: role-kanboard.yml 28 | - import_playbook: role-keepalived.yml 29 | - import_playbook: role-kojibot.yml 30 | - import_playbook: role-koji-client.yml 31 | - import_playbook: role-kojid.yml 32 | - import_playbook: role-kojihub.yml 33 | - import_playbook: role-krb5-client.yml 34 | - import_playbook: role-kvm-host.yml 35 | - import_playbook: role-lsyncd.yml 36 | - import_playbook: role-mailman.yml 37 | - import_playbook: role-mailman3.yml 38 | - import_playbook: role-mantisbt.yml 39 | - import_playbook: role-mbs.yml 40 | - import_playbook: role-mirmon.yml 41 | - import_playbook: role-mirror.yml 42 | - import_playbook: role-mirror-buildlogs.yml 43 | - import_playbook: role-mirror-cloud.yml 44 | - import_playbook: role-mirror-debuginfo.yml 45 | - import_playbook: role-mirrorlist.yml 46 | - import_playbook: role-mirror-master.yml 47 | - import_playbook: role-mirror-qa.yml 48 | - import_playbook: role-mirror-vault.yml 49 | - import_playbook: role-moin.yml 50 | - import_playbook: role-mqtt.yml 51 | - import_playbook: role-mysql.yml 52 | - import_playbook: 
role-nfs-server.yml 53 | - import_playbook: role-ocp-admin-node.yml 54 | - import_playbook: role-odcs-backend.yml 55 | - import_playbook: role-odcs-frontend.yml 56 | - import_playbook: role-opennebula-frontend.yml 57 | - import_playbook: role-opennebula-kvm-host.yml 58 | - import_playbook: role-opentracker.yml 59 | - import_playbook: role-pagure.yml 60 | - import_playbook: role-pdns-pipe.yml 61 | - import_playbook: role-phpbb.yml 62 | - import_playbook: role-planet.yml 63 | - import_playbook: role-podman-host.yml 64 | - import_playbook: role-postfix.yml 65 | - import_playbook: role-postgresql.yml 66 | - import_playbook: role-redis.yml 67 | - import_playbook: role-reimzul-bstore.yml 68 | - import_playbook: role-reimzul-builder.yml 69 | - import_playbook: role-reimzul-controller.yml 70 | - import_playbook: role-repospanner.yml 71 | - import_playbook: role-restic.yml 72 | - import_playbook: role-robosignatory.yml 73 | - import_playbook: role-rsnapshot.yml 74 | - import_playbook: role-rsyncd.yml 75 | - import_playbook: role-sshd.yml 76 | - import_playbook: role-stikked.yml 77 | - import_playbook: role-stylo.yml 78 | - import_playbook: role-sync2git.yml 79 | - import_playbook: role-sync2s3.yml 80 | - import_playbook: role-tinyproxy.yml 81 | - import_playbook: role-ucarp.yml 82 | - import_playbook: role-unbound.yml 83 | - import_playbook: role-vbox-host.yml 84 | - import_playbook: role-vdo-host.yml 85 | - import_playbook: role-vsftpd.yml 86 | - import_playbook: role-wp.yml 87 | - import_playbook: role-zabbix-agent.yml 88 | - import_playbook: role-zabbix-proxy.yml 89 | - import_playbook: role-zabbix-server.yml 90 | # now the playbooks that are sub-task from existing role 91 | - import_playbook: role-httpd-internal-mirrorlist.yml 92 | - import_playbook: role-httpd-console-qa.yml 93 | - import_playbook: role-httpd-armv7.yml 94 | - import_playbook: role-httpd-docs.yml 95 | - import_playbook: role-httpd-people.yml 96 | - import_playbook: role-httpd-reposnap.yml 97 | - 
import_playbook: role-httpd-centosproject.yml 98 | - import_playbook: role-httpd-shared-dir.yml 99 | - import_playbook: role-httpd-www.yml 100 | - import_playbook: role-httpd-www-staging.yml 101 | -------------------------------------------------------------------------------- /role-ansible-host.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: hostgroup-role-ansible-host 3 | become: True 4 | pre_tasks: 5 | - name: Checking if no-ansible file is there 6 | stat: 7 | path: /etc/no-ansible 8 | register: no_ansible 9 | 10 | - name: Verifying if we can run ansible or not 11 | assert: 12 | that: 13 | - "not no_ansible.stat.exists" 14 | msg: "/etc/no-ansible file exists so skipping ansible run on this node" 15 | 16 | roles: 17 | - ansible-host 18 | 19 | post_tasks: 20 | - name: Touching ansible-run (monitored by Zabbix) 21 | file: 22 | path: /var/log/ansible.run 23 | state: touch 24 | 25 | -------------------------------------------------------------------------------- /role-artifacts-srv.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: hostgroup-role-artifacts-srv 3 | become: True 4 | pre_tasks: 5 | - name: Checking if no-ansible file is there 6 | stat: 7 | path: /etc/no-ansible 8 | register: no_ansible 9 | 10 | - name: Verifying if we can run ansible or not 11 | assert: 12 | that: 13 | - "not no_ansible.stat.exists" 14 | msg: "/etc/no-ansible file exists so skipping ansible run on this node" 15 | 16 | roles: 17 | - artifacts-srv 18 | 19 | post_tasks: 20 | - name: Touching ansible-run (monitored by Zabbix) 21 | file: 22 | path: /var/log/ansible.run 23 | state: touch 24 | 25 | -------------------------------------------------------------------------------- /role-baseline.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: all 3 | become: True 4 | pre_tasks: 5 | - name: Checking if no-ansible file is there 
6 | stat: 7 | path: /etc/no-ansible 8 | register: no_ansible 9 | 10 | - name: Verifying if we can run ansible or not 11 | assert: 12 | that: 13 | - "not no_ansible.stat.exists" 14 | msg: "/etc/no-ansible file exists so skipping ansible run on this node" 15 | 16 | roles: 17 | - baseline 18 | 19 | post_tasks: 20 | - name: Touching ansible-run (monitored by Zabbix) 21 | file: 22 | path: /var/log/ansible.run 23 | state: touch 24 | 25 | -------------------------------------------------------------------------------- /role-bind.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: hostgroup-role-bind 3 | become: True 4 | pre_tasks: 5 | - name: Checking if no-ansible file is there 6 | stat: 7 | path: /etc/no-ansible 8 | register: no_ansible 9 | 10 | - name: Verifying if we can run ansible or not 11 | assert: 12 | that: 13 | - "not no_ansible.stat.exists" 14 | msg: "/etc/no-ansible file exists so skipping ansible run on this node" 15 | 16 | roles: 17 | - bind 18 | 19 | post_tasks: 20 | - name: Touching ansible-run (monitored by Zabbix) 21 | file: 22 | path: /var/log/ansible.run 23 | state: touch 24 | 25 | -------------------------------------------------------------------------------- /role-boot-server.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: hostgroup-role-boot-server 3 | become: True 4 | pre_tasks: 5 | - name: Checking if no-ansible file is there 6 | stat: 7 | path: /etc/no-ansible 8 | register: no_ansible 9 | 10 | - name: Verifying if we can run ansible or not 11 | assert: 12 | that: 13 | - "not no_ansible.stat.exists" 14 | msg: "/etc/no-ansible file exists so skipping ansible run on this node" 15 | 16 | roles: 17 | - boot-server 18 | 19 | post_tasks: 20 | - name: Touching ansible-run (monitored by Zabbix) 21 | file: 22 | path: /var/log/ansible.run 23 | state: touch 24 | 25 | 
-------------------------------------------------------------------------------- /role-cachethq.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: hostgroup-role-cachethq 3 | become: True 4 | pre_tasks: 5 | - name: Checking if no-ansible file is there 6 | stat: 7 | path: /etc/no-ansible 8 | register: no_ansible 9 | 10 | - name: Verifying if we can run ansible or not 11 | assert: 12 | that: 13 | - "not no_ansible.stat.exists" 14 | msg: "/etc/no-ansible file exists so skipping ansible run on this node" 15 | 16 | roles: 17 | - cachethq 18 | 19 | post_tasks: 20 | - name: Touching ansible-run (monitored by Zabbix) 21 | file: 22 | path: /var/log/ansible.run 23 | state: touch 24 | 25 | -------------------------------------------------------------------------------- /role-centbot.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: hostgroup-role-centbot 3 | become: True 4 | pre_tasks: 5 | - name: Checking if no-ansible file is there 6 | stat: 7 | path: /etc/no-ansible 8 | register: no_ansible 9 | 10 | - name: Verifying if we can run ansible or not 11 | assert: 12 | that: 13 | - "not no_ansible.stat.exists" 14 | msg: "/etc/no-ansible file exists so skipping ansible run on this node" 15 | 16 | roles: 17 | - centbot 18 | 19 | post_tasks: 20 | - name: Touching ansible-run (monitored by Zabbix) 21 | file: 22 | path: /var/log/ansible.run 23 | state: touch 24 | 25 | -------------------------------------------------------------------------------- /role-centos-backup.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: hostgroup-role-centos-backup 3 | become: True 4 | pre_tasks: 5 | - name: Checking if no-ansible file is there 6 | stat: 7 | path: /etc/no-ansible 8 | register: no_ansible 9 | 10 | - name: Verifying if we can run ansible or not 11 | assert: 12 | that: 13 | - "not no_ansible.stat.exists" 14 | msg: 
"/etc/no-ansible file exists so skipping ansible run on this node" 15 | 16 | roles: 17 | - centos-backup 18 | 19 | post_tasks: 20 | - name: Touching ansible-run (monitored by Zabbix) 21 | file: 22 | path: /var/log/ansible.run 23 | state: touch 24 | 25 | -------------------------------------------------------------------------------- /role-certbot.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: hostgroup-role-certbot 3 | become: True 4 | pre_tasks: 5 | - name: Checking if no-ansible file is there 6 | stat: 7 | path: /etc/no-ansible 8 | register: no_ansible 9 | 10 | - name: Verifying if we can run ansible or not 11 | assert: 12 | that: 13 | - "not no_ansible.stat.exists" 14 | msg: "/etc/no-ansible file exists so skipping ansible run on this node" 15 | 16 | roles: 17 | - certbot 18 | 19 | post_tasks: 20 | - name: Touching ansible-run (monitored by Zabbix) 21 | file: 22 | path: /var/log/ansible.run 23 | state: touch 24 | 25 | -------------------------------------------------------------------------------- /role-debuginfod.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: hostgroup-role-debuginfod 3 | become: True 4 | pre_tasks: 5 | - name: Checking if no-ansible file is there 6 | stat: 7 | path: /etc/no-ansible 8 | register: no_ansible 9 | 10 | - name: Verifying if we can run ansible or not 11 | assert: 12 | that: 13 | - "not no_ansible.stat.exists" 14 | msg: "/etc/no-ansible file exists so skipping ansible run on this node" 15 | 16 | roles: 17 | - debuginfod 18 | 19 | post_tasks: 20 | - name: Touching ansible-run (monitored by Zabbix) 21 | file: 22 | path: /var/log/ansible.run 23 | state: touch 24 | 25 | -------------------------------------------------------------------------------- /role-distgit-lookaside.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: hostgroup-role-distgit-lookaside 3 | 
become: True 4 | pre_tasks: 5 | - name: Checking if no-ansible file is there 6 | stat: 7 | path: /etc/no-ansible 8 | register: no_ansible 9 | 10 | - name: Verifying if we can run ansible or not 11 | assert: 12 | that: 13 | - "not no_ansible.stat.exists" 14 | msg: "/etc/no-ansible file exists so skipping ansible run on this node" 15 | 16 | roles: 17 | - distgit-lookaside 18 | 19 | post_tasks: 20 | - name: Touching ansible-run (monitored by Zabbix) 21 | file: 22 | path: /var/log/ansible.run 23 | state: touch 24 | 25 | -------------------------------------------------------------------------------- /role-duffy.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: hostgroup-role-duffy 3 | become: True 4 | pre_tasks: 5 | - name: Checking if no-ansible file is there 6 | stat: 7 | path: /etc/no-ansible 8 | register: no_ansible 9 | 10 | - name: Verifying if we can run ansible or not 11 | assert: 12 | that: 13 | - "not no_ansible.stat.exists" 14 | msg: "/etc/no-ansible file exists so skipping ansible run on this node" 15 | 16 | roles: 17 | - duffy 18 | 19 | post_tasks: 20 | - name: Touching ansible-run (monitored by Zabbix) 21 | file: 22 | path: /var/log/ansible.run 23 | state: touch 24 | 25 | -------------------------------------------------------------------------------- /role-fedmsg.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: hostgroup-role-fedmsg 3 | become: True 4 | pre_tasks: 5 | - name: Checking if no-ansible file is there 6 | stat: 7 | path: /etc/no-ansible 8 | register: no_ansible 9 | 10 | - name: Verifying if we can run ansible or not 11 | assert: 12 | that: 13 | - "not no_ansible.stat.exists" 14 | msg: "/etc/no-ansible file exists so skipping ansible run on this node" 15 | 16 | roles: 17 | - fedmsg 18 | 19 | post_tasks: 20 | - name: Touching ansible-run (monitored by Zabbix) 21 | file: 22 | path: /var/log/ansible.run 23 | state: touch 24 | 25 | 
-------------------------------------------------------------------------------- /role-fedora-messaging-client.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: hostgroup-role-fedora-messaging-client 3 | become: True 4 | pre_tasks: 5 | - name: Checking if no-ansible file is there 6 | stat: 7 | path: /etc/no-ansible 8 | register: no_ansible 9 | 10 | - name: Verifying if we can run ansible or not 11 | assert: 12 | that: 13 | - "not no_ansible.stat.exists" 14 | msg: "/etc/no-ansible file exists so skipping ansible run on this node" 15 | 16 | roles: 17 | - fedora-messaging-client 18 | 19 | post_tasks: 20 | - name: Touching ansible-run (monitored by Zabbix) 21 | file: 22 | path: /var/log/ansible.run 23 | state: touch 24 | 25 | -------------------------------------------------------------------------------- /role-geoip2.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: hostgroup-role-geoip2 3 | become: True 4 | pre_tasks: 5 | - name: Checking if no-ansible file is there 6 | stat: 7 | path: /etc/no-ansible 8 | register: no_ansible 9 | 10 | - name: Verifying if we can run ansible or not 11 | assert: 12 | that: 13 | - "not no_ansible.stat.exists" 14 | msg: "/etc/no-ansible file exists so skipping ansible run on this node" 15 | 16 | roles: 17 | - geoip2 18 | 19 | post_tasks: 20 | - name: Touching ansible-run (monitored by Zabbix) 21 | file: 22 | path: /var/log/ansible.run 23 | state: touch 24 | 25 | -------------------------------------------------------------------------------- /role-gitea.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: hostgroup-role-gitea 3 | become: True 4 | pre_tasks: 5 | - name: Checking if no-ansible file is there 6 | stat: 7 | path: /etc/no-ansible 8 | register: no_ansible 9 | 10 | - name: Verifying if we can run ansible or not 11 | assert: 12 | that: 13 | - "not 
no_ansible.stat.exists" 14 | msg: "/etc/no-ansible file exists so skipping ansible run on this node" 15 | 16 | roles: 17 | - gitea 18 | 19 | post_tasks: 20 | - name: Touching ansible-run (monitored by Zabbix) 21 | file: 22 | path: /var/log/ansible.run 23 | state: touch 24 | 25 | -------------------------------------------------------------------------------- /role-gitlab-runner.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: hostgroup-role-gitlab-runner 3 | become: True 4 | pre_tasks: 5 | - name: Checking if no-ansible file is there 6 | stat: 7 | path: /etc/no-ansible 8 | register: no_ansible 9 | 10 | - name: Verifying if we can run ansible or not 11 | assert: 12 | that: 13 | - "not no_ansible.stat.exists" 14 | msg: "/etc/no-ansible file exists so skipping ansible run on this node" 15 | 16 | roles: 17 | - gitlab-runner 18 | 19 | post_tasks: 20 | - name: Touching ansible-run (monitored by Zabbix) 21 | file: 22 | path: /var/log/ansible.run 23 | state: touch 24 | 25 | -------------------------------------------------------------------------------- /role-haproxy.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: hostgroup-role-haproxy 3 | become: True 4 | pre_tasks: 5 | - name: Checking if no-ansible file is there 6 | stat: 7 | path: /etc/no-ansible 8 | register: no_ansible 9 | 10 | - name: Verifying if we can run ansible or not 11 | assert: 12 | that: 13 | - "not no_ansible.stat.exists" 14 | msg: "/etc/no-ansible file exists so skipping ansible run on this node" 15 | 16 | roles: 17 | - haproxy 18 | 19 | post_tasks: 20 | - name: Touching ansible-run (monitored by Zabbix) 21 | file: 22 | path: /var/log/ansible.run 23 | state: touch 24 | 25 | -------------------------------------------------------------------------------- /role-httpd-armv7.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: 
httpd-armv7-nodes 3 | become: True 4 | vars: 5 | - httpd_tls: True 6 | 7 | pre_tasks: 8 | - name: Checking if no-ansible file is there 9 | stat: 10 | path: /etc/no-ansible 11 | register: no_ansible 12 | 13 | - name: Verifying if we can run ansible or not 14 | assert: 15 | that: 16 | - "not no_ansible.stat.exists" 17 | msg: "/etc/no-ansible file exists so skipping ansible run on this node" 18 | 19 | roles: 20 | - httpd 21 | tasks: 22 | - import_role: 23 | name: httpd 24 | tasks_from: vhost-armv7 25 | 26 | post_tasks: 27 | - name: Touching ansible-run (monitored by Zabbix) 28 | file: 29 | path: /var/log/ansible.run 30 | state: touch 31 | 32 | -------------------------------------------------------------------------------- /role-httpd-centosproject.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: httpd-centosproject-nodes 3 | become: True 4 | vars: 5 | - httpd_tls: True 6 | 7 | pre_tasks: 8 | - name: Checking if no-ansible file is there 9 | stat: 10 | path: /etc/no-ansible 11 | register: no_ansible 12 | 13 | - name: Verifying if we can run ansible or not 14 | assert: 15 | that: 16 | - "not no_ansible.stat.exists" 17 | msg: "/etc/no-ansible file exists so skipping ansible run on this node" 18 | 19 | roles: 20 | - httpd 21 | tasks: 22 | - import_role: 23 | name: httpd 24 | tasks_from: vhost-centosproject 25 | 26 | post_tasks: 27 | - name: Touching ansible-run (monitored by Zabbix) 28 | file: 29 | path: /var/log/ansible.run 30 | state: touch 31 | 32 | -------------------------------------------------------------------------------- /role-httpd-compose.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: hostgroup-role-httpd-compose 3 | become: True 4 | pre_tasks: 5 | - name: Checking if no-ansible file is there 6 | stat: 7 | path: /etc/no-ansible 8 | register: no_ansible 9 | 10 | - name: Verifying if we can run ansible or not 11 | assert: 12 | that: 13 | - "not 
no_ansible.stat.exists" 14 | msg: "/etc/no-ansible file exists so skipping ansible run on this node" 15 | 16 | roles: 17 | - httpd-compose 18 | 19 | post_tasks: 20 | - name: Touching ansible-run (monitored by Zabbix) 21 | file: 22 | path: /var/log/ansible.run 23 | state: touch 24 | 25 | -------------------------------------------------------------------------------- /role-httpd-console-qa.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: httpd-console-qa-nodes 3 | become: True 4 | vars: 5 | - httpd_tls: True 6 | 7 | pre_tasks: 8 | - name: Checking if no-ansible file is there 9 | stat: 10 | path: /etc/no-ansible 11 | register: no_ansible 12 | 13 | - name: Verifying if we can run ansible or not 14 | assert: 15 | that: 16 | - "not no_ansible.stat.exists" 17 | msg: "/etc/no-ansible file exists so skipping ansible run on this node" 18 | 19 | roles: 20 | - httpd 21 | tasks: 22 | - import_role: 23 | name: httpd 24 | tasks_from: vhost-console-qa 25 | 26 | post_tasks: 27 | - name: Touching ansible-run (monitored by Zabbix) 28 | file: 29 | path: /var/log/ansible.run 30 | state: touch 31 | 32 | -------------------------------------------------------------------------------- /role-httpd-docs-infra.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: httpd-docs-infra-nodes 3 | become: True 4 | 5 | pre_tasks: 6 | - name: Checking if no-ansible file is there 7 | stat: 8 | path: /etc/no-ansible 9 | register: no_ansible 10 | 11 | - name: Verifying if we can run ansible or not 12 | assert: 13 | that: 14 | - "not no_ansible.stat.exists" 15 | msg: "/etc/no-ansible file exists so skipping ansible run on this node" 16 | 17 | roles: 18 | - httpd 19 | 20 | tasks: 21 | - import_role: 22 | name: httpd 23 | tasks_from: vhost-docs-infra.yml 24 | 25 | post_tasks: 26 | - name: Touching ansible-run (monitored by Zabbix) 27 | file: 28 | path: /var/log/ansible.run 29 | state: touch 30 | 
31 | -------------------------------------------------------------------------------- /role-httpd-docs-sigs.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: httpd-docs-sigs-nodes 3 | become: True 4 | 5 | pre_tasks: 6 | - name: Checking if no-ansible file is there 7 | stat: 8 | path: /etc/no-ansible 9 | register: no_ansible 10 | 11 | - name: Verifying if we can run ansible or not 12 | assert: 13 | that: 14 | - "not no_ansible.stat.exists" 15 | msg: "/etc/no-ansible file exists so skipping ansible run on this node" 16 | 17 | roles: 18 | - httpd 19 | 20 | tasks: 21 | - import_role: 22 | name: httpd 23 | tasks_from: vhost-docs-sigs.yml 24 | 25 | post_tasks: 26 | - name: Touching ansible-run (monitored by Zabbix) 27 | file: 28 | path: /var/log/ansible.run 29 | state: touch 30 | 31 | -------------------------------------------------------------------------------- /role-httpd-docs.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: httpd-docs-nodes 3 | become: True 4 | vars: 5 | - httpd_tls: True 6 | 7 | pre_tasks: 8 | - name: Checking if no-ansible file is there 9 | stat: 10 | path: /etc/no-ansible 11 | register: no_ansible 12 | 13 | - name: Verifying if we can run ansible or not 14 | assert: 15 | that: 16 | - "not no_ansible.stat.exists" 17 | msg: "/etc/no-ansible file exists so skipping ansible run on this node" 18 | 19 | roles: 20 | - httpd 21 | tasks: 22 | - import_role: 23 | name: httpd 24 | tasks_from: vhost-docs 25 | 26 | post_tasks: 27 | - name: Touching ansible-run (monitored by Zabbix) 28 | file: 29 | path: /var/log/ansible.run 30 | state: touch 31 | 32 | -------------------------------------------------------------------------------- /role-httpd-feeds.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: hostgroup-role-httpd-feeds 3 | become: True 4 | pre_tasks: 5 | - name: Checking if no-ansible 
file is there 6 | stat: 7 | path: /etc/no-ansible 8 | register: no_ansible 9 | 10 | - name: Verifying if we can run ansible or not 11 | assert: 12 | that: 13 | - "not no_ansible.stat.exists" 14 | msg: "/etc/no-ansible file exists so skipping ansible run on this node" 15 | 16 | roles: 17 | - httpd-feeds 18 | 19 | post_tasks: 20 | - name: Touching ansible-run (monitored by Zabbix) 21 | file: 22 | path: /var/log/ansible.run 23 | state: touch 24 | 25 | -------------------------------------------------------------------------------- /role-httpd-git-websitent-content.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: httpd-git-website-content-nodes 3 | become: True 4 | vars: 5 | - httpd_tls: True 6 | 7 | pre_tasks: 8 | - name: Checking if no-ansible file is there 9 | stat: 10 | path: /etc/no-ansible 11 | register: no_ansible 12 | 13 | - name: Verifying if we can run ansible or not 14 | assert: 15 | that: 16 | - "not no_ansible.stat.exists" 17 | msg: "/etc/no-ansible file exists so skipping ansible run on this node" 18 | 19 | roles: 20 | - httpd 21 | 22 | tasks: 23 | - import_role: 24 | name: httpd 25 | tasks_from: vhost-git-website-content 26 | 27 | post_tasks: 28 | - name: Touching ansible-run (monitored by Zabbix) 29 | file: 30 | path: /var/log/ansible.run 31 | state: touch 32 | 33 | -------------------------------------------------------------------------------- /role-httpd-internal-mirrorlist.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: httpd-internal-mirrorlist-servers 3 | become: True 4 | vars: 5 | - httpd_tls: False 6 | 7 | pre_tasks: 8 | - name: Checking if no-ansible file is there 9 | stat: 10 | path: /etc/no-ansible 11 | register: no_ansible 12 | 13 | - name: Verifying if we can run ansible or not 14 | assert: 15 | that: 16 | - "not no_ansible.stat.exists" 17 | msg: "/etc/no-ansible file exists so skipping ansible run on this node" 18 | 19 | 
roles: 20 | - httpd 21 | tasks: 22 | - import_role: 23 | name: httpd 24 | tasks_from: vhost-internal-mirrorlist 25 | 26 | post_tasks: 27 | - name: Touching ansible-run (monitored by Zabbix) 28 | file: 29 | path: /var/log/ansible.run 30 | state: touch 31 | 32 | -------------------------------------------------------------------------------- /role-httpd-people.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: httpd-people-nodes 3 | become: True 4 | vars: 5 | - httpd_tls: True 6 | 7 | pre_tasks: 8 | - name: Checking if no-ansible file is there 9 | stat: 10 | path: /etc/no-ansible 11 | register: no_ansible 12 | 13 | - name: Verifying if we can run ansible or not 14 | assert: 15 | that: 16 | - "not no_ansible.stat.exists" 17 | msg: "/etc/no-ansible file exists so skipping ansible run on this node" 18 | 19 | roles: 20 | - httpd 21 | tasks: 22 | - import_role: 23 | name: httpd 24 | tasks_from: vhost-people 25 | 26 | post_tasks: 27 | - name: Touching ansible-run (monitored by Zabbix) 28 | file: 29 | path: /var/log/ansible.run 30 | state: touch 31 | 32 | -------------------------------------------------------------------------------- /role-httpd-reposnap.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: httpd-reposnap-nodes 3 | become: True 4 | vars: 5 | - httpd_tls: False 6 | 7 | pre_tasks: 8 | - name: Checking if no-ansible file is there 9 | stat: 10 | path: /etc/no-ansible 11 | register: no_ansible 12 | 13 | - name: Verifying if we can run ansible or not 14 | assert: 15 | that: 16 | - "not no_ansible.stat.exists" 17 | msg: "/etc/no-ansible file exists so skipping ansible run on this node" 18 | 19 | roles: 20 | - httpd 21 | tasks: 22 | - import_role: 23 | name: httpd 24 | tasks_from: vhost-reposnap 25 | 26 | post_tasks: 27 | - name: Touching ansible-run (monitored by Zabbix) 28 | file: 29 | path: /var/log/ansible.run 30 | state: touch 31 | 32 | 
-------------------------------------------------------------------------------- /role-httpd-shared-dir.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: httpd-shared-dir-nodes 3 | become: True 4 | vars: 5 | - httpd_tls: False 6 | 7 | pre_tasks: 8 | - name: Checking if no-ansible file is there 9 | stat: 10 | path: /etc/no-ansible 11 | register: no_ansible 12 | 13 | - name: Verifying if we can run ansible or not 14 | assert: 15 | that: 16 | - "not no_ansible.stat.exists" 17 | msg: "/etc/no-ansible file exists so skipping ansible run on this node" 18 | 19 | roles: 20 | - httpd 21 | 22 | tasks: 23 | - import_role: 24 | name: httpd 25 | tasks_from: vhost-shared-dir 26 | 27 | post_tasks: 28 | - name: Touching ansible-run (monitored by Zabbix) 29 | file: 30 | path: /var/log/ansible.run 31 | state: touch 32 | 33 | -------------------------------------------------------------------------------- /role-httpd-www-staging.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: httpd-www-staging-nodes 3 | become: True 4 | vars: 5 | - httpd_tls: True 6 | - httpd_www_hostname: www.stg.centos.org 7 | 8 | pre_tasks: 9 | - name: Checking if no-ansible file is there 10 | stat: 11 | path: /etc/no-ansible 12 | register: no_ansible 13 | 14 | - name: Verifying if we can run ansible or not 15 | assert: 16 | that: 17 | - "not no_ansible.stat.exists" 18 | msg: "/etc/no-ansible file exists so skipping ansible run on this node" 19 | 20 | roles: 21 | - httpd 22 | 23 | tasks: 24 | - import_role: 25 | name: httpd 26 | tasks_from: vhost-www-staging 27 | tags: 28 | - script 29 | 30 | post_tasks: 31 | - name: Touching ansible-run (monitored by Zabbix) 32 | file: 33 | path: /var/log/ansible.run 34 | state: touch 35 | 36 | -------------------------------------------------------------------------------- /role-httpd-www.yml: 
-------------------------------------------------------------------------------- 1 | --- 2 | - hosts: httpd-www-nodes 3 | become: True 4 | vars: 5 | - httpd_tls: True 6 | 7 | pre_tasks: 8 | - name: Checking if no-ansible file is there 9 | stat: 10 | path: /etc/no-ansible 11 | register: no_ansible 12 | 13 | - name: Verifying if we can run ansible or not 14 | assert: 15 | that: 16 | - "not no_ansible.stat.exists" 17 | msg: "/etc/no-ansible file exists so skipping ansible run on this node" 18 | 19 | roles: 20 | - httpd 21 | tasks: 22 | - import_role: 23 | name: httpd 24 | tasks_from: vhost-www 25 | 26 | post_tasks: 27 | - name: Touching ansible-run (monitored by Zabbix) 28 | file: 29 | path: /var/log/ansible.run 30 | state: touch 31 | 32 | -------------------------------------------------------------------------------- /role-httpd.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: hostgroup-role-httpd 3 | become: True 4 | pre_tasks: 5 | - name: Checking if no-ansible file is there 6 | stat: 7 | path: /etc/no-ansible 8 | register: no_ansible 9 | 10 | - name: Verifying if we can run ansible or not 11 | assert: 12 | that: 13 | - "not no_ansible.stat.exists" 14 | msg: "/etc/no-ansible file exists so skipping ansible run on this node" 15 | 16 | roles: 17 | - httpd 18 | 19 | post_tasks: 20 | - name: Touching ansible-run (monitored by Zabbix) 21 | file: 22 | path: /var/log/ansible.run 23 | state: touch 24 | 25 | -------------------------------------------------------------------------------- /role-ipa-client.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: hostgroup-role-ipa-client 3 | become: True 4 | pre_tasks: 5 | - name: Checking if no-ansible file is there 6 | stat: 7 | path: /etc/no-ansible 8 | register: no_ansible 9 | 10 | - name: Verifying if we can run ansible or not 11 | assert: 12 | that: 13 | - "not no_ansible.stat.exists" 14 | msg: "/etc/no-ansible 
file exists so skipping ansible run on this node" 15 | 16 | roles: 17 | - ipa-client 18 | 19 | post_tasks: 20 | - name: Touching ansible-run (monitored by Zabbix) 21 | file: 22 | path: /var/log/ansible.run 23 | state: touch 24 | 25 | -------------------------------------------------------------------------------- /role-ipsilon.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: hostgroup-role-ipsilon 3 | become: True 4 | pre_tasks: 5 | - name: Checking if no-ansible file is there 6 | stat: 7 | path: /etc/no-ansible 8 | register: no_ansible 9 | 10 | - name: Verifying if we can run ansible or not 11 | assert: 12 | that: 13 | - "not no_ansible.stat.exists" 14 | msg: "/etc/no-ansible file exists so skipping ansible run on this node" 15 | 16 | roles: 17 | - ipsilon 18 | 19 | post_tasks: 20 | - name: Touching ansible-run (monitored by Zabbix) 21 | file: 22 | path: /var/log/ansible.run 23 | state: touch 24 | 25 | -------------------------------------------------------------------------------- /role-iptables.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: hostgroup-role-iptables 3 | become: True 4 | pre_tasks: 5 | - name: Checking if no-ansible file is there 6 | stat: 7 | path: /etc/no-ansible 8 | register: no_ansible 9 | 10 | - name: Verifying if we can run ansible or not 11 | assert: 12 | that: 13 | - "not no_ansible.stat.exists" 14 | msg: "/etc/no-ansible file exists so skipping ansible run on this node" 15 | 16 | roles: 17 | - iptables 18 | 19 | post_tasks: 20 | - name: Touching ansible-run (monitored by Zabbix) 21 | file: 22 | path: /var/log/ansible.run 23 | state: touch 24 | 25 | -------------------------------------------------------------------------------- /role-iscsid-target.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: hostgroup-role-iscsid-target 3 | become: True 4 | pre_tasks: 5 | - name: 
Checking if no-ansible file is there 6 | stat: 7 | path: /etc/no-ansible 8 | register: no_ansible 9 | 10 | - name: Verifying if we can run ansible or not 11 | assert: 12 | that: 13 | - "not no_ansible.stat.exists" 14 | msg: "/etc/no-ansible file exists so skipping ansible run on this node" 15 | 16 | roles: 17 | - iscsid-target 18 | 19 | post_tasks: 20 | - name: Touching ansible-run (monitored by Zabbix) 21 | file: 22 | path: /var/log/ansible.run 23 | state: touch 24 | 25 | -------------------------------------------------------------------------------- /role-jenkins-server.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: hostgroup-role-jenkins-server 3 | become: True 4 | pre_tasks: 5 | - name: Checking if no-ansible file is there 6 | stat: 7 | path: /etc/no-ansible 8 | register: no_ansible 9 | 10 | - name: Verifying if we can run ansible or not 11 | assert: 12 | that: 13 | - "not no_ansible.stat.exists" 14 | msg: "/etc/no-ansible file exists so skipping ansible run on this node" 15 | 16 | roles: 17 | - jenkins-server 18 | 19 | post_tasks: 20 | - name: Touching ansible-run (monitored by Zabbix) 21 | file: 22 | path: /var/log/ansible.run 23 | state: touch 24 | 25 | -------------------------------------------------------------------------------- /role-kanboard.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: hostgroup-role-kanboard 3 | become: True 4 | pre_tasks: 5 | - name: Checking if no-ansible file is there 6 | stat: 7 | path: /etc/no-ansible 8 | register: no_ansible 9 | 10 | - name: Verifying if we can run ansible or not 11 | assert: 12 | that: 13 | - "not no_ansible.stat.exists" 14 | msg: "/etc/no-ansible file exists so skipping ansible run on this node" 15 | 16 | roles: 17 | - kanboard 18 | 19 | post_tasks: 20 | - name: Touching ansible-run (monitored by Zabbix) 21 | file: 22 | path: /var/log/ansible.run 23 | state: touch 24 | 25 | 
-------------------------------------------------------------------------------- /role-keepalived.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: hostgroup-role-keepalived 3 | become: True 4 | pre_tasks: 5 | - name: Checking if no-ansible file is there 6 | stat: 7 | path: /etc/no-ansible 8 | register: no_ansible 9 | 10 | - name: Verifying if we can run ansible or not 11 | assert: 12 | that: 13 | - "not no_ansible.stat.exists" 14 | msg: "/etc/no-ansible file exists so skipping ansible run on this node" 15 | 16 | roles: 17 | - keepalived 18 | 19 | post_tasks: 20 | - name: Touching ansible-run (monitored by Zabbix) 21 | file: 22 | path: /var/log/ansible.run 23 | state: touch 24 | 25 | -------------------------------------------------------------------------------- /role-koji-client.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: hostgroup-role-koji-client 3 | become: True 4 | pre_tasks: 5 | - name: Checking if no-ansible file is there 6 | stat: 7 | path: /etc/no-ansible 8 | register: no_ansible 9 | 10 | - name: Verifying if we can run ansible or not 11 | assert: 12 | that: 13 | - "not no_ansible.stat.exists" 14 | msg: "/etc/no-ansible file exists so skipping ansible run on this node" 15 | 16 | roles: 17 | - koji-client 18 | 19 | post_tasks: 20 | - name: Touching ansible-run (monitored by Zabbix) 21 | file: 22 | path: /var/log/ansible.run 23 | state: touch 24 | 25 | -------------------------------------------------------------------------------- /role-kojibot.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: hostgroup-role-kojibot 3 | become: True 4 | pre_tasks: 5 | - name: Checking if no-ansible file is there 6 | stat: 7 | path: /etc/no-ansible 8 | register: no_ansible 9 | 10 | - name: Verifying if we can run ansible or not 11 | assert: 12 | that: 13 | - "not no_ansible.stat.exists" 14 | msg: 
"/etc/no-ansible file exists so skipping ansible run on this node" 15 | 16 | roles: 17 | - kojibot 18 | 19 | post_tasks: 20 | - name: Touching ansible-run (monitored by Zabbix) 21 | file: 22 | path: /var/log/ansible.run 23 | state: touch 24 | 25 | -------------------------------------------------------------------------------- /role-kojid.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: hostgroup-role-kojid 3 | become: True 4 | pre_tasks: 5 | - name: Checking if no-ansible file is there 6 | stat: 7 | path: /etc/no-ansible 8 | register: no_ansible 9 | 10 | - name: Verifying if we can run ansible or not 11 | assert: 12 | that: 13 | - "not no_ansible.stat.exists" 14 | msg: "/etc/no-ansible file exists so skipping ansible run on this node" 15 | 16 | roles: 17 | - kojid 18 | 19 | post_tasks: 20 | - name: Touching ansible-run (monitored by Zabbix) 21 | file: 22 | path: /var/log/ansible.run 23 | state: touch 24 | 25 | -------------------------------------------------------------------------------- /role-kojifiles.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: hostgroup-role-kojifiles 3 | become: True 4 | vars: 5 | - httpd_tls: False 6 | 7 | pre_tasks: 8 | - name: Checking if no-ansible file is there 9 | stat: 10 | path: /etc/no-ansible 11 | register: no_ansible 12 | 13 | - name: Verifying if we can run ansible or not 14 | assert: 15 | that: 16 | - "not no_ansible.stat.exists" 17 | msg: "/etc/no-ansible file exists so skipping ansible run on this node" 18 | 19 | roles: 20 | - httpd 21 | 22 | tasks: 23 | - name: "install nfs-utils" 24 | package: 25 | name: nfs-utils 26 | state: installed 27 | tags: nfs 28 | 29 | - name: "Allow apache to pull from NFS" 30 | seboolean: 31 | name: "httpd_use_nfs" 32 | persistent: yes 33 | state: yes 34 | tags: nfs 35 | 36 | - name: "mount the kojifiles volume" 37 | mount: 38 | path: "{{ item.path }}" 39 | src: "{{ item.src }}" 
40 | fstype: "{{ item.fstype }}" 41 | opts: "{{ item.opts }}" 42 | state: mounted 43 | with_items: "{{ kojifiles_nfs_mounts }}" 44 | tags: nfs 45 | 46 | - name: "Link pkgs dir to the right place" 47 | file: 48 | dest: "{{ item }}" 49 | src: '/mnt/centos/koji' 50 | state: link 51 | with_items: 52 | - '/var/www/html/pkgs' 53 | - '/var/www/html/kojifiles' 54 | 55 | post_tasks: 56 | - name: Touching ansible-run (monitored by Zabbix) 57 | file: 58 | path: /var/log/ansible.run 59 | state: touch 60 | -------------------------------------------------------------------------------- /role-kojihub.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: hostgroup-role-kojihub 3 | become: True 4 | pre_tasks: 5 | - name: Checking if no-ansible file is there 6 | stat: 7 | path: /etc/no-ansible 8 | register: no_ansible 9 | 10 | - name: Verifying if we can run ansible or not 11 | assert: 12 | that: 13 | - "not no_ansible.stat.exists" 14 | msg: "/etc/no-ansible file exists so skipping ansible run on this node" 15 | 16 | roles: 17 | - kojihub 18 | 19 | post_tasks: 20 | - name: Touching ansible-run (monitored by Zabbix) 21 | file: 22 | path: /var/log/ansible.run 23 | state: touch 24 | 25 | -------------------------------------------------------------------------------- /role-krb5-client.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: hostgroup-role-krb5-client 3 | become: True 4 | pre_tasks: 5 | - name: Checking if no-ansible file is there 6 | stat: 7 | path: /etc/no-ansible 8 | register: no_ansible 9 | 10 | - name: Verifying if we can run ansible or not 11 | assert: 12 | that: 13 | - "not no_ansible.stat.exists" 14 | msg: "/etc/no-ansible file exists so skipping ansible run on this node" 15 | 16 | roles: 17 | - krb5-client 18 | 19 | post_tasks: 20 | - name: Touching ansible-run (monitored by Zabbix) 21 | file: 22 | path: /var/log/ansible.run 23 | state: touch 24 | 25 | 
-------------------------------------------------------------------------------- /role-kvm-host.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: hostgroup-role-kvm-host 3 | become: True 4 | pre_tasks: 5 | - name: Checking if no-ansible file is there 6 | stat: 7 | path: /etc/no-ansible 8 | register: no_ansible 9 | 10 | - name: Verifying if we can run ansible or not 11 | assert: 12 | that: 13 | - "not no_ansible.stat.exists" 14 | msg: "/etc/no-ansible file exists so skipping ansible run on this node" 15 | 16 | roles: 17 | - kvm-host 18 | 19 | post_tasks: 20 | - name: Touching ansible-run (monitored by Zabbix) 21 | file: 22 | path: /var/log/ansible.run 23 | state: touch 24 | 25 | -------------------------------------------------------------------------------- /role-lsyncd.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: hostgroup-role-lsyncd 3 | become: True 4 | pre_tasks: 5 | - name: Checking if no-ansible file is there 6 | stat: 7 | path: /etc/no-ansible 8 | register: no_ansible 9 | 10 | - name: Verifying if we can run ansible or not 11 | assert: 12 | that: 13 | - "not no_ansible.stat.exists" 14 | msg: "/etc/no-ansible file exists so skipping ansible run on this node" 15 | 16 | roles: 17 | - lsyncd 18 | 19 | post_tasks: 20 | - name: Touching ansible-run (monitored by Zabbix) 21 | file: 22 | path: /var/log/ansible.run 23 | state: touch 24 | 25 | -------------------------------------------------------------------------------- /role-mailman.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: hostgroup-role-mailman 3 | become: True 4 | pre_tasks: 5 | - name: Checking if no-ansible file is there 6 | stat: 7 | path: /etc/no-ansible 8 | register: no_ansible 9 | 10 | - name: Verifying if we can run ansible or not 11 | assert: 12 | that: 13 | - "not no_ansible.stat.exists" 14 | msg: "/etc/no-ansible file 
exists so skipping ansible run on this node" 15 | 16 | roles: 17 | - mailman 18 | 19 | post_tasks: 20 | - name: Touching ansible-run (monitored by Zabbix) 21 | file: 22 | path: /var/log/ansible.run 23 | state: touch 24 | 25 | -------------------------------------------------------------------------------- /role-mailman3.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: hostgroup-role-mailman3 3 | become: True 4 | pre_tasks: 5 | - name: Checking if no-ansible file is there 6 | stat: 7 | path: /etc/no-ansible 8 | register: no_ansible 9 | 10 | - name: Verifying if we can run ansible or not 11 | assert: 12 | that: 13 | - "not no_ansible.stat.exists" 14 | msg: "/etc/no-ansible file exists so skipping ansible run on this node" 15 | 16 | roles: 17 | - mailman3 18 | 19 | post_tasks: 20 | - name: Touching ansible-run (monitored by Zabbix) 21 | file: 22 | path: /var/log/ansible.run 23 | state: touch 24 | 25 | -------------------------------------------------------------------------------- /role-mantisbt.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: hostgroup-role-mantisbt 3 | become: True 4 | pre_tasks: 5 | - name: Checking if no-ansible file is there 6 | stat: 7 | path: /etc/no-ansible 8 | register: no_ansible 9 | 10 | - name: Verifying if we can run ansible or not 11 | assert: 12 | that: 13 | - "not no_ansible.stat.exists" 14 | msg: "/etc/no-ansible file exists so skipping ansible run on this node" 15 | 16 | roles: 17 | - mantisbt 18 | 19 | post_tasks: 20 | - name: Touching ansible-run (monitored by Zabbix) 21 | file: 22 | path: /var/log/ansible.run 23 | state: touch 24 | 25 | -------------------------------------------------------------------------------- /role-mbs.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: hostgroup-role-mbs 3 | become: True 4 | pre_tasks: 5 | - name: Checking if no-ansible 
file is there 6 | stat: 7 | path: /etc/no-ansible 8 | register: no_ansible 9 | 10 | - name: Verifying if we can run ansible or not 11 | assert: 12 | that: 13 | - "not no_ansible.stat.exists" 14 | msg: "/etc/no-ansible file exists so skipping ansible run on this node" 15 | 16 | roles: 17 | - mbs 18 | 19 | post_tasks: 20 | - name: Touching ansible-run (monitored by Zabbix) 21 | file: 22 | path: /var/log/ansible.run 23 | state: touch 24 | 25 | -------------------------------------------------------------------------------- /role-mirmon.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: hostgroup-role-mirmon 3 | become: True 4 | pre_tasks: 5 | - name: Checking if no-ansible file is there 6 | stat: 7 | path: /etc/no-ansible 8 | register: no_ansible 9 | 10 | - name: Verifying if we can run ansible or not 11 | assert: 12 | that: 13 | - "not no_ansible.stat.exists" 14 | msg: "/etc/no-ansible file exists so skipping ansible run on this node" 15 | 16 | roles: 17 | - mirmon 18 | 19 | post_tasks: 20 | - name: Touching ansible-run (monitored by Zabbix) 21 | file: 22 | path: /var/log/ansible.run 23 | state: touch 24 | 25 | -------------------------------------------------------------------------------- /role-mirror-buildlogs.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: hostgroup-role-mirror-buildlogs 3 | become: True 4 | pre_tasks: 5 | - name: Checking if no-ansible file is there 6 | stat: 7 | path: /etc/no-ansible 8 | register: no_ansible 9 | 10 | - name: Verifying if we can run ansible or not 11 | assert: 12 | that: 13 | - "not no_ansible.stat.exists" 14 | msg: "/etc/no-ansible file exists so skipping ansible run on this node" 15 | 16 | roles: 17 | - mirror-buildlogs 18 | 19 | post_tasks: 20 | - name: Touching ansible-run (monitored by Zabbix) 21 | file: 22 | path: /var/log/ansible.run 23 | state: touch 24 | 25 | 
-------------------------------------------------------------------------------- /role-mirror-cloud.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: hostgroup-role-mirror-cloud 3 | become: True 4 | pre_tasks: 5 | - name: Checking if no-ansible file is there 6 | stat: 7 | path: /etc/no-ansible 8 | register: no_ansible 9 | 10 | - name: Verifying if we can run ansible or not 11 | assert: 12 | that: 13 | - "not no_ansible.stat.exists" 14 | msg: "/etc/no-ansible file exists so skipping ansible run on this node" 15 | 16 | roles: 17 | - mirror-cloud 18 | 19 | post_tasks: 20 | - name: Touching ansible-run (monitored by Zabbix) 21 | file: 22 | path: /var/log/ansible.run 23 | state: touch 24 | 25 | -------------------------------------------------------------------------------- /role-mirror-debuginfo.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: hostgroup-role-mirror-debuginfo 3 | become: True 4 | pre_tasks: 5 | - name: Checking if no-ansible file is there 6 | stat: 7 | path: /etc/no-ansible 8 | register: no_ansible 9 | 10 | - name: Verifying if we can run ansible or not 11 | assert: 12 | that: 13 | - "not no_ansible.stat.exists" 14 | msg: "/etc/no-ansible file exists so skipping ansible run on this node" 15 | 16 | roles: 17 | - mirror-debuginfo 18 | 19 | post_tasks: 20 | - name: Touching ansible-run (monitored by Zabbix) 21 | file: 22 | path: /var/log/ansible.run 23 | state: touch 24 | 25 | -------------------------------------------------------------------------------- /role-mirror-master.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: hostgroup-role-mirror-master 3 | become: True 4 | pre_tasks: 5 | - name: Checking if no-ansible file is there 6 | stat: 7 | path: /etc/no-ansible 8 | register: no_ansible 9 | 10 | - name: Verifying if we can run ansible or not 11 | assert: 12 | that: 13 | - "not 
no_ansible.stat.exists" 14 | msg: "/etc/no-ansible file exists so skipping ansible run on this node" 15 | 16 | roles: 17 | - mirror-master 18 | 19 | post_tasks: 20 | - name: Touching ansible-run (monitored by Zabbix) 21 | file: 22 | path: /var/log/ansible.run 23 | state: touch 24 | 25 | -------------------------------------------------------------------------------- /role-mirror-qa.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: hostgroup-role-mirror-qa 3 | become: True 4 | pre_tasks: 5 | - name: Checking if no-ansible file is there 6 | stat: 7 | path: /etc/no-ansible 8 | register: no_ansible 9 | 10 | - name: Verifying if we can run ansible or not 11 | assert: 12 | that: 13 | - "not no_ansible.stat.exists" 14 | msg: "/etc/no-ansible file exists so skipping ansible run on this node" 15 | 16 | roles: 17 | - mirror-qa 18 | 19 | post_tasks: 20 | - name: Touching ansible-run (monitored by Zabbix) 21 | file: 22 | path: /var/log/ansible.run 23 | state: touch 24 | 25 | -------------------------------------------------------------------------------- /role-mirror-vault.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: hostgroup-role-mirror-vault 3 | become: True 4 | pre_tasks: 5 | - name: Checking if no-ansible file is there 6 | stat: 7 | path: /etc/no-ansible 8 | register: no_ansible 9 | 10 | - name: Verifying if we can run ansible or not 11 | assert: 12 | that: 13 | - "not no_ansible.stat.exists" 14 | msg: "/etc/no-ansible file exists so skipping ansible run on this node" 15 | 16 | roles: 17 | - mirror-vault 18 | 19 | post_tasks: 20 | - name: Touching ansible-run (monitored by Zabbix) 21 | file: 22 | path: /var/log/ansible.run 23 | state: touch 24 | 25 | -------------------------------------------------------------------------------- /role-mirror.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: 
hostgroup-role-mirror 3 | become: True 4 | pre_tasks: 5 | - name: Checking if no-ansible file is there 6 | stat: 7 | path: /etc/no-ansible 8 | register: no_ansible 9 | 10 | - name: Verifying if we can run ansible or not 11 | assert: 12 | that: 13 | - "not no_ansible.stat.exists" 14 | msg: "/etc/no-ansible file exists so skipping ansible run on this node" 15 | 16 | roles: 17 | - mirror 18 | 19 | post_tasks: 20 | - name: Touching ansible-run (monitored by Zabbix) 21 | file: 22 | path: /var/log/ansible.run 23 | state: touch 24 | 25 | -------------------------------------------------------------------------------- /role-mirrorlist.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: hostgroup-role-mirrorlist 3 | become: True 4 | pre_tasks: 5 | - name: Checking if no-ansible file is there 6 | stat: 7 | path: /etc/no-ansible 8 | register: no_ansible 9 | 10 | - name: Verifying if we can run ansible or not 11 | assert: 12 | that: 13 | - "not no_ansible.stat.exists" 14 | msg: "/etc/no-ansible file exists so skipping ansible run on this node" 15 | 16 | roles: 17 | - mirrorlist 18 | 19 | post_tasks: 20 | - name: Touching ansible-run (monitored by Zabbix) 21 | file: 22 | path: /var/log/ansible.run 23 | state: touch 24 | 25 | -------------------------------------------------------------------------------- /role-moin.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: hostgroup-role-moin 3 | become: True 4 | pre_tasks: 5 | - name: Checking if no-ansible file is there 6 | stat: 7 | path: /etc/no-ansible 8 | register: no_ansible 9 | 10 | - name: Verifying if we can run ansible or not 11 | assert: 12 | that: 13 | - "not no_ansible.stat.exists" 14 | msg: "/etc/no-ansible file exists so skipping ansible run on this node" 15 | 16 | roles: 17 | - moin 18 | 19 | post_tasks: 20 | - name: Touching ansible-run (monitored by Zabbix) 21 | file: 22 | path: /var/log/ansible.run 23 | 
state: touch 24 | 25 | -------------------------------------------------------------------------------- /role-mqtt.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: hostgroup-role-mqtt 3 | become: True 4 | pre_tasks: 5 | - name: Checking if no-ansible file is there 6 | stat: 7 | path: /etc/no-ansible 8 | register: no_ansible 9 | 10 | - name: Verifying if we can run ansible or not 11 | assert: 12 | that: 13 | - "not no_ansible.stat.exists" 14 | msg: "/etc/no-ansible file exists so skipping ansible run on this node" 15 | 16 | roles: 17 | - mqtt 18 | 19 | post_tasks: 20 | - name: Touching ansible-run (monitored by Zabbix) 21 | file: 22 | path: /var/log/ansible.run 23 | state: touch 24 | 25 | -------------------------------------------------------------------------------- /role-mysql.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: hostgroup-role-mysql 3 | become: True 4 | pre_tasks: 5 | - name: Checking if no-ansible file is there 6 | stat: 7 | path: /etc/no-ansible 8 | register: no_ansible 9 | 10 | - name: Verifying if we can run ansible or not 11 | assert: 12 | that: 13 | - "not no_ansible.stat.exists" 14 | msg: "/etc/no-ansible file exists so skipping ansible run on this node" 15 | 16 | roles: 17 | - mysql 18 | 19 | post_tasks: 20 | - name: Touching ansible-run (monitored by Zabbix) 21 | file: 22 | path: /var/log/ansible.run 23 | state: touch 24 | 25 | -------------------------------------------------------------------------------- /role-nfs-server.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: hostgroup-role-nfs-server 3 | become: True 4 | pre_tasks: 5 | - name: Checking if no-ansible file is there 6 | stat: 7 | path: /etc/no-ansible 8 | register: no_ansible 9 | 10 | - name: Verifying if we can run ansible or not 11 | assert: 12 | that: 13 | - "not no_ansible.stat.exists" 14 | msg: 
"/etc/no-ansible file exists so skipping ansible run on this node" 15 | 16 | roles: 17 | - nfs-server 18 | 19 | post_tasks: 20 | - name: Touching ansible-run (monitored by Zabbix) 21 | file: 22 | path: /var/log/ansible.run 23 | state: touch 24 | 25 | -------------------------------------------------------------------------------- /role-ocp-admin-node.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: hostgroup-role-ocp-admin-node 3 | become: True 4 | pre_tasks: 5 | - name: Checking if no-ansible file is there 6 | stat: 7 | path: /etc/no-ansible 8 | register: no_ansible 9 | 10 | - name: Verifying if we can run ansible or not 11 | assert: 12 | that: 13 | - "not no_ansible.stat.exists" 14 | msg: "/etc/no-ansible file exists so skipping ansible run on this node" 15 | 16 | roles: 17 | - ocp-admin-node 18 | 19 | post_tasks: 20 | - name: Touching ansible-run (monitored by Zabbix) 21 | file: 22 | path: /var/log/ansible.run 23 | state: touch 24 | 25 | -------------------------------------------------------------------------------- /role-odcs-backend.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: hostgroup-role-odcs-backend 3 | become: True 4 | pre_tasks: 5 | - name: Checking if no-ansible file is there 6 | stat: 7 | path: /etc/no-ansible 8 | register: no_ansible 9 | 10 | - name: Verifying if we can run ansible or not 11 | assert: 12 | that: 13 | - "not no_ansible.stat.exists" 14 | msg: "/etc/no-ansible file exists so skipping ansible run on this node" 15 | 16 | roles: 17 | - odcs-backend 18 | 19 | post_tasks: 20 | - name: Touching ansible-run (monitored by Zabbix) 21 | file: 22 | path: /var/log/ansible.run 23 | state: touch 24 | 25 | -------------------------------------------------------------------------------- /role-odcs-frontend.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: 
hostgroup-role-odcs-frontend 3 | become: True 4 | pre_tasks: 5 | - name: Checking if no-ansible file is there 6 | stat: 7 | path: /etc/no-ansible 8 | register: no_ansible 9 | 10 | - name: Verifying if we can run ansible or not 11 | assert: 12 | that: 13 | - "not no_ansible.stat.exists" 14 | msg: "/etc/no-ansible file exists so skipping ansible run on this node" 15 | 16 | roles: 17 | - odcs-frontend 18 | 19 | post_tasks: 20 | - name: Touching ansible-run (monitored by Zabbix) 21 | file: 22 | path: /var/log/ansible.run 23 | state: touch 24 | 25 | -------------------------------------------------------------------------------- /role-opennebula-frontend.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: hostgroup-role-opennebula-frontend 3 | become: True 4 | pre_tasks: 5 | - name: Checking if no-ansible file is there 6 | stat: 7 | path: /etc/no-ansible 8 | register: no_ansible 9 | 10 | - name: Verifying if we can run ansible or not 11 | assert: 12 | that: 13 | - "not no_ansible.stat.exists" 14 | msg: "/etc/no-ansible file exists so skipping ansible run on this node" 15 | 16 | roles: 17 | - opennebula-frontend 18 | 19 | post_tasks: 20 | - name: Touching ansible-run (monitored by Zabbix) 21 | file: 22 | path: /var/log/ansible.run 23 | state: touch 24 | 25 | -------------------------------------------------------------------------------- /role-opennebula-kvm-host.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: hostgroup-role-opennebula-kvm-host 3 | become: True 4 | pre_tasks: 5 | - name: Checking if no-ansible file is there 6 | stat: 7 | path: /etc/no-ansible 8 | register: no_ansible 9 | 10 | - name: Verifying if we can run ansible or not 11 | assert: 12 | that: 13 | - "not no_ansible.stat.exists" 14 | msg: "/etc/no-ansible file exists so skipping ansible run on this node" 15 | 16 | roles: 17 | - opennebula-kvm-host 18 | 19 | post_tasks: 20 | - name: Touching 
ansible-run (monitored by Zabbix) 21 | file: 22 | path: /var/log/ansible.run 23 | state: touch 24 | 25 | -------------------------------------------------------------------------------- /role-opentracker.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: hostgroup-role-opentracker 3 | become: True 4 | pre_tasks: 5 | - name: Checking if no-ansible file is there 6 | stat: 7 | path: /etc/no-ansible 8 | register: no_ansible 9 | 10 | - name: Verifying if we can run ansible or not 11 | assert: 12 | that: 13 | - "not no_ansible.stat.exists" 14 | msg: "/etc/no-ansible file exists so skipping ansible run on this node" 15 | 16 | roles: 17 | - opentracker 18 | 19 | post_tasks: 20 | - name: Touching ansible-run (monitored by Zabbix) 21 | file: 22 | path: /var/log/ansible.run 23 | state: touch 24 | 25 | -------------------------------------------------------------------------------- /role-pagure.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: hostgroup-role-pagure 3 | become: True 4 | pre_tasks: 5 | - name: Checking if no-ansible file is there 6 | stat: 7 | path: /etc/no-ansible 8 | register: no_ansible 9 | 10 | - name: Verifying if we can run ansible or not 11 | assert: 12 | that: 13 | - "not no_ansible.stat.exists" 14 | msg: "/etc/no-ansible file exists so skipping ansible run on this node" 15 | 16 | roles: 17 | - pagure 18 | 19 | post_tasks: 20 | - name: Touching ansible-run (monitored by Zabbix) 21 | file: 22 | path: /var/log/ansible.run 23 | state: touch 24 | 25 | -------------------------------------------------------------------------------- /role-pdns-pipe.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: hostgroup-role-pdns-pipe 3 | become: True 4 | pre_tasks: 5 | - name: Checking if no-ansible file is there 6 | stat: 7 | path: /etc/no-ansible 8 | register: no_ansible 9 | 10 | - name: Verifying if we 
can run ansible or not 11 | assert: 12 | that: 13 | - "not no_ansible.stat.exists" 14 | msg: "/etc/no-ansible file exists so skipping ansible run on this node" 15 | 16 | roles: 17 | - pdns-pipe 18 | 19 | post_tasks: 20 | - name: Touching ansible-run (monitored by Zabbix) 21 | file: 22 | path: /var/log/ansible.run 23 | state: touch 24 | 25 | -------------------------------------------------------------------------------- /role-phpbb.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: hostgroup-role-phpbb 3 | become: True 4 | pre_tasks: 5 | - name: Checking if no-ansible file is there 6 | stat: 7 | path: /etc/no-ansible 8 | register: no_ansible 9 | 10 | - name: Verifying if we can run ansible or not 11 | assert: 12 | that: 13 | - "not no_ansible.stat.exists" 14 | msg: "/etc/no-ansible file exists so skipping ansible run on this node" 15 | 16 | roles: 17 | - phpbb 18 | 19 | post_tasks: 20 | - name: Touching ansible-run (monitored by Zabbix) 21 | file: 22 | path: /var/log/ansible.run 23 | state: touch 24 | 25 | -------------------------------------------------------------------------------- /role-planet.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: hostgroup-role-planet 3 | become: True 4 | pre_tasks: 5 | - name: Checking if no-ansible file is there 6 | stat: 7 | path: /etc/no-ansible 8 | register: no_ansible 9 | 10 | - name: Verifying if we can run ansible or not 11 | assert: 12 | that: 13 | - "not no_ansible.stat.exists" 14 | msg: "/etc/no-ansible file exists so skipping ansible run on this node" 15 | 16 | roles: 17 | - planet 18 | 19 | post_tasks: 20 | - name: Touching ansible-run (monitored by Zabbix) 21 | file: 22 | path: /var/log/ansible.run 23 | state: touch 24 | 25 | -------------------------------------------------------------------------------- /role-podman-host.yml: 
-------------------------------------------------------------------------------- 1 | --- 2 | - hosts: hostgroup-role-podman-host 3 | become: True 4 | pre_tasks: 5 | - name: Checking if no-ansible file is there 6 | stat: 7 | path: /etc/no-ansible 8 | register: no_ansible 9 | 10 | - name: Verifying if we can run ansible or not 11 | assert: 12 | that: 13 | - "not no_ansible.stat.exists" 14 | msg: "/etc/no-ansible file exists so skipping ansible run on this node" 15 | 16 | roles: 17 | - podman-host 18 | 19 | post_tasks: 20 | - name: Touching ansible-run (monitored by Zabbix) 21 | file: 22 | path: /var/log/ansible.run 23 | state: touch 24 | 25 | -------------------------------------------------------------------------------- /role-postfix.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: hostgroup-role-postfix 3 | become: True 4 | pre_tasks: 5 | - name: Checking if no-ansible file is there 6 | stat: 7 | path: /etc/no-ansible 8 | register: no_ansible 9 | 10 | - name: Verifying if we can run ansible or not 11 | assert: 12 | that: 13 | - "not no_ansible.stat.exists" 14 | msg: "/etc/no-ansible file exists so skipping ansible run on this node" 15 | 16 | roles: 17 | - postfix 18 | 19 | post_tasks: 20 | - name: Touching ansible-run (monitored by Zabbix) 21 | file: 22 | path: /var/log/ansible.run 23 | state: touch 24 | 25 | -------------------------------------------------------------------------------- /role-postgresql.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: hostgroup-role-postgresql 3 | become: True 4 | pre_tasks: 5 | - name: Checking if no-ansible file is there 6 | stat: 7 | path: /etc/no-ansible 8 | register: no_ansible 9 | 10 | - name: Verifying if we can run ansible or not 11 | assert: 12 | that: 13 | - "not no_ansible.stat.exists" 14 | msg: "/etc/no-ansible file exists so skipping ansible run on this node" 15 | 16 | roles: 17 | - postgresql 18 | 19 
| post_tasks: 20 | - name: Touching ansible-run (monitored by Zabbix) 21 | file: 22 | path: /var/log/ansible.run 23 | state: touch 24 | 25 | -------------------------------------------------------------------------------- /role-redis.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: hostgroup-role-redis 3 | become: True 4 | pre_tasks: 5 | - name: Checking if no-ansible file is there 6 | stat: 7 | path: /etc/no-ansible 8 | register: no_ansible 9 | 10 | - name: Verifying if we can run ansible or not 11 | assert: 12 | that: 13 | - "not no_ansible.stat.exists" 14 | msg: "/etc/no-ansible file exists so skipping ansible run on this node" 15 | 16 | roles: 17 | - redis 18 | 19 | post_tasks: 20 | - name: Touching ansible-run (monitored by Zabbix) 21 | file: 22 | path: /var/log/ansible.run 23 | state: touch 24 | 25 | -------------------------------------------------------------------------------- /role-reimzul-bstore.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: hostgroup-role-reimzul-bstore 3 | become: True 4 | pre_tasks: 5 | - name: Checking if no-ansible file is there 6 | stat: 7 | path: /etc/no-ansible 8 | register: no_ansible 9 | 10 | - name: Verifying if we can run ansible or not 11 | assert: 12 | that: 13 | - "not no_ansible.stat.exists" 14 | msg: "/etc/no-ansible file exists so skipping ansible run on this node" 15 | 16 | roles: 17 | - reimzul-bstore 18 | 19 | post_tasks: 20 | - name: Touching ansible-run (monitored by Zabbix) 21 | file: 22 | path: /var/log/ansible.run 23 | state: touch 24 | 25 | -------------------------------------------------------------------------------- /role-reimzul-builder.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: hostgroup-role-reimzul-builder 3 | become: True 4 | pre_tasks: 5 | - name: Checking if no-ansible file is there 6 | stat: 7 | path: /etc/no-ansible 8 | 
register: no_ansible 9 | 10 | - name: Verifying if we can run ansible or not 11 | assert: 12 | that: 13 | - "not no_ansible.stat.exists" 14 | msg: "/etc/no-ansible file exists so skipping ansible run on this node" 15 | 16 | roles: 17 | - reimzul-builder 18 | 19 | post_tasks: 20 | - name: Touching ansible-run (monitored by Zabbix) 21 | file: 22 | path: /var/log/ansible.run 23 | state: touch 24 | 25 | -------------------------------------------------------------------------------- /role-reimzul-controller.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: hostgroup-role-reimzul-controller 3 | become: True 4 | pre_tasks: 5 | - name: Checking if no-ansible file is there 6 | stat: 7 | path: /etc/no-ansible 8 | register: no_ansible 9 | 10 | - name: Verifying if we can run ansible or not 11 | assert: 12 | that: 13 | - "not no_ansible.stat.exists" 14 | msg: "/etc/no-ansible file exists so skipping ansible run on this node" 15 | 16 | roles: 17 | - reimzul-controller 18 | 19 | post_tasks: 20 | - name: Touching ansible-run (monitored by Zabbix) 21 | file: 22 | path: /var/log/ansible.run 23 | state: touch 24 | 25 | -------------------------------------------------------------------------------- /role-repospanner.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: hostgroup-role-repospanner 3 | become: True 4 | pre_tasks: 5 | - name: Checking if no-ansible file is there 6 | stat: 7 | path: /etc/no-ansible 8 | register: no_ansible 9 | 10 | - name: Verifying if we can run ansible or not 11 | assert: 12 | that: 13 | - "not no_ansible.stat.exists" 14 | msg: "/etc/no-ansible file exists so skipping ansible run on this node" 15 | 16 | roles: 17 | - repospanner 18 | 19 | post_tasks: 20 | - name: Touching ansible-run (monitored by Zabbix) 21 | file: 22 | path: /var/log/ansible.run 23 | state: touch 24 | 25 | 
-------------------------------------------------------------------------------- /role-restic.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: hostgroup-role-restic 3 | become: True 4 | pre_tasks: 5 | - name: Checking if no-ansible file is there 6 | stat: 7 | path: /etc/no-ansible 8 | register: no_ansible 9 | 10 | - name: Verifying if we can run ansible or not 11 | assert: 12 | that: 13 | - "not no_ansible.stat.exists" 14 | msg: "/etc/no-ansible file exists so skipping ansible run on this node" 15 | 16 | roles: 17 | - restic 18 | 19 | post_tasks: 20 | - name: Touching ansible-run (monitored by Zabbix) 21 | file: 22 | path: /var/log/ansible.run 23 | state: touch 24 | 25 | -------------------------------------------------------------------------------- /role-robosignatory.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: hostgroup-role-robosignatory 3 | become: True 4 | pre_tasks: 5 | - name: Checking if no-ansible file is there 6 | stat: 7 | path: /etc/no-ansible 8 | register: no_ansible 9 | 10 | - name: Verifying if we can run ansible or not 11 | assert: 12 | that: 13 | - "not no_ansible.stat.exists" 14 | msg: "/etc/no-ansible file exists so skipping ansible run on this node" 15 | 16 | roles: 17 | - robosignatory 18 | 19 | post_tasks: 20 | - name: Touching ansible-run (monitored by Zabbix) 21 | file: 22 | path: /var/log/ansible.run 23 | state: touch 24 | 25 | -------------------------------------------------------------------------------- /role-rsnapshot.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: hostgroup-role-rsnapshot 3 | become: True 4 | pre_tasks: 5 | - name: Checking if no-ansible file is there 6 | stat: 7 | path: /etc/no-ansible 8 | register: no_ansible 9 | 10 | - name: Verifying if we can run ansible or not 11 | assert: 12 | that: 13 | - "not no_ansible.stat.exists" 14 | msg: 
"/etc/no-ansible file exists so skipping ansible run on this node" 15 | 16 | roles: 17 | - rsnapshot 18 | 19 | post_tasks: 20 | - name: Touching ansible-run (monitored by Zabbix) 21 | file: 22 | path: /var/log/ansible.run 23 | state: touch 24 | 25 | -------------------------------------------------------------------------------- /role-rsyncd.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: hostgroup-role-rsyncd 3 | become: True 4 | pre_tasks: 5 | - name: Checking if no-ansible file is there 6 | stat: 7 | path: /etc/no-ansible 8 | register: no_ansible 9 | 10 | - name: Verifying if we can run ansible or not 11 | assert: 12 | that: 13 | - "not no_ansible.stat.exists" 14 | msg: "/etc/no-ansible file exists so skipping ansible run on this node" 15 | 16 | roles: 17 | - rsyncd 18 | 19 | post_tasks: 20 | - name: Touching ansible-run (monitored by Zabbix) 21 | file: 22 | path: /var/log/ansible.run 23 | state: touch 24 | 25 | -------------------------------------------------------------------------------- /role-smtp-relay.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: hostgroup-role-smtp-relay 3 | become: True 4 | pre_tasks: 5 | - name: Checking if no-ansible file is there 6 | stat: 7 | path: /etc/no-ansible 8 | register: no_ansible 9 | 10 | - name: Verifying if we can run ansible or not 11 | assert: 12 | that: 13 | - "not no_ansible.stat.exists" 14 | msg: "/etc/no-ansible file exists so skipping ansible run on this node" 15 | 16 | tasks: 17 | - import_role: 18 | name: postfix 19 | tasks_from: relay_to 20 | 21 | 22 | post_tasks: 23 | - name: Touching ansible-run (monitored by Zabbix) 24 | file: 25 | path: /var/log/ansible.run 26 | state: touch 27 | 28 | -------------------------------------------------------------------------------- /role-sshd.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: 
hostgroup-role-sshd 3 | become: True 4 | pre_tasks: 5 | - name: Checking if no-ansible file is there 6 | stat: 7 | path: /etc/no-ansible 8 | register: no_ansible 9 | 10 | - name: Verifying if we can run ansible or not 11 | assert: 12 | that: 13 | - "not no_ansible.stat.exists" 14 | msg: "/etc/no-ansible file exists so skipping ansible run on this node" 15 | 16 | roles: 17 | - sshd 18 | 19 | post_tasks: 20 | - name: Touching ansible-run (monitored by Zabbix) 21 | file: 22 | path: /var/log/ansible.run 23 | state: touch 24 | 25 | -------------------------------------------------------------------------------- /role-stikked.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: hostgroup-role-stikked 3 | become: True 4 | pre_tasks: 5 | - name: Checking if no-ansible file is there 6 | stat: 7 | path: /etc/no-ansible 8 | register: no_ansible 9 | 10 | - name: Verifying if we can run ansible or not 11 | assert: 12 | that: 13 | - "not no_ansible.stat.exists" 14 | msg: "/etc/no-ansible file exists so skipping ansible run on this node" 15 | 16 | roles: 17 | - stikked 18 | 19 | post_tasks: 20 | - name: Touching ansible-run (monitored by Zabbix) 21 | file: 22 | path: /var/log/ansible.run 23 | state: touch 24 | 25 | -------------------------------------------------------------------------------- /role-stylo.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: hostgroup-role-stylo 3 | become: True 4 | pre_tasks: 5 | - name: Checking if no-ansible file is there 6 | stat: 7 | path: /etc/no-ansible 8 | register: no_ansible 9 | 10 | - name: Verifying if we can run ansible or not 11 | assert: 12 | that: 13 | - "not no_ansible.stat.exists" 14 | msg: "/etc/no-ansible file exists so skipping ansible run on this node" 15 | 16 | roles: 17 | - stylo 18 | 19 | post_tasks: 20 | - name: Touching ansible-run (monitored by Zabbix) 21 | file: 22 | path: /var/log/ansible.run 23 | state: touch 
24 | 25 | -------------------------------------------------------------------------------- /role-sync2git.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: hostgroup-role-sync2git 3 | become: True 4 | pre_tasks: 5 | - name: Checking if no-ansible file is there 6 | stat: 7 | path: /etc/no-ansible 8 | register: no_ansible 9 | 10 | - name: Verifying if we can run ansible or not 11 | assert: 12 | that: 13 | - "not no_ansible.stat.exists" 14 | msg: "/etc/no-ansible file exists so skipping ansible run on this node" 15 | 16 | roles: 17 | - sync2git 18 | 19 | post_tasks: 20 | - name: Touching ansible-run (monitored by Zabbix) 21 | file: 22 | path: /var/log/ansible.run 23 | state: touch 24 | 25 | -------------------------------------------------------------------------------- /role-sync2s3.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: hostgroup-role-sync2s3 3 | become: True 4 | pre_tasks: 5 | - name: Checking if no-ansible file is there 6 | stat: 7 | path: /etc/no-ansible 8 | register: no_ansible 9 | 10 | - name: Verifying if we can run ansible or not 11 | assert: 12 | that: 13 | - "not no_ansible.stat.exists" 14 | msg: "/etc/no-ansible file exists so skipping ansible run on this node" 15 | 16 | roles: 17 | - sync2s3 18 | 19 | post_tasks: 20 | - name: Touching ansible-run (monitored by Zabbix) 21 | file: 22 | path: /var/log/ansible.run 23 | state: touch 24 | 25 | -------------------------------------------------------------------------------- /role-tinyproxy.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: hostgroup-role-tinyproxy 3 | become: True 4 | pre_tasks: 5 | - name: Checking if no-ansible file is there 6 | stat: 7 | path: /etc/no-ansible 8 | register: no_ansible 9 | 10 | - name: Verifying if we can run ansible or not 11 | assert: 12 | that: 13 | - "not no_ansible.stat.exists" 14 | msg: 
"/etc/no-ansible file exists so skipping ansible run on this node" 15 | 16 | roles: 17 | - tinyproxy 18 | 19 | post_tasks: 20 | - name: Touching ansible-run (monitored by Zabbix) 21 | file: 22 | path: /var/log/ansible.run 23 | state: touch 24 | 25 | -------------------------------------------------------------------------------- /role-ucarp.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: hostgroup-role-ucarp 3 | become: True 4 | pre_tasks: 5 | - name: Checking if no-ansible file is there 6 | stat: 7 | path: /etc/no-ansible 8 | register: no_ansible 9 | 10 | - name: Verifying if we can run ansible or not 11 | assert: 12 | that: 13 | - "not no_ansible.stat.exists" 14 | msg: "/etc/no-ansible file exists so skipping ansible run on this node" 15 | 16 | roles: 17 | - ucarp 18 | 19 | post_tasks: 20 | - name: Touching ansible-run (monitored by Zabbix) 21 | file: 22 | path: /var/log/ansible.run 23 | state: touch 24 | 25 | -------------------------------------------------------------------------------- /role-unbound.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: hostgroup-role-unbound 3 | become: True 4 | pre_tasks: 5 | - name: Checking if no-ansible file is there 6 | stat: 7 | path: /etc/no-ansible 8 | register: no_ansible 9 | 10 | - name: Verifying if we can run ansible or not 11 | assert: 12 | that: 13 | - "not no_ansible.stat.exists" 14 | msg: "/etc/no-ansible file exists so skipping ansible run on this node" 15 | 16 | roles: 17 | - unbound 18 | 19 | post_tasks: 20 | - name: Touching ansible-run (monitored by Zabbix) 21 | file: 22 | path: /var/log/ansible.run 23 | state: touch 24 | 25 | -------------------------------------------------------------------------------- /role-vbox-host.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: hostgroup-role-vbox-host 3 | become: True 4 | pre_tasks: 5 | - name: 
Checking if no-ansible file is there 6 | stat: 7 | path: /etc/no-ansible 8 | register: no_ansible 9 | 10 | - name: Verifying if we can run ansible or not 11 | assert: 12 | that: 13 | - "not no_ansible.stat.exists" 14 | msg: "/etc/no-ansible file exists so skipping ansible run on this node" 15 | 16 | roles: 17 | - vbox-host 18 | 19 | post_tasks: 20 | - name: Touching ansible-run (monitored by Zabbix) 21 | file: 22 | path: /var/log/ansible.run 23 | state: touch 24 | 25 | -------------------------------------------------------------------------------- /role-vdo-host.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: hostgroup-role-vdo-host 3 | become: True 4 | pre_tasks: 5 | - name: Checking if no-ansible file is there 6 | stat: 7 | path: /etc/no-ansible 8 | register: no_ansible 9 | 10 | - name: Verifying if we can run ansible or not 11 | assert: 12 | that: 13 | - "not no_ansible.stat.exists" 14 | msg: "/etc/no-ansible file exists so skipping ansible run on this node" 15 | 16 | roles: 17 | - vdo-host 18 | 19 | post_tasks: 20 | - name: Touching ansible-run (monitored by Zabbix) 21 | file: 22 | path: /var/log/ansible.run 23 | state: touch 24 | 25 | -------------------------------------------------------------------------------- /role-vsftpd.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: hostgroup-role-vsftpd 3 | become: True 4 | pre_tasks: 5 | - name: Checking if no-ansible file is there 6 | stat: 7 | path: /etc/no-ansible 8 | register: no_ansible 9 | 10 | - name: Verifying if we can run ansible or not 11 | assert: 12 | that: 13 | - "not no_ansible.stat.exists" 14 | msg: "/etc/no-ansible file exists so skipping ansible run on this node" 15 | 16 | roles: 17 | - vsftpd 18 | 19 | post_tasks: 20 | - name: Touching ansible-run (monitored by Zabbix) 21 | file: 22 | path: /var/log/ansible.run 23 | state: touch 24 | 25 | 
-------------------------------------------------------------------------------- /role-wp.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: hostgroup-role-wp 3 | become: True 4 | pre_tasks: 5 | - name: Checking if no-ansible file is there 6 | stat: 7 | path: /etc/no-ansible 8 | register: no_ansible 9 | 10 | - name: Verifying if we can run ansible or not 11 | assert: 12 | that: 13 | - "not no_ansible.stat.exists" 14 | msg: "/etc/no-ansible file exists so skipping ansible run on this node" 15 | 16 | roles: 17 | - wp 18 | 19 | post_tasks: 20 | - name: Touching ansible-run (monitored by Zabbix) 21 | file: 22 | path: /var/log/ansible.run 23 | state: touch 24 | 25 | -------------------------------------------------------------------------------- /role-zabbix-agent.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: hostgroup-role-zabbix-agent 3 | become: True 4 | pre_tasks: 5 | - name: Checking if no-ansible file is there 6 | stat: 7 | path: /etc/no-ansible 8 | register: no_ansible 9 | 10 | - name: Verifying if we can run ansible or not 11 | assert: 12 | that: 13 | - "not no_ansible.stat.exists" 14 | msg: "/etc/no-ansible file exists so skipping ansible run on this node" 15 | 16 | roles: 17 | - zabbix-agent 18 | 19 | post_tasks: 20 | - name: Touching ansible-run (monitored by Zabbix) 21 | file: 22 | path: /var/log/ansible.run 23 | state: touch 24 | 25 | -------------------------------------------------------------------------------- /role-zabbix-proxy.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: hostgroup-role-zabbix-proxy 3 | become: True 4 | pre_tasks: 5 | - name: Checking if no-ansible file is there 6 | stat: 7 | path: /etc/no-ansible 8 | register: no_ansible 9 | 10 | - name: Verifying if we can run ansible or not 11 | assert: 12 | that: 13 | - "not no_ansible.stat.exists" 14 | msg: 
"/etc/no-ansible file exists so skipping ansible run on this node" 15 | 16 | roles: 17 | - zabbix-proxy 18 | 19 | post_tasks: 20 | - name: Touching ansible-run (monitored by Zabbix) 21 | file: 22 | path: /var/log/ansible.run 23 | state: touch 24 | 25 | -------------------------------------------------------------------------------- /role-zabbix-server.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: hostgroup-role-zabbix-server 3 | become: True 4 | pre_tasks: 5 | - name: Checking if no-ansible file is there 6 | stat: 7 | path: /etc/no-ansible 8 | register: no_ansible 9 | 10 | - name: Verifying if we can run ansible or not 11 | assert: 12 | that: 13 | - "not no_ansible.stat.exists" 14 | msg: "/etc/no-ansible file exists so skipping ansible run on this node" 15 | 16 | roles: 17 | - zabbix-server 18 | 19 | post_tasks: 20 | - name: Touching ansible-run (monitored by Zabbix) 21 | file: 22 | path: /var/log/ansible.run 23 | state: touch 24 | 25 | -------------------------------------------------------------------------------- /tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | -------------------------------------------------------------------------------- /templates/ansible-hostvars.j2: -------------------------------------------------------------------------------- 1 | hostname: {{ inventory_hostname }} 2 | {% if ansible_bios_vendor == 'Amazon EC2' and pub_ip is defined %} 3 | ip: {{ pub_ip }} 4 | {% elif ansible_default_ipv4.address is defined %} 5 | ip: {{ ansible_default_ipv4.address }} 6 | {% else %} 7 | ip: {{ ansible_all_ipv4_addresses[0] }} 8 | {% endif %} 9 | netmask: {{ ansible_default_ipv4.netmask| default('') }} 10 | gateway: {{ ansible_default_ipv4.gateway | default('')}} 11 | {% if ansible_default_ipv6.address is defined %} 12 | ipv6: {{ ansible_default_ipv6.address }} 13 | prefix6: {{ ansible_default_ipv6.prefix }} 14 | gateway6: {{ 
ansible_default_ipv6.gateway }} 15 | {% endif %} 16 | macaddress: {{ ansible_default_ipv4.macaddress | default('')}} 17 | dns: 18 | {% for ns in ansible_dns.nameservers %} 19 | - {{ ns }} 20 | {%- endfor %} 21 | 22 | hardware_vendor: {{ ansible_system_vendor }} 23 | product_name: {{ ansible_product_name }} 24 | serial_number: {{ ansible_product_serial }} 25 | 26 | {% if ansible_bios_vendor == 'Amazon EC2' %} 27 | aws_ec2_instance_id: 28 | aws_ec2_region: 29 | {% else %} 30 | root_password: 31 | 32 | sponsor_name: 33 | sponsor_url: 34 | sponsor_location: 35 | sponsor_portal: 36 | - url: 37 | - user: 38 | - password: 39 | sponsor_contacts: [] 40 | {% endif %} 41 | 42 | 43 | -------------------------------------------------------------------------------- /templates/ansible-virt-install-ocp.j2: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | virt-install \ 4 | --name {{ inventory_hostname }} \ 5 | --vcpus {{ kvm_guest_vcpus }} \ 6 | --cpu host \ 7 | --ram {{ kvm_guest_memory }} \ 8 | --disk path=/var/lib/libvirt/images/{{ inventory_hostname }}.qcow2,size={{ kvm_guest_disk_size }},format=qcow2,device=disk,bus=virtio,cache=none \ 9 | --nographics \ 10 | --network bridge={{ kvm_host_bridge }},model=virtio \ 11 | --hvm --accelerate \ 12 | --autostart --wait=-1 \ 13 | --extra-args "ip={{ ip }}::{{ gateway }}:{{ netmask }}:{{ inventory_hostname }}:{{ kvm_guest_vnic }}:none {% for ns in nameservers -%} nameserver={{ ns }} {% endfor -%} console=ttyS0 nomodeset rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url={{ rhcos_install_rootfs_url }} coreos.inst.ignition_url={{ rhcos_ignition_file_url }} " \ 14 | --os-type linux --os-variant rhel7 \ 15 | --location {{ rhcos_install_url }} 16 | 17 | 18 | 19 | -------------------------------------------------------------------------------- /templates/ansible-virt-install.j2: -------------------------------------------------------------------------------- 
1 | #!/bin/bash 2 | 3 | # This template will check some ansible variables to know what and how to deploy: 4 | # - kvm_guest_bridge (boolean) => if we'll be using bridge network 5 | # - if true => kvm_host_bridge is the br interface on which to attach vnic for bridge 6 | # - if not, which kvm_guest_network to use (usually 'default' from libvirt) 7 | # - rhel_version : if defined, we'll deploy RHEL and not CentOS version (kvm_guest_distro_release) 8 | 9 | {% if kvm_guest_bridge == True %} 10 | {% set kvm_guest_network = 'bridge=' + kvm_host_bridge %} 11 | {% else %} 12 | {% set kvm_guest_network = 'network=' + kvm_guest_default_net %} 13 | {% endif %} 14 | 15 | {% if kvm_guest_eth1_bridge is defined %} 16 | {% set kvm_guest_eth1_network = 'bridge=' + kvm_guest_eth1_bridge %} 17 | {% endif %} 18 | 19 | # Let's ensure we undefine/delete if still running (qa/test scenario) 20 | virsh list|grep -q "{{ inventory_hostname }}" 21 | if [ "$?" -eq "0" ] ; then 22 | virsh destroy {{ inventory_hostname }} 23 | virsh undefine {{ inventory_hostname }} 24 | fi 25 | 26 | virt-install \ 27 | --name {{ inventory_hostname }} \ 28 | --vcpus {{ kvm_guest_vcpus }} \ 29 | {% if not kvm_guest_arch == 'aarch64' -%} 30 | --cpu host \ 31 | {% endif %} 32 | --ram {{ kvm_guest_memory }} \ 33 | --disk path={{ kvm_host_libvirt_images_path | default('/var/lib/libvirt/images') }}/{{ inventory_hostname}}.qcow2,size={{ kvm_guest_disk_size }},format=qcow2,device=disk,bus=virtio,cache=none \ 34 | --nographics \ 35 | --network {{ kvm_guest_network }},model=virtio \ 36 | {% if kvm_guest_eth1_bridge is defined %} 37 | --network {{ kvm_guest_eth1_network }},model=virtio \ 38 | {% endif %} 39 | --hvm --accelerate \ 40 | --autostart --wait=-1 \ 41 | --noautoconsole \ 42 | --initrd-inject=/var/lib/libvirt/local-kickstarts/{{ inventory_hostname }}.cfg \ 43 | {% if kvm_guest_arch == 'power9' or kvm_guest_arch == 'ppc64le' %} 44 | --extra-args "console=hvc0 console=hvc1 net.ifnames=0 inst.ks=file:/{{ 
inventory_hostname }}.cfg" \ 45 | {% elif kvm_guest_arch == 's390x' %} 46 | --extra-args "console=ttysclp0 net.ifnames=0 inst.ks=file:/{{ inventory_hostname }}.cfg" \ 47 | {% else %} 48 | --extra-args "console=ttyS0 net.ifnames=0 inst.ks=file:/{{ inventory_hostname }}.cfg" \ 49 | {% endif %} 50 | {% if rhel_version is defined %} 51 | --location {{ rhel_deploy_mirror_url }}/{{ rhel_version }}/{{ kvm_guest_arch }}/ 52 | {% elif kvm_guest_distro_release == '8-stream' %} 53 | --location {{ mirror_baseurl | default('http://mirror.centos.org/centos/8-stream') }}/BaseOS/{{ kvm_guest_arch }}/os/ 54 | {% elif kvm_guest_distro_release == '9-stream' %} 55 | --location {{ mirror_baseurl | default('http://mirror.stream.centos.org/9-stream') }}/BaseOS/{{ kvm_guest_arch}}/os/ 56 | {% elif kvm_guest_distro_release == '10-stream' %} 57 | --osinfo fedora38 \ 58 | --location {{ mirror_baseurl | default('http://mirror.stream.centos.org/10-stream') }}/BaseOS/{{ kvm_guest_arch}}/os/ 59 | {% elif (kvm_guest_distro_release == 7 and kvm_guest_arch == 'ppc64le') or (kvm_guest_distro_release == 7 and kvm_guest_arch == 'ppc64') or (kvm_guest_distro_release == 7 and kvm_guest_arch == 'power9') or (kvm_guest_distro_release == '7' and kvm_guest_arch == 'aarch64') %} 60 | --location {{ mirror_baseurl | default('http://mirror.centos.org/altarch/7') }}/os/{{ kvm_guest_arch }}/ 61 | {% else %} 62 | --location {{ mirror_baseurl | default('http://mirror.centos.org/centos/7') }}/os/{{ kvm_guest_arch }}/ 63 | {% endif %} 64 | 65 | 66 | -------------------------------------------------------------------------------- /templates/convert-stream-8: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | logfile="/var/log/centos-convert-stream.log" 3 | rpm -q centos-stream-repos 2>&1 >/dev/null 4 | if [ "$?" -ne "0" ] ; then 5 | dnf swap -y -d 0 -q centos-linux-repos centos-stream-repos >> $logfile 6 | if [ "$?" 
-ne "0" ] ;then 7 | rpm -e --nodeps centos-linux-repos >> $logfile 2>&1 8 | rpm -Uvh http://mirror.centos.org/centos/8-stream/BaseOS/x86_64/os/Packages/centos-stream-repos-8-4.el8.noarch.rpm http://mirror.centos.org/centos/8-stream/BaseOS/x86_64/os/Packages/centos-gpg-keys-8-4.el8.noarch.rpm >> $logfile 2>&1 9 | fi 10 | dnf distro-sync -y -q -d 0 >> $logfile 2>&1 11 | fi 12 | if [ "$?" -ne "0" ] ;then 13 | echo "Error converting to Stream 8" >> $logfile 14 | exit 1 15 | else 16 | exit 2 17 | fi 18 | else 19 | exit 0 20 | fi 21 | 22 | -------------------------------------------------------------------------------- /templates/convert-to-rhel: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | logfile="/var/log/centos-convert-rhel.log" 3 | 4 | function f_log () { 5 | echo "[+] $(date +%Y%m%d-%H:%M) -> $*" >>$logfile 6 | } 7 | 8 | f_log "Detecting if we have redhat-release package installed" 9 | rpm -q redhat-release 2>&1 >/dev/null 10 | if [ "$?" -eq "0" ] ; then 11 | f_log "Detected $(rpm -q redhat-release) so exiting" 12 | exit 0 13 | else 14 | f_log "Forcing removal of centos-stream-repos and centos-stream-release pkgs ..." 15 | rpm -e --nodeps centos-stream-repos centos-stream-release 2>&1 >>$logfile 16 | f_log "Forcing dnf makecache from RHEL repositories ..." 17 | dnf clean all --releasever {{ ansible_distribution_major_version }} >> $logfile 18 | dnf makecache --releasever {{ ansible_distribution_major_version }} >> $logfile 19 | dnf distro-sync -y -q --releasever {{ ansible_distribution_major_version }} >> $logfile 2>&1 20 | if [ "$?" 
-ne "0" ] ;then 21 | echo "Error converting to RHEL 8" >> $logfile 22 | exit 1 23 | else 24 | if [ -d /sys/firmware/efi ] ; then 25 | f_log "UEFI system detected, forcing efibootmanager" 26 | efi_dev=$(lsblk -dno pkname $(mount|grep "boot/efi"|awk '{print $1}')) 27 | f_log "Detected EFI boot device is [/dev/${efi_dev}]" 28 | grub2-mkconfig -o /boot/efi/EFI/redhat/grub.cfg 29 | efibootmgr --create --disk=/dev/${efi_dev} --part=1 --label="Red Hat Enterprise Linux" --loader='EFI\redhat\shimx64.efi' >> $logfile 30 | if [ "$?" -ne "0" ] ; then 31 | f_log "ERROR, not able to configure efibootmgr so machine will not reboot" 32 | exit 1 33 | fi 34 | fi 35 | exit 2 36 | fi 37 | fi 38 | 39 | -------------------------------------------------------------------------------- /templates/empty.cfg.j2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CentOS/ansible-infra-playbooks/4cc51c8ea578641d3ada2c0f39b17d0e85e7c27d/templates/empty.cfg.j2 -------------------------------------------------------------------------------- /templates/install-config.yaml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | baseDomain: {{ openshift_cluster_base_domain }} 3 | controlPlane: 4 | hyperthreading: Enabled 5 | name: master 6 | platform: 7 | aws: 8 | type: {{ openshift_control_node_instance_type }} 9 | rootVolume: 10 | size: {{ openshift_control_node_ebs_size }} 11 | type: {{ openshift_control_node_ebs_type }} 12 | replicas: {{ openshift_control_node_replicas }} 13 | compute: 14 | - hyperthreading: Enabled 15 | name: worker 16 | platform: 17 | aws: 18 | type: {{ openshift_worker_node_instance_type }} 19 | rootVolume: 20 | size: {{ openshift_worker_node_ebs_size }} 21 | type: {{ openshift_worker_node_ebs_type }} 22 | replicas: {{ openshift_worker_node_replicas }} 23 | metadata: 24 | name: {{ openshift_cluster_name }} 25 | networking: 26 | machineNetwork: 27 | - cidr: {{ 
openshift_pvc_machine_network }} 28 | platform: 29 | aws: 30 | region: {{ aws_region }} 31 | pullSecret: '{{ openshift_install_pull_secret | regex_replace( '^ ', '' ) }}' 32 | fips: false 33 | sshKey: {{ openshift_node_ssh_public_key }} 34 | -------------------------------------------------------------------------------- /templates/kickstarts/centos-8-stream-ks.cfg.j2: -------------------------------------------------------------------------------- 1 | centos-8-ks.cfg.j2 -------------------------------------------------------------------------------- /templates/kickstarts/ci-centos-7-ks.j2: -------------------------------------------------------------------------------- 1 | {% if centos_arch == 'x86_64' %} 2 | {% set basepath = 'centos' %} 3 | {% else %} 4 | {% set basepath = 'altarch' %} 5 | {% endif %} 6 | #version=RHEL7 7 | # System authorization information 8 | auth --enableshadow --passalgo=sha512 9 | 10 | # Use network installation 11 | url --url="http://mirror.centos.org/{{ basepath }}/7/os/{{ centos_arch }}/" 12 | # Run the Setup Agent on first boot 13 | firstboot --enable 14 | #ignoredisk --only-use=sda 15 | # Keyboard layouts 16 | # old format: keyboard be-latin1 17 | # new format: 18 | keyboard --vckeymap=be-latin1 --xlayouts='be' 19 | # System language 20 | lang en_GB.UTF-8 21 | 22 | # Network information 23 | network --bootproto=static --device=eth0 --gateway={{ gateway }} --ip={{ ip }} --nameserver={{ nameserver }} --netmask={{ netmask }} --ipv6=auto --activate 24 | network --hostname={{ inventory_hostname }} 25 | # Root password 26 | rootpw --iscrypted $6$uPDi1RLccGatUM7N$es3S/p/J7/wQY5sN4PUxfk0ELNmVDddnNs/NCHJWTep9xQcRZ8xkOtDBHfqKTWM4CZQjLOXM0wZpL0tvo4D.41 27 | # System services 28 | services --enabled="chronyd" 29 | # System timezone 30 | timezone Europe/London --isUtc --ntpservers=0.centos.pool.centos.org,1.centos.pool.ntp.org,2.centos.pool.ntp.centos.org,3.centos.pool.ntp.centos.org 31 | # System bootloader configuration 32 | bootloader 
--location=mbr --boot-drive=mpatha 33 | # Partition clearing information 34 | clearpart --all --initlabel 35 | # Disk partitioning information 36 | part /boot --fstype="ext4" --ondisk=mpatha --size=500 37 | part pv.14 --fstype="lvmpv" --ondisk=mpatha --size=10000 --grow 38 | volgroup vg_{{ inventory_hostname_short }} --pesize=4096 pv.14 39 | logvol / --fstype="xfs" --size=8200 --name=root --vgname=vg_{{ inventory_hostname_short }} --grow --maxsize=1000000 40 | logvol swap --fstype="swap" --size=2136 --name=swap --vgname=vg_{{ inventory_hostname_short }} 41 | 42 | repo --name="base" --baseurl=http://mirror.centos.org/{{ basepath }}/7/os/{{ centos_arch }}/ --cost=100 43 | repo --name="updates" --baseurl=http://mirror.centos.org/{{ basepath }}/7/updates/{{ centos_arch }}/ --cost=100 44 | repo --name="ci-admin" --baseurl=http://repo.ci.centos.org/repo/admin/7/{{ centos_arch }}/ --cost=100 45 | repo --name="cr" --baseurl=http://mirror.centos.org/{{ basepath }}/7/cr/{{ centos_arch }}/ --cost=100 46 | 47 | reboot 48 | 49 | %packages 50 | @core 51 | chrony 52 | policycoreutils-python 53 | openssh-clients 54 | yum-utils 55 | sysstat 56 | 57 | %end 58 | 59 | %pre 60 | #!/bin/bash 61 | # Forcing performance instead of ondemand for AMD cpufreq_governor 62 | for i in {0..7} ; do echo performance > /sys/devices/system/cpu/cpu${i}/cpufreq/scaling_governor ; done 63 | # Forcing multibus (active/active) instead of failover for multipath devices 64 | /sbin/mpathconf --enable --with_multipathd y 65 | /sbin/multipath -F 66 | /sbin/multipath -r -p multibus 67 | /sbin/multipath -l 68 | /sbin/multipath -ll 69 | sed -i s/"user_friendly_names yes"/"user_friendly_names yes \n path_grouping_policy multibus"/g /etc/multipath.conf 70 | %end 71 | 72 | %post 73 | 74 | # Injecting custom ssh pub key for root 75 | mkdir /root/.ssh 76 | echo "{{ duffy_ssh_pub_key }}" >> /root/.ssh/authorized_keys 77 | chmod 700 /root/.ssh 78 | chmod 600 /root/.ssh/* ; chcon -v -R -t ssh_home_t /root/.ssh** 79 | 80 
| #Forcing ci.centos.org as search domain in resolv.conf 81 | echo "DOMAIN=ci.centos.org" >> /etc/sysconfig/network-scripts/ifcfg-eth0 82 | 83 | yum-config-manager --enable cr 84 | 85 | %end 86 | 87 | -------------------------------------------------------------------------------- /templates/kickstarts/ci-centos-8-ks.j2: -------------------------------------------------------------------------------- 1 | # System authorization information 2 | auth --enableshadow --passalgo=sha512 3 | 4 | # Use network installation 5 | url --url="http://mirror.centos.org/centos/8/BaseOS/{{ centos_arch }}/os" 6 | # Run the Setup Agent on first boot 7 | firstboot --enable 8 | #ignoredisk --only-use=sda 9 | # Keyboard layouts 10 | # old format: keyboard be-latin1 11 | # new format: 12 | keyboard --vckeymap=be-latin1 --xlayouts='be' 13 | # System language 14 | lang en_GB.UTF-8 15 | 16 | # Network information 17 | network --bootproto=static --device=eth0 --gateway={{ gateway }} --ip={{ ip }} --nameserver={{ nameserver }} --netmask={{ netmask }} --ipv6=auto --activate 18 | network --hostname={{ inventory_hostname }} 19 | # Root password 20 | rootpw --iscrypted $6$uPDi1RLccGatUM7N$es3S/p/J7/wQY5sN4PUxfk0ELNmVDddnNs/NCHJWTep9xQcRZ8xkOtDBHfqKTWM4CZQjLOXM0wZpL0tvo4D.41 21 | # System services 22 | services --enabled="chronyd" 23 | # System timezone 24 | timezone Europe/London --isUtc 25 | # System bootloader configuration 26 | bootloader --location=mbr --boot-drive=mpatha 27 | # Partition clearing information 28 | clearpart --all --initlabel 29 | # Disk partitioning information 30 | part /boot --fstype="ext4" --ondisk=mpatha --size=500 31 | part pv.14 --fstype="lvmpv" --ondisk=mpatha --size=10000 --grow 32 | volgroup vg_{{ inventory_hostname_short }} --pesize=4096 pv.14 33 | logvol / --fstype="xfs" --size=8200 --name=root --vgname=vg_{{ inventory_hostname_short }} --grow --maxsize=1000000 34 | logvol swap --fstype="swap" --size=2136 --name=swap --vgname=vg_{{ inventory_hostname_short }} 35 | 
36 | repo --name="base" --baseurl=http://mirror.centos.org/centos/8/BaseOS/{{ centos_arch }}/os --cost=100 37 | repo --name="appstream" --baseurl=http://mirror.centos.org/centos/8/AppStream/{{ centos_arch }}/os --cost=100 38 | repo --name="cr" --baseurl=http://mirror.centos.org/centos/8/cr/{{ centos_arch }}/os --cost=100 39 | 40 | reboot 41 | 42 | %packages 43 | @core 44 | 45 | %end 46 | 47 | %pre 48 | #!/bin/bash 49 | # Forcing performance instead of ondemand for AMD cpufreq_governor 50 | for i in {0..7} ; do echo performance > /sys/devices/system/cpu/cpu${i}/cpufreq/scaling_governor ; done 51 | # Forcing multibus (active/active) instead of failover for multipath devices 52 | /sbin/mpathconf --enable --with_multipathd y 53 | /sbin/multipath -F 54 | /sbin/multipath -r -p multibus 55 | /sbin/multipath -l 56 | /sbin/multipath -ll 57 | sed -i s/"user_friendly_names yes"/"user_friendly_names yes \n path_grouping_policy multibus"/g /etc/multipath.conf 58 | %end 59 | 60 | %post 61 | 62 | # Injecting custom ssh pub key for root 63 | mkdir /root/.ssh 64 | echo "{{ duffy_ssh_pub_key }}" >> /root/.ssh/authorized_keys 65 | chmod 700 /root/.ssh 66 | chmod 600 /root/.ssh/* ; chcon -v -R -t ssh_home_t /root/.ssh** 67 | 68 | #Forcing ci.centos.org as search domain in resolv.conf 69 | echo "DOMAIN=ci.centos.org" >> /etc/sysconfig/network-scripts/ifcfg-eth0 70 | 71 | dnf config-manager --enable cr 72 | 73 | %end 74 | 75 | -------------------------------------------------------------------------------- /templates/kickstarts/ci-centos-8-stream-ks.j2: -------------------------------------------------------------------------------- 1 | # System authorization information 2 | auth --enableshadow --passalgo=sha512 3 | 4 | # Use network installation 5 | url --url="http://mirror.centos.org/centos/8-stream/BaseOS/{{ centos_arch }}/os" 6 | # Run the Setup Agent on first boot 7 | firstboot --enable 8 | #ignoredisk --only-use=sda 9 | # Keyboard layouts 10 | # old format: keyboard be-latin1 11 | 
# new format: 12 | keyboard --vckeymap=be-latin1 --xlayouts='be' 13 | # System language 14 | lang en_GB.UTF-8 15 | firewall --service=ssh --port=10050:tcp 16 | 17 | # Network information 18 | network --bootproto=static --device=eth0 --gateway={{ gateway }} --ip={{ ip }} --nameserver={{ nameserver }} --netmask={{ netmask }} --ipv6=auto --activate 19 | network --hostname={{ inventory_hostname }} 20 | # Root password 21 | rootpw --iscrypted $6$uPDi1RLccGatUM7N$es3S/p/J7/wQY5sN4PUxfk0ELNmVDddnNs/NCHJWTep9xQcRZ8xkOtDBHfqKTWM4CZQjLOXM0wZpL0tvo4D.41 22 | # System services 23 | services --enabled="chronyd" 24 | # System timezone 25 | timezone Europe/London --isUtc 26 | # System bootloader configuration 27 | bootloader --location=mbr --boot-drive=mpatha 28 | # Partition clearing information 29 | clearpart --all --initlabel 30 | # Disk partitioning information 31 | part /boot --fstype="ext4" --ondisk=mpatha --size=500 32 | part pv.14 --fstype="lvmpv" --ondisk=mpatha --size=10000 --grow 33 | volgroup vg_{{ inventory_hostname_short }} --pesize=4096 pv.14 34 | logvol / --fstype="xfs" --size=8200 --name=root --vgname=vg_{{ inventory_hostname_short }} --grow --maxsize=1000000 35 | logvol swap --fstype="swap" --size=2136 --name=swap --vgname=vg_{{ inventory_hostname_short }} 36 | 37 | # repo --name="base" --baseurl=http://mirror.centos.org/centos/7/os/{{ centos_arch }}/ --cost=100 38 | # repo --name="updates" --baseurl=http://mirror.centos.org/centos/7/updates/{{ centos_arch }}/ --cost=100 39 | # repo --name="ci-admin" --baseurl=http://repo.ci.centos.org/repo/admin/7/{{ centos_arch }}/ --cost=100 40 | # repo --name="cr" --baseurl=http://mirror.centos.org/centos/7/cr/{{ centos_arch }}/ --cost=100 41 | 42 | repo --name="base" --baseurl=http://mirror.centos.org/centos/8-stream/BaseOS/{{ centos_arch }}/os --cost=100 43 | repo --name="appstream" --baseurl=http://mirror.centos.org/centos/8-stream/AppStream/{{ centos_arch }}/os --cost=100 44 | # repo --name="cr" 
--baseurl=http://mirror.centos.org/centos/8-stream/cr/{{ centos_arch }}/ --cost=100 45 | 46 | reboot 47 | 48 | %packages 49 | @^minimal-environment 50 | @standard 51 | 52 | 53 | %end 54 | 55 | %pre 56 | #!/bin/bash 57 | # Forcing performance instead of ondemand for AMD cpufreq_governor 58 | for i in {0..7} ; do echo performance > /sys/devices/system/cpu/cpu${i}/cpufreq/scaling_governor ; done 59 | # Forcing multibus (active/active) instead of failover for multipath devices 60 | /sbin/mpathconf --enable --with_multipathd y 61 | /sbin/multipath -F 62 | /sbin/multipath -r -p multibus 63 | /sbin/multipath -l 64 | /sbin/multipath -ll 65 | sed -i s/"user_friendly_names yes"/"user_friendly_names yes \n path_grouping_policy multibus"/g /etc/multipath.conf 66 | %end 67 | 68 | %post 69 | 70 | # Injecting custom ssh pub key for root 71 | mkdir /root/.ssh 72 | echo "{{ duffy_ssh_pub_key }}" >> /root/.ssh/authorized_keys 73 | chmod 700 /root/.ssh 74 | chmod 600 /root/.ssh/* ; chcon -v -R -t ssh_home_t /root/.ssh** 75 | 76 | #Forcing ci.centos.org as search domain in resolv.conf 77 | echo "DOMAIN=ci.centos.org" >> /etc/sysconfig/network-scripts/ifcfg-eth0 78 | 79 | yum-config-manager --enable cr 80 | 81 | %end 82 | 83 | 84 | -------------------------------------------------------------------------------- /templates/kickstarts/ci-centos-9-stream-ks.j2: -------------------------------------------------------------------------------- 1 | # System authorization information 2 | auth --enableshadow --passalgo=sha512 3 | 4 | # Use network installation 5 | url --url="http://mirror.stream.centos.org/{{ centos_dist }}/BaseOS/{{ centos_arch }}/os/" 6 | # Run the Setup Agent on first boot 7 | firstboot --enable 8 | #ignoredisk --only-use=sda 9 | # Keyboard layouts 10 | # old format: keyboard be-latin1 11 | # new format: 12 | keyboard --vckeymap=be-latin1 --xlayouts='be' 13 | # System language 14 | lang en_GB.UTF-8 15 | 16 | # Network information 17 | network --bootproto=static --device=eth0 
--gateway={{ gateway }} --ip={{ ip }} --nameserver={{ nameserver }} --netmask={{ netmask }} --ipv6=auto --activate 18 | network --hostname={{ inventory_hostname }} 19 | # Root password 20 | rootpw --iscrypted $6$uPDi1RLccGatUM7N$es3S/p/J7/wQY5sN4PUxfk0ELNmVDddnNs/NCHJWTep9xQcRZ8xkOtDBHfqKTWM4CZQjLOXM0wZpL0tvo4D.41 21 | # System services 22 | services --enabled="chronyd" 23 | # System timezone 24 | timezone Etc/UTC --utc 25 | # System bootloader configuration 26 | bootloader --location=mbr --boot-drive=mpatha 27 | # Partition clearing information 28 | clearpart --all --initlabel 29 | # Disk partitioning information 30 | part /boot --fstype="ext4" --ondisk=mpatha --size=500 31 | part pv.14 --fstype="lvmpv" --ondisk=mpatha --size=10000 --grow 32 | volgroup vg_{{ inventory_hostname_short }} --pesize=4096 pv.14 33 | logvol / --fstype="xfs" --size=8200 --name=root --vgname=vg_{{ inventory_hostname_short }} --grow --maxsize=1000000 34 | logvol swap --fstype="swap" --size=2136 --name=swap --vgname=vg_{{ inventory_hostname_short }} 35 | 36 | reboot 37 | 38 | %packages 39 | @^minimal-environment 40 | @standard 41 | 42 | 43 | %end 44 | 45 | %pre 46 | #!/bin/bash 47 | # Forcing performance instead of ondemand for AMD cpufreq_governor 48 | for i in {0..7} ; do echo performance > /sys/devices/system/cpu/cpu${i}/cpufreq/scaling_governor ; done 49 | # Forcing multibus (active/active) instead of failover for multipath devices 50 | /sbin/mpathconf --enable --with_multipathd y 51 | /sbin/multipath -F 52 | /sbin/multipath -r -p multibus 53 | /sbin/multipath -l 54 | /sbin/multipath -ll 55 | sed -i s/"user_friendly_names yes"/"user_friendly_names yes \n path_grouping_policy multibus"/g /etc/multipath.conf 56 | %end 57 | 58 | %post 59 | 60 | # Injecting custom ssh pub key for root 61 | mkdir /root/.ssh 62 | echo "{{ duffy_ssh_pub_key }}" >> /root/.ssh/authorized_keys 63 | chmod 700 /root/.ssh 64 | chmod 600 /root/.ssh/* ; chcon -v -R -t ssh_home_t /root/.ssh** 65 | 66 | #Forcing 
ci.centos.org as search domain in resolv.conf 67 | echo "DOMAIN=ci.centos.org" >> /etc/sysconfig/network-scripts/ifcfg-eth0 68 | 69 | 70 | %end 71 | 72 | 73 | -------------------------------------------------------------------------------- /templates/kickstarts/kvm-guest-c8-stream-ks.j2: -------------------------------------------------------------------------------- 1 | kvm-guest-c8-ks.j2 -------------------------------------------------------------------------------- /templates/kickstarts/kvm-guest-rhel8-ks.j2: -------------------------------------------------------------------------------- 1 | # Kickstart file automatically generated by Ansible. 2 | # {{ ansible_managed }} 3 | 4 | # System authorization information 5 | auth --enableshadow --passalgo=sha512 6 | 7 | # Use network installation 8 | url --url="{{ rhel_deploy_mirror_url }}/{{ rhel_version }}/{{ kvm_guest_arch }}/" 9 | 10 | {% if rhel_internal_mirror_baseurl is defined %} 11 | # pointing to internal RHEL mirror with updates for baseos/appstream so the system is up to date at deploy time 12 | repo --name="baseos-updates" --baseurl={{ rhel_internal_mirror_baseurl }}/RHEL/{{ rhel_version }}/baseos/{{ arch }}/os/ 13 | repo --name="appstream-updates" --baseurl={{ rhel_internal_mirror_baseurl }}/RHEL/{{ rhel_version }}/appstream/{{ arch }}/os/ 14 | {% endif %} 15 | 16 | # Run the Setup Agent on first boot 17 | firstboot --enable 18 | #ignoredisk --only-use=sda 19 | # Keyboard layouts 20 | # old format: keyboard be-latin1 21 | # new format: 22 | keyboard --vckeymap=be-latin1 --xlayouts='be' 23 | # System language 24 | lang en_GB.UTF-8 25 | 26 | {% if kvm_guest_ip is defined %} 27 | network --device eth0 --bootproto static --ip {{ kvm_guest_ip }} --netmask {{ kvm_guest_netmask }} --gateway {{ kvm_guest_gateway }} --nameserver {{ kvm_guest_nameserver }} --hostname {{ inventory_hostname }} --activate 28 | {% else %} 29 | network --device eth0 --bootproto dhcp --hostname {{ inventory_hostname }} 30 | {% endif %} 31 | 
{% if kvm_guest_eth1 is defined %} 33 | network --activate --device eth1 --bootproto static --ip {{ kvm_guest_eth1_ip }} --netmask {{ kvm_guest_eth1_netmask }} 34 | {% endif %} 35 | 36 | # Root password 37 | rootpw --iscrypted {{ kvm_guest_root_pass | password_hash('sha512') }} 38 | # System services 39 | services --enabled="chronyd" 40 | # System timezone 41 | timezone Etc/UTC --isUtc 42 | # System bootloader configuration 43 | bootloader --location=mbr --boot-drive=vda 44 | # Partition clearing information 45 | clearpart --all --initlabel 46 | # Disk partitioning information 47 | # Adding first reqpart to automatically add /boot/efi or prepboot for aarch64, uefi, or IBM Power architectures 48 | reqpart 49 | 50 | {% if kvm_guest_arch == 'ppc64le' or kvm_guest_arch == 'ppc64' or kvm_guest_arch == 'power9' %} 51 | part prepboot --asprimary --fstype=prepboot --size=10 52 | {% endif %} 53 | {% if kvm_guest_arch == 'aarch64' %} 54 | part /boot/efi --fstype="vfat" --ondisk=vda --size=200 55 | {% endif %} 56 | 57 | part /boot --fstype="ext4" --ondisk=vda --size=1024 58 | 59 | {%if kvm_guest_luks_encrypted %} 60 | part pv.14 --fstype="lvmpv" --ondisk=vda --size=10000 --grow --encrypted --passphrase={{ kvm_guest_luks_passphrase }} 61 | {% else %} 62 | part pv.14 --fstype="lvmpv" --ondisk=vda --size=10000 --grow 63 | {% endif %} 64 | 65 | volgroup vg_{{ inventory_hostname_short }} --pesize=4096 pv.14 66 | logvol / --fstype="ext4" --size=10000 --grow --name=root --vgname=vg_{{ inventory_hostname_short }} 67 | logvol /home --fstype="ext4" --size={{ kvm_guest_disk_home_size | default(2048) }} --name=home --vgname=vg_{{ inventory_hostname_short }} 68 | logvol swap --fstype="swap" --size=2048 --name=swap --vgname=vg_{{ inventory_hostname_short }} 69 | 70 | #Ensuring rebooting at the end 71 | reboot 72 | 73 | %packages 74 | @^minimal-environment 75 | @standard 76 | 77 | %end 78 | 79 | %addon com_redhat_kdump --disable 80 | %end 81 | 82 | %post 83 | 84 | sed -i 
's/PasswordAuthentication yes/PasswordAuthentication no/g' /etc/ssh/sshd_config 85 | 86 | # tuning ext4 87 | /sbin/tune2fs -m 0.5 /dev/mapper/vg_{{ inventory_hostname_short }}-root 88 | 89 | # Using Ansible vars to automatically render template and add users 90 | {% for user in admins_list %} 91 | # Adding user,ssh pub key and sudo right for {{ user.login_name }} 92 | /sbin/useradd {{ user.login_name}} -c "{{ user.full_name }}" 93 | mkdir /home/{{ user.login_name }}/.ssh 94 | {% for key in user.ssh_pub_key %} 95 | echo "{{ key }}" >> /home/{{ user.login_name }}/.ssh/authorized_keys 96 | {% endfor %} 97 | chmod 700 /home/{{ user.login_name }}/.ssh 98 | chmod 600 /home/{{ user.login_name }}/.ssh/* ; chcon -v -R -t ssh_home_t /home/{{ user.login_name }}/.ssh* 99 | chown -R {{ user.login_name }}.{{ user.login_name}} /home/{{ user.login_name }}/ 100 | echo "{{ user.login_name }} ALL=(ALL) NOPASSWD: ALL" >/etc/sudoers.d/{{ user.login_name }} 101 | {% endfor %} 102 | 103 | %end 104 | 105 | 106 | -------------------------------------------------------------------------------- /templates/kickstarts/kvm-guest-rhel9-ks.j2: -------------------------------------------------------------------------------- 1 | # Kickstart file automatically generated by Ansible. 
2 | # {{ ansible_managed }} 3 | 4 | # Use network installation 5 | url --url="{{ rhel_deploy_mirror_url }}/{{ rhel_version }}/{{ kvm_guest_arch }}/" 6 | 7 | {% if rhel_internal_mirror_baseurl is defined %} 8 | # pointing to internal RHEL mirror with updates for baseos/appstream so the system is up to date at deploy time 9 | repo --name="baseos-updates" --baseurl={{ rhel_internal_mirror_baseurl }}/RHEL/{{ rhel_version }}/baseos/{{ kvm_guest_arch }}/os/ 10 | repo --name="appstream-updates" --baseurl={{ rhel_internal_mirror_baseurl }}/RHEL/{{ rhel_version }}/appstream/{{ kvm_guest_arch }}/os/ 11 | {% endif %} 12 | 13 | 14 | # Run the Setup Agent on first boot 15 | firstboot --enable 16 | #ignoredisk --only-use=sda 17 | # Keyboard layouts 18 | # old format: keyboard be-latin1 19 | # new format: 20 | keyboard --vckeymap=be-latin1 --xlayouts='be' 21 | # System language 22 | lang en_GB.UTF-8 23 | 24 | {% if kvm_guest_ip is defined %} 25 | network --device eth0 --bootproto static --ip {{ kvm_guest_ip }} --netmask {{ kvm_guest_netmask }} --gateway {{ kvm_guest_gateway }} --nameserver {{ kvm_guest_nameserver }} --hostname {{ inventory_hostname }} --activate 26 | {% else %} 27 | network --device eth0 --bootproto dhcp --hostname {{ inventory_hostname }} 28 | {% endif %} 29 | 30 | {% if kvm_guest_eth1 is defined %} 31 | network --activate --device eth1 --bootproto static --ip {{ kvm_guest_eth1_ip }} --netmask {{ kvm_guest_eth1_netmask }} 32 | {% endif %} 33 | 34 | # Root password 35 | rootpw --iscrypted {{ kvm_guest_root_pass | password_hash('sha512') }} 36 | # System services 37 | services --enabled="chronyd" 38 | # System timezone 39 | timezone Etc/UTC --utc 40 | # System bootloader configuration 41 | bootloader --location=mbr --boot-drive=vda 42 | # Partition clearing information 43 | clearpart --all --initlabel 44 | # Disk partitioning information 45 | # Adding first reqpart to automatically add /boot/efi or prepboot for aarch64, uefi, or IBM Power architectures 46 | reqpart 47 | 48 | 
{% if kvm_guest_arch == 'ppc64le' or kvm_guest_arch == 'ppc64' or kvm_guest_arch == 'power9' %} 49 | part prepboot --asprimary --fstype=prepboot --size=10 50 | {% endif %} 51 | {% if kvm_guest_arch == 'aarch64' %} 52 | part /boot/efi --fstype="vfat" --ondisk=vda --size=200 53 | {% endif %} 54 | 55 | part /boot --fstype="ext4" --ondisk=vda --size=1024 56 | 57 | {%if kvm_guest_luks_encrypted %} 58 | part pv.14 --fstype="lvmpv" --ondisk=vda --size=10000 --grow --encrypted --passphrase={{ kvm_guest_luks_passphrase }} 59 | {% else %} 60 | part pv.14 --fstype="lvmpv" --ondisk=vda --size=10000 --grow 61 | {% endif %} 62 | 63 | volgroup vg_{{ inventory_hostname_short }} --pesize=4096 pv.14 64 | logvol / --fstype="ext4" --size=10000 --grow --name=root --vgname=vg_{{ inventory_hostname_short }} 65 | logvol /home --fstype="ext4" --size={{ kvm_guest_disk_home_size | default(2048) }} --name=home --vgname=vg_{{ inventory_hostname_short }} 66 | logvol swap --fstype="swap" --size=2048 --name=swap --vgname=vg_{{ inventory_hostname_short }} 67 | 68 | #Ensuring rebooting at the end 69 | reboot 70 | 71 | %packages 72 | @^minimal-environment 73 | @standard 74 | 75 | %end 76 | 77 | %addon com_redhat_kdump --disable 78 | %end 79 | 80 | %post 81 | 82 | sed -i 's/PasswordAuthentication yes/PasswordAuthentication no/g' /etc/ssh/sshd_config 83 | 84 | # tuning ext4 85 | /sbin/tune2fs -m 0.5 /dev/mapper/vg_{{ inventory_hostname_short }}-root 86 | 87 | # Using Ansible vars to automatically render template and add users 88 | {% for user in admins_list %} 89 | # Adding user,ssh pub key and sudo right for {{ user.login_name }} 90 | /sbin/useradd {{ user.login_name}} -c "{{ user.full_name }}" 91 | mkdir /home/{{ user.login_name }}/.ssh 92 | {% for key in user.ssh_pub_key %} 93 | echo "{{ key }}" >> /home/{{ user.login_name }}/.ssh/authorized_keys 94 | {% endfor %} 95 | chmod 700 /home/{{ user.login_name }}/.ssh 96 | chmod 600 /home/{{ user.login_name }}/.ssh/* ; chcon -v -R -t ssh_home_t /home/{{ 
user.login_name }}/.ssh* 97 | chown -R {{ user.login_name }}.{{ user.login_name}} /home/{{ user.login_name }}/ 98 | echo "{{ user.login_name }} ALL=(ALL) NOPASSWD: ALL" >/etc/sudoers.d/{{ user.login_name }} 99 | {% endfor %} 100 | 101 | # Disable subscription-manager yum plugins 102 | sed -i 's|^enabled=1|enabled=0|' /etc/yum/pluginconf.d/product-id.conf 103 | sed -i 's|^enabled=1|enabled=0|' /etc/yum/pluginconf.d/subscription-manager.conf 104 | 105 | 106 | %end 107 | 108 | 109 | -------------------------------------------------------------------------------- /templates/ocp-treeinfo.j2: -------------------------------------------------------------------------------- 1 | [general] 2 | arch = x86_64 3 | family = Red Hat CoreOS 4 | platforms = x86_64 5 | version = {{ rhcos_version }} 6 | [images-x86_64] 7 | initrd = rhcos-{{ rhcos_version }}-x86_64-live-initramfs.x86_64.img 8 | kernel = rhcos-{{ rhcos_version }}-x86_64-live-kernel-x86_64 9 | -------------------------------------------------------------------------------- /templates/ocp4.3_install-config.yaml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | baseDomain: centos.org 3 | #proxy: 4 | # httpProxy: http://dkirwan:password@172.30.0.1:8080 5 | # httpsProxy: http://dkirwan:password@172.30.0.1:8443 6 | # noProxy: "redhat.com,centos.org" 7 | #additionalTrustBundle: | 8 | # {{ x509_cert }} 9 | compute: 10 | - hyperthreading: Enabled 11 | name: worker 12 | replicas: 0 13 | controlPlane: 14 | hyperthreading: Enabled 15 | name: master 16 | replicas: 3 17 | metadata: 18 | name: '{{ subdomain_name }}' 19 | networking: 20 | clusterNetwork: 21 | - cidr: '{{ networking_cluster_network }}' # 10.128.0.0/14 22 | hostPrefix: '{{ networking_cluster_network_host_prefix }}' # 23 23 | networkType: OpenShiftSDN 24 | serviceNetwork: 25 | - '{{ networking_service_network }}' #172.30.0.0/16 26 | platform: 27 | none: {} 28 | fips: false 29 | pullSecret: '{{ ocp_pull_token }}' 
30 | sshKey: '{{ ssh_key }}' 31 | -------------------------------------------------------------------------------- /templates/ocp_pxeboot.j2: -------------------------------------------------------------------------------- 1 | DEFAULT {{ inventory_hostname }}-deploy 2 | PROMPT 0 3 | TIMEOUT 50 4 | TOTALTIMEOUT 6000 5 | ONTIMEOUT {{ inventory_hostname }}-deploy 6 | 7 | LABEL local 8 | MENU LABEL (local) 9 | MENU DEFAULT 10 | LOCALBOOT 0 11 | 12 | LABEL {{ inventory_hostname}}-deploy 13 | MENU LABEL RHCOS {{ rhcos_version }} {{ arch }}- Kickstart for {{ inventory_hostname }} 14 | KERNEL rhcos-{{ rhcos_version }}-{{ arch }}-installer-kernel-{{ arch }} 15 | APPEND ip={{ ip }}::{{ gateway }}:{{ netmask }}:{{ inventory_hostname }}:{{ pxe_bootdev }}:none nameserver={{ nameserver }} rd.neednet=1 initrd=rhcos-{{ rhcos_version }}-{{ arch }}-installer-initramfs.{{ arch }}.img console={{ kernel_console | default('tty0')}} coreos.inst=yes rd.md=1 rd.auto coreos.inst.install_dev={{ md_raid_dev }} coreos.inst.image_url={{ rhcos_install_img_url }} coreos.inst.ignition_url={{ rhcos_ignition_file_url }} 16 | 17 | -------------------------------------------------------------------------------- /templates/openshift-idp.yml: -------------------------------------------------------------------------------- 1 | apiVersion: config.openshift.io/v1 2 | kind: OAuth 3 | metadata: 4 | name: cluster 5 | spec: 6 | identityProviders: 7 | - name: "{{idp_name}}" # should be the same used in the callback url used when creating oidc provider 8 | mappingMethod: claim 9 | type: OpenID 10 | openID: 11 | clientID: "{{idp_client_id}}" # should be the name used to create idp provider 12 | clientSecret: 13 | name: "{{idp_secret_name}}" # Name of secret created in previous step 14 | claims: 15 | email: 16 | - email 17 | groups: 18 | - groups 19 | name: 20 | - name 21 | preferredUsername: 22 | - nickname 23 | issuer: "{{idp_issuer_url}}" 24 | 
-------------------------------------------------------------------------------- /templates/openshift-pv-storage/persistent-volume.json.j2: -------------------------------------------------------------------------------- 1 | 2 | { 3 | "apiVersion": "v1", 4 | "kind": "PersistentVolume", 5 | "metadata": { 6 | "name": "{{ pv_name }}", 7 | "labels": { 8 | "type": "nfs" 9 | } 10 | }, 11 | "spec": { 12 | "capacity": { 13 | "storage": "{{ pv_size }}" 14 | }, 15 | "accessModes": [ "ReadWriteOnce", "ReadOnlyMany", "ReadWriteMany" ], 16 | "persistentVolumeReclaimPolicy": "Retain", 17 | {% if pv_claimref is defined %} 18 | "claimRef": { 19 | "name": "{{ pv_claimref }}", 20 | "namespace": "{{ cico_project_name }}" 21 | }, 22 | {% endif %} 23 | "nfs": { 24 | "server": "{{ ocp_nfs_server }}", 25 | "path": "/{{ ocp_nfs_export }}/{{ pv_name }}" 26 | } 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /templates/openshift-pv-storage/pv.yml.j2: -------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | kind: PersistentVolume 3 | metadata: 4 | name: {{ pv_name }} 5 | labels: 6 | type: nfs 7 | spec: 8 | capacity: 9 | storage: {{ pv_size }} 10 | accessModes: 11 | - ReadOnlyMany 12 | - ReadWriteOnce 13 | - ReadWriteMany 14 | mountOptions: 15 | - nfsvers=4.1 16 | - noresvport 17 | - hard 18 | nfs: 19 | path: /{{ ocp_nfs_export }}/{{ pv_name }} 20 | server: {{ ocp_nfs_server }} 21 | persistentVolumeReclaimPolicy: Retain 22 | claimRef: 23 | namespace: {{ ocp_project }} 24 | name: {{ pv_claimref }} 25 | -------------------------------------------------------------------------------- /templates/openshift-resources.j2: -------------------------------------------------------------------------------- 1 | --- 2 | kind: Namespace 3 | apiVersion: v1 4 | metadata: 5 | name: "{{project_name}}" 6 | annotations: 7 | openshift.io/description: "{{ project_name }}" 8 | openshift.io/display-name: "{{ 
project_name }}" 9 | openshift.io/requester: siddharthvipul1@gmail.com 10 | labels: 11 | bug-id: "{{bug_id}}" 12 | --- 13 | kind: Group 14 | apiVersion: user.openshift.io/v1 15 | metadata: 16 | name: "{{project_name}}-admins" 17 | users: 18 | {% for item in project_members %} 19 | - "{{ item }}" 20 | {% endfor %} 21 | --- 22 | kind: RoleBinding 23 | apiVersion: rbac.authorization.k8s.io/v1 24 | metadata: 25 | name: "{{project_name}}-admins" 26 | namespace: "{{project_name}}" 27 | subjects: 28 | - kind: Group 29 | apiGroup: rbac.authorization.k8s.io 30 | name: "{{project_name}}-admins" 31 | roleRef: 32 | apiGroup: rbac.authorization.k8s.io 33 | kind: ClusterRole 34 | name: admin 35 | -------------------------------------------------------------------------------- /templates/openshift/jenkins-ci-workspace.env.j2: -------------------------------------------------------------------------------- 1 | DUFFY_API_KEY="{{ duffy_api_key }}" 2 | DUFFY_SSH_KEY="{{ duffy_ssh_key }}" 3 | -------------------------------------------------------------------------------- /templates/puppet-cron-compare.j2: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | cron_user="$1" 3 | 4 | if [ $(wc -l /root/backup-crontab-${cron_user} |awk '{print $1}') -ne $(egrep -v '^#' /var/spool/cron/${cron_user} |wc -l) ] ; then 5 | echo Difference 6 | exit 0 7 | else 8 | echo Equal 9 | exit 0 10 | fi 11 | 12 | -------------------------------------------------------------------------------- /templates/pxe-grub.cfg.j2: -------------------------------------------------------------------------------- 1 | {% if rhel_version is defined %} 2 | {% set pxe_boot_dir = 'rhel' %} 3 | {% set os_version = rhel_version %} 4 | {% else %} 5 | {% set pxe_boot_dir = 'centos' %} 6 | {% set os_version = centos_version %} 7 | {% endif %} 8 | {% if arch == "aarch64" %} 9 | {% set kernel_boot = 'linux' %} 10 | {% set initrd_boot = 'initrd' %} 11 | {% else %} 12 | {% set 
kernel_boot = 'linuxefi' %} 13 | {% set initrd_boot = 'initrdefi' %} 14 | {% endif %} 15 | 16 | set timeout=3 17 | 18 | {% if vnc_based_install is defined and vnc_based_install %} 19 | menuentry '{{ inventory_hostname }}-Ansible-deploy' { 20 | {{ kernel_boot }} boot/{{ pxe_boot_dir }}/{{ os_version }}/{{ arch }}/vmlinuz ip={{ pxe_bootdev }}:dhcp inst.vnc inst.vncpassword={{ vnc_pass | default('N98tV89w') }} inst.repo={{ rhel_deploy_mirror_url}}/{{ rhel_version }}/{{ arch }}/ inst.lang=en_GB inst.keymap=be-latin1 21 | {{ initrd_boot }} boot/{{ pxe_boot_dir }}/{{ os_version }}/{{ arch }}/initrd.img 22 | } 23 | {% else %} 24 | menuentry '{{ inventory_hostname }}-Ansible-deploy' { 25 | {{ kernel_boot }} boot/{{ pxe_boot_dir }}/{{ os_version }}/{{ arch }}/vmlinuz ip={{ pxe_bootdev }}:dhcp inst.ks={{ ks_url }}/{{ inventory_hostname }}-ks.cfg 26 | {{ initrd_boot }} boot/{{ pxe_boot_dir }}/{{ os_version }}/{{ arch }}/initrd.img 27 | } 28 | {% endif %} 29 | -------------------------------------------------------------------------------- /templates/pxeboot.j2: -------------------------------------------------------------------------------- 1 | # Merging CI and previous setup so different variables, let's merge 2 | {% if centos_dist is defined %} 3 | {% set centos_version = centos_dist %} 4 | {% set arch = centos_arch %} 5 | {% endif %} 6 | 7 | {% if centos_infra_env == "qa" %} 8 | {% set pxe_base_dir = 'qa' %} 9 | {% else %} 10 | {% set pxe_base_dir = 'boot' %} 11 | {% endif %} 12 | 13 | {% if centos_version is defined %} 14 | {% set pxe_boot_dir = 'centos' %} 15 | {% set os_version = centos_version %} 16 | {% else %} 17 | {% set pxe_boot_dir = 'rhel' %} 18 | {% set os_version = rhel_version %} 19 | {% endif %} 20 | 21 | {% if pxe_boot_serial is defined and pxe_boot_serial %} 22 | # Let's setup for Serial console for CI and seamicro (no vga at all) 23 | SERIAL 0 9600 24 | DEFAULT text 25 | {% endif %} 26 | DEFAULT {{ inventory_hostname }}-deploy 27 | PROMPT 0 28 | TIMEOUT 50 
29 | TOTALTIMEOUT 6000 30 | ONTIMEOUT {{ inventory_hostname }}-deploy 31 | 32 | LABEL local 33 | MENU LABEL (local) 34 | MENU DEFAULT 35 | LOCALBOOT 0 36 | 37 | LABEL {{ inventory_hostname}}-deploy 38 | MENU LABEL Deploying {{ inventory_hostname }} - kickstart (OS {{ os_version }}/{{ arch }}) 39 | kernel {{ pxe_base_dir }}/{{ pxe_boot_dir }}/{{ os_version }}/{{ arch }}/vmlinuz 40 | append initrd={{ pxe_base_dir }}/{{ pxe_boot_dir }}/{{ os_version }}/{{ arch }}/initrd.img net.ifnames=0 biosdevname=0 ip={{ pxe_bootdev }}:dhcp inst.ks={{ ks_url }}/{{ inventory_hostname }}-ks.cfg {%if pxe_boot_serial is defined and pxe_boot_serial %} console=ttyS0,115200n8 {% endif %} 41 | -------------------------------------------------------------------------------- /templates/sudofile.j2: -------------------------------------------------------------------------------- 1 | {{ item.login_name }} ALL=(ALL) NOPASSWD: ALL 2 | -------------------------------------------------------------------------------- /vars/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | 3 | --------------------------------------------------------------------------------