├── .gitignore
├── .images
│   ├── libvirt.png
│   ├── libvirtIcon.png
│   └── libvirticonshadow.png
├── .vscode
│   └── settings.json
├── 00_prerequisites.yml
├── 00_sno_prerequisites.yml
├── 01_install_virtualization_tools.yml
├── 01_sno_install_virtualization_tools.yml
├── 02_setup_working_directory.yml
├── 02_sno_setup_working_directory.yml
├── 05_vm_provisioning_infra.yml
├── 07_vm_provisioning_ocp.yml
├── 10_infra_packages.yml
├── 13_bastion_networking.yml
├── 15_bastion_workspace.yml
├── 17_bastion_services.yml
├── 19_lb_services.yml
├── 20_prepare_ocp_install.yml
├── 25_pxeboot_vms.yml
├── 27_ocp_install.yml
├── 29_remove_bootstrap.yml
├── 30_create_users.yml
├── 33_patch_nodes.yml
├── 34_move_services_to_infra_nodes.yml
├── 70_setup_sno_cluster.yml
├── 99_cleanup.yml
├── 99_cleanup_sno.yml
├── LICENSE
├── Makefile
├── README.md
├── _config.yml
├── ansible-navigator.yaml
├── ansible.cfg
├── execution-environment
│   ├── execution-environment.yml
│   └── requirements.yml
├── files
│   ├── localdns.conf
│   ├── machineconfigpool.yml
│   ├── my-dnsmasq.pp
│   ├── my-dnsmasq.te
│   └── patch_monitoring.yml
├── group_vars
│   └── vm_host
│       ├── packages.yml
│       ├── sno-vars.yml
│       └── terraform.yml
├── host_vars
│   ├── bastion
│   │   ├── downloads.yml
│   │   ├── fw_bastion.yml
│   │   ├── packages.yml
│   │   ├── pxe.yml
│   │   └── workspace.yml
│   └── loadbalancer
│       ├── fw_loadbalancer.yml
│       └── packages.yml
├── inventory
├── main-sno.yml
├── main.yml
├── requirements.yml
├── templates
│   ├── csr.j2
│   ├── dnsmasq.j2
│   ├── haproxy.j2
│   ├── htpasswd_provider.j2
│   ├── install-config-sno.j2
│   ├── install-config.j2
│   ├── label_nodes.j2
│   ├── libvirt_dnsmasq.j2
│   ├── libvirt_dnsmasq_sno.j2
│   ├── ocp_user_script.j2
│   ├── patch_default_selector.j2
│   ├── patch_ingress_selector.j2
│   ├── patch_registry_selector.j2
│   ├── pxeboot_mac.j2
│   └── systemd-resolved.j2
├── terraform
│   ├── bastion
│   │   ├── bastion.tf
│   │   ├── cloud_init.cfg
│   │   ├── network_config.cfg
│   │   └── uefi-patch.xsl
│   ├── bootstrap
│   │   └── bootstrap.tf
│   ├── libvirt-resources-sno
│   │   └── libvirt-resources.tf
│   ├── libvirt-resources
│   │   └── libvirt-resources.tf
│   ├── loadbalancer
│   │   ├── cloud_init.cfg
│   │   ├── loadbalancer.tf
│   │   ├── network_config.cfg
│   │   └── uefi-patch.xsl
│   ├── masters
│   │   └── masters.tf
│   ├── sno
│   │   └── master-sno.tf
│   └── workers
│       └── workers.tf
└── vars
    ├── cluster_vars.yml
    ├── infra_vars.yml
    └── sno_vars.yml
/.gitignore:
--------------------------------------------------------------------------------
1 | id_rsa*
2 | *.log
3 | terraform.tfstate*
4 | .terraform*
5 | ansible.log
6 | execution-environment/context
--------------------------------------------------------------------------------
/.images/libvirt.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kubealex/libvirt-ocp4-provisioner/50fd9aef10ae48ad1acdcde58c18c44a4c6deda4/.images/libvirt.png
--------------------------------------------------------------------------------
/.images/libvirtIcon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kubealex/libvirt-ocp4-provisioner/50fd9aef10ae48ad1acdcde58c18c44a4c6deda4/.images/libvirtIcon.png
--------------------------------------------------------------------------------
/.images/libvirticonshadow.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kubealex/libvirt-ocp4-provisioner/50fd9aef10ae48ad1acdcde58c18c44a4c6deda4/.images/libvirticonshadow.png
--------------------------------------------------------------------------------
/.vscode/settings.json:
--------------------------------------------------------------------------------
1 | {
2 | "ansible.python.interpreterPath": "/bin/python"
3 | }
--------------------------------------------------------------------------------
/00_prerequisites.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: This play ensures prerequisites are satisfied before installing
3 | hosts: vm_host
4 | vars_files:
5 | - vars/cluster_vars.yml
6 | - vars/infra_vars.yml
7 | tasks:
8 | - name: Check if distribution is supported
9 | ansible.builtin.fail:
10 | msg: Your distribution is not supported
11 | when:
12 | - ansible_distribution != 'CentOS'
13 | - ansible_distribution != 'Fedora'
14 | - ansible_distribution != 'RedHat'
15 |
16 | - name: Block of cluster checks
17 | block:
18 | - name: Fail fast if more than one bootstrap node is provided
19 | ansible.builtin.fail:
20 | msg: "Only ONE bootstrap node can be present"
21 | when: cluster_nodes.host_list.bootstrap | count > 1
22 |
23 | - name: Fail fast if the requested version is older than 4.10
24 | ansible.builtin.fail:
25 | msg: "Versions before 4.10 are not supported"
26 | when:
27 | - cluster.version != "stable"
28 | - cluster.version is version('4.10', '<')
29 |
30 | - name: Fail fast when no workers are provided and it's not in Compact mode (three nodes)
31 | ansible.builtin.fail:
32 | msg: 'To provision NO workers, you must set the "three_node" variable to true in vars/cluster_vars.yml'
33 | when:
34 | - (cluster_nodes.host_list.workers | count <= 0)
35 | - not three_node
36 |
37 | - name: Fail fast if hosts are missing from the list
38 | ansible.builtin.fail:
39 | msg: "Please check the host_list variable in cluster_nodes"
40 | when: (cluster_nodes.host_list.masters | count <= 0) or
41 | (cluster_nodes.host_list.bootstrap | count <= 0) or
42 | (not three_node and (cluster_nodes.host_list.workers | count <= 0))
43 |
44 | - name: Fail fast if bootstrap node doesn't meet minimum requirements
45 | ansible.builtin.fail:
46 | msg: "Bootstrap nodes must be provisioned with at least 16GB memory, 40GB storage and 4vCPUs"
47 | when: (cluster_nodes.specs.bootstrap.vcpu < 4) or (cluster_nodes.specs.bootstrap.mem < 16) or (cluster_nodes.specs.bootstrap.disk < 40)
48 |
49 | - name: Fail fast if master nodes don't meet minimum requirements
50 | ansible.builtin.fail:
51 | msg: "Master nodes must be provisioned with at least 16GB memory, 40GB storage and 4vCPUs"
52 | when: (cluster_nodes.specs.masters.vcpu < 4) or (cluster_nodes.specs.masters.mem < 16) or (cluster_nodes.specs.masters.disk < 40)
53 |
54 | - name: Fail fast if worker nodes don't meet minimum requirements
55 | ansible.builtin.fail:
56 | msg: "Worker nodes must be provisioned with at least 8GB memory and 2vCPUs"
57 | when: (cluster_nodes.specs.workers.vcpu < 2) or (cluster_nodes.specs.workers.mem < 8) or (cluster_nodes.specs.workers.disk < 40)
58 |
59 | - name: Check for pullSecret variable and fail fast
60 | ansible.builtin.fail:
61 | msg: "Check the pullSecret var in files/vars.yaml"
62 | when: (cluster.pullSecret is undefined) or (cluster.pullSecret | length < 1)
63 |
64 | - name: Fail fast if bastion and/or loadbalancer are not provided
65 | ansible.builtin.fail:
66 | msg: "At least one bastion and one loadbalancer VMs must be created"
67 | when: (infra_nodes.host_list.bastion | count != 1 or infra_nodes.host_list.loadbalancer | count != 1)
68 |
--------------------------------------------------------------------------------
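
A minimal sketch of the vars/cluster_vars.yml shape these checks assume (keys taken from the conditions above; names, addresses and sizes are illustrative, not the project's shipped defaults):

cluster:
  name: ocp4                  # also used for the libvirt network/pool and the workspace dir
  version: stable             # or an explicit version, which must be >= 4.10
  pullSecret: '<your pull secret JSON>'
three_node: false             # true = compact cluster with no dedicated workers
cluster_nodes:
  host_list:
    bootstrap:
      - ip: 192.168.100.7
    masters:
      - ip: 192.168.100.101
    workers:
      - ip: 192.168.100.201   # an optional per-host 'role: infra' is honored later on
  specs:
    bootstrap: { vcpu: 4, mem: 16, disk: 40 }
    masters:   { vcpu: 4, mem: 16, disk: 40 }
    workers:   { vcpu: 2, mem: 8, disk: 40 }
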
/00_sno_prerequisites.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: This play ensures prerequisites are satisfied before installing
3 | hosts: vm_host
4 | vars_files:
5 | - vars/sno_vars.yml
6 | tasks:
7 | - name: Check if distribution is supported
8 | ansible.builtin.fail:
9 | msg: Your distribution is not supported
10 | when:
11 | - ansible_distribution != 'CentOS'
12 | - ansible_distribution != 'Fedora'
13 | - ansible_distribution != 'RedHat'
14 |
15 | - name: Block of cluster checks
16 | block:
17 | - name: Fail fast if node count is not consistent
18 | ansible.builtin.fail:
19 | msg: "Only ONE node can be present"
20 | when: (cluster_nodes.host_list.sno | count != 1)
21 |
22 | - name: Fail fast if no SNO node is provided
23 | ansible.builtin.fail:
24 | msg: "Please check the host_list variable in cluster_nodes, one node must be present"
25 | when: (cluster_nodes.host_list.sno | count <= 0)
26 |
27 | - name: Fail fast if SNO node doesn't meet minimum requirements
28 | ansible.builtin.fail:
29 | msg: "SNO node must be provisioned with at least 16GB memory, 60GB storage and 8vCPUs"
30 | when: (cluster_nodes.specs.sno.vcpu < 8) or (cluster_nodes.specs.sno.mem < 16) or (cluster_nodes.specs.sno.disk < 60)
31 |
32 | - name: Check for pullSecret variable and fail fast
33 | ansible.builtin.fail:
34 | msg: "Check the pullSecret var in files/vars.yaml"
35 | when: (cluster.pullSecret is undefined) or (cluster.pullSecret | length < 1)
36 |
--------------------------------------------------------------------------------
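
The single-node flow expects an analogous vars/sno_vars.yml; a minimal sketch sized to pass the minimums above (values illustrative):

cluster:
  name: ocp4-sno
  version: stable
  pullSecret: '<your pull secret JSON>'
cluster_nodes:
  host_list:
    sno:
      - ip: 192.168.100.10
  specs:
    sno: { vcpu: 8, mem: 16, disk: 60 }
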
/01_install_virtualization_tools.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: This play installs needed tools to provision infrastructure VMs
3 | hosts: vm_host
4 | vars_files:
5 | - vars/cluster_vars.yml
6 | - vars/infra_vars.yml
7 | become: true
8 | tasks:
9 | - name: Install needed packages
10 | ansible.builtin.yum:
11 | name: "{{ virtualization_packages.rhel }}"
12 | state: latest # noqa package-latest
13 | when:
14 | - ansible_distribution == 'RedHat'
15 |
16 | - name: Install needed packages
17 | ansible.builtin.yum:
18 | name: "{{ virtualization_packages.centos }}"
19 | state: latest # noqa package-latest
20 | when:
21 | - ansible_distribution == 'CentOS'
22 |
23 | - name: Install needed packages
24 | ansible.builtin.yum:
25 | name: "{{ virtualization_packages.fedora }}"
26 | state: latest # noqa package-latest
27 | when:
28 | - ansible_distribution == 'Fedora'
29 |
30 | - name: Download and provision Terraform
31 | ansible.builtin.unarchive:
32 | src: "{{ terraform_release_url }}"
33 | dest: /usr/bin/
34 | mode: "0755"
35 | remote_src: true
36 |
37 | - name: Virtualization services are enabled
38 | ansible.builtin.service:
39 | name: libvirtd
40 | state: started
41 | enabled: true
42 | when: ansible_distribution != 'CentOS' or
43 | (ansible_distribution == 'CentOS' and ansible_distribution_major_version | int == 8)
44 |
45 | - name: Virtualization services are enabled
46 | ansible.builtin.service:
47 | name: virtqemud
48 | state: started
49 | enabled: true
50 | when:
51 | - ansible_distribution == 'CentOS'
52 | - ansible_distribution_major_version | int == 9
53 |
54 | - name: Ensuring libvirt module is present
55 | ansible.builtin.pip:
56 | name: libvirt-python
57 | become: true
58 |
59 | - name: Use TF project to ensure pool and network are defined
60 | community.general.terraform:
61 | project_path: "{{ workspace_directory.base_path }}/{{ cluster.name }}/terraform/libvirt-resources"
62 | variables:
63 | dns: "{{ infra_nodes.host_list.bastion[0].ip }}"
64 | domain: "{{ cluster.name }}.{{ domain }}"
65 | network_cidr: ' ["{{ network_cidr }}"]'
66 | cluster_name: "{{ cluster.name }}"
67 | force_init: true
68 | state: present
69 |
--------------------------------------------------------------------------------
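
The per-distribution package lists come from group_vars/vm_host/packages.yml (see the tree above). A hypothetical shape, with common libvirt/KVM package names as placeholders rather than the project's actual list:

virtualization_packages:
  rhel:
    - libvirt
    - qemu-kvm
    - virt-install
  centos:
    - libvirt
    - qemu-kvm
    - virt-install
  fedora:
    - libvirt
    - qemu-kvm
    - virt-install
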
/01_sno_install_virtualization_tools.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: This play installs needed tools to provision infrastructure VMs
3 | hosts: vm_host
4 | vars_files:
5 | - vars/sno_vars.yml
6 | become: true
7 | tasks:
8 | - name: Install needed packages
9 | ansible.builtin.yum:
10 | name: "{{ virtualization_packages.rhel }}"
11 | state: latest # noqa package-latest
12 | when:
13 | - ansible_distribution == 'RedHat'
14 |
15 | - name: Install needed packages
16 | ansible.builtin.yum:
17 | name: "{{ virtualization_packages.centos }}"
18 | state: latest # noqa package-latest
19 | when:
20 | - ansible_distribution == 'CentOS'
21 |
22 | - name: Install needed packages
23 | ansible.builtin.yum:
24 | name: "{{ virtualization_packages.fedora }}"
25 | state: latest # noqa package-latest
26 | when:
27 | - ansible_distribution == 'Fedora'
28 |
29 | - name: Download and provision Terraform
30 | ansible.builtin.unarchive:
31 | src: "{{ terraform_release_url }}"
32 | dest: /usr/bin/
33 | mode: "0755"
34 | remote_src: true
35 |
36 | - name: Virtualization services are enabled
37 | ansible.builtin.service:
38 | name: libvirtd
39 | state: started
40 | enabled: true
41 | when: ansible_distribution != 'CentOS' or
42 | (ansible_distribution == 'CentOS' and ansible_distribution_major_version | int == 8)
43 |
44 | - name: Virtualization services are enabled
45 | ansible.builtin.service:
46 | name: virtqemud
47 | state: started
48 | enabled: true
49 | when:
50 | - ansible_distribution == 'CentOS'
51 | - ansible_distribution_major_version | int == 9
52 |
53 | - name: Ensure libvirt can use root as user
54 | ansible.builtin.replace:
55 | path: /etc/libvirt/qemu.conf
56 | regexp: "{{ item.regexp }}"
57 | replace: "{{ item.line }}"
58 | loop:
59 | - regexp: '#user = "root"'
60 | line: 'user = "root"'
61 | - regexp: '#group = "root"'
62 | line: 'group = "root"'
63 |
64 | - name: Virtualization services are enabled
65 | ansible.builtin.service:
66 | name: libvirtd
67 | state: restarted
68 | enabled: true
69 | when:
70 | - ansible_distribution == 'CentOS'
71 |
72 | - name: Virtualization services are enabled
73 | ansible.builtin.service:
74 | name: virtqemud
75 | state: restarted
76 | enabled: true
77 | when:
78 | - ansible_distribution == 'CentOS'
79 | - ansible_distribution_major_version | int == 9
80 |
81 | - name: Use TF project to ensure pool and network are defined
82 | community.general.terraform:
83 | project_path: "{{ workspace_directory.base_path }}/{{ cluster.name }}/terraform/libvirt-resources-sno"
84 | variables:
85 | domain: "{{ cluster.name }}.{{ domain }}"
86 | network_cidr: ' ["{{ network_cidr }}"]'
87 | cluster_name: "{{ cluster.name }}"
88 | dns: "{{ cluster_nodes.host_list.sno.ip }}"
89 | force_init: true
90 | state: present
91 | become: true
92 |
--------------------------------------------------------------------------------
/02_setup_working_directory.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: This play sets up the working directory for the installation
3 | hosts: vm_host
4 | vars_files:
5 | - vars/cluster_vars.yml
6 | tasks:
7 | - name: Set home directory as fact for the user
8 | ansible.builtin.set_fact:
9 | home_dir: "{{ ansible_env.HOME }}"
10 |
11 | - name: Creating workspace
12 | ansible.builtin.file:
13 | state: directory
14 | path: "{{ workspace_directory.base_path }}/{{ cluster.name }}"
15 | recurse: true
16 |
17 | - name: Copy terraform files on host
18 | ansible.builtin.copy:
19 | src: "{{ playbook_dir }}/terraform"
20 | dest: "{{ workspace_directory.base_path }}/{{ cluster.name }}/"
21 | mode: "0755"
22 |
23 | - name: Create ssh keypair
24 | community.crypto.openssh_keypair:
25 | path: "{{ playbook_dir }}/id_rsa_ocp_setup"
26 | delegate_to: localhost
27 |
28 | - name: Copy SSH keys in working directory
29 | ansible.builtin.copy:
30 | src: "{{ playbook_dir }}/{{ item }}"
31 | dest: "{{ workspace_directory.base_path }}/{{ cluster.name }}/{{ item }}"
32 | mode: "0755"
33 | loop:
34 | - id_rsa_ocp_setup
35 | - id_rsa_ocp_setup.pub
36 |
37 | - name: Getting ssh private key
38 | ansible.builtin.slurp:
39 | src: "{{ workspace_directory.base_path }}/{{ cluster.name }}/id_rsa_ocp_setup"
40 | register: service_key
41 |
42 | - name: Getting ssh public key
43 | ansible.builtin.slurp:
44 | src: "{{ workspace_directory.base_path }}/{{ cluster.name }}/id_rsa_ocp_setup.pub"
45 | register: service_key_pub
46 |
47 | - name: Save SSH keys as fact for further use
48 | ansible.builtin.set_fact:
49 | ssh_service_key: "{{ service_key['content'] | b64decode }}"
50 | ssh_service_key_pub: "{{ service_key_pub['content'] | b64decode }}"
51 |
--------------------------------------------------------------------------------
/02_sno_setup_working_directory.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: This play sets up the working directory for the installation
3 | hosts: vm_host
4 | vars_files:
5 | - vars/sno_vars.yml
6 | tasks:
7 | - name: Set home directory as fact for the user
8 | ansible.builtin.set_fact:
9 | home_dir: "{{ ansible_env.HOME }}"
10 |
11 | - name: Creating workspace
12 | ansible.builtin.file:
13 | state: directory
14 | path: "{{ workspace_directory.base_path }}/{{ cluster.name }}"
15 | recurse: true
16 |
17 | - name: Copy terraform files on host
18 | ansible.builtin.copy:
19 | src: "{{ playbook_dir }}/terraform"
20 | dest: "{{ workspace_directory.base_path }}/{{ cluster.name }}/"
21 | mode: "0755"
22 |
23 | - name: Create ssh keypair
24 | community.crypto.openssh_keypair:
25 | path: "{{ playbook_dir }}/id_rsa_ocp_setup"
26 | delegate_to: localhost
27 |
28 | - name: Copy SSH keys in working directory
29 | ansible.builtin.copy:
30 | src: "{{ playbook_dir }}/{{ item }}"
31 | dest: "{{ workspace_directory.base_path }}/{{ cluster.name }}/{{ item }}"
32 | mode: "0755"
33 | loop:
34 | - id_rsa_ocp_setup
35 | - id_rsa_ocp_setup.pub
36 |
--------------------------------------------------------------------------------
/05_vm_provisioning_infra.yml:
--------------------------------------------------------------------------------
1 | - name: Provisioning infrastructure VMs
2 | hosts: vm_host
3 | vars_files:
4 | - vars/cluster_vars.yml
5 | - vars/infra_vars.yml
6 | tasks:
7 | - name: Set ocp_domain as fact
8 | ansible.builtin.set_fact:
9 | ocp_domain: "{{ cluster.name }}.{{ domain }}"
10 |
11 | - name: Ensure to clean known_hosts
12 | ansible.builtin.known_hosts:
13 | host: "{{ item.value[0].ip }}"
14 | path: ~/.ssh/known_hosts
15 | state: absent
16 | loop: "{{ infra_nodes.host_list | dict2items }}"
17 | delegate_to: localhost
18 |
19 | - name: Deploy bastion VM with terraform
20 | community.general.terraform:
21 | force_init: true
22 | project_path: "{{ workspace_directory.base_path }}/{{ cluster.name }}/terraform/bastion"
23 | variables:
24 | libvirt_network: "{{ cluster.name }}"
25 | libvirt_pool: "{{ cluster.name }}"
26 | network_data: '{ hostIP = "{{ infra_nodes.host_list.bastion[0].ip }}", broadcast= "{{ infra_nodes.host_list.bastion[0].ip | ansible.utils.ipsubnet(24) | ansible.utils.ipaddr(''broadcast'') }}", dns = "{{ infra_nodes.host_list.bastion[0].ip | ansible.utils.ipsubnet(24) | ansible.utils.ipaddr(''network'') | ansible.utils.ipmath(1) }}", gateway = "{{ infra_nodes.host_list.bastion[0].ip | ansible.utils.ipsubnet(24) | ansible.utils.ipaddr(''network'') | ansible.utils.ipmath(1) }}", network = "{{ infra_nodes.host_list.bastion[0].ip | ansible.utils.ipsubnet(24) | ansible.utils.ipaddr(''network'') }}" }' # noqa yaml[line-length]
27 | hostname: "bastion"
28 | domain: "{{ domain }}"
29 | cluster_name: "{{ cluster.name }}"
30 | sshkey: "{{ ssh_service_key_pub }}"
31 | state: present
32 | become: true
33 | register: output_ba
34 |
35 | - name: Deploy loadbalancer VM
36 | community.general.terraform:
37 | force_init: true
38 | project_path: "{{ workspace_directory.base_path }}/{{ cluster.name }}/terraform/loadbalancer"
39 | variables:
40 | libvirt_network: "{{ cluster.name }}"
41 | libvirt_pool: "{{ cluster.name }}"
42 | network_data: '{ hostIP = "{{ infra_nodes.host_list.loadbalancer[0].ip }}", broadcast= "{{ infra_nodes.host_list.loadbalancer[0].ip | ansible.utils.ipsubnet(24) | ansible.utils.ipaddr(''broadcast'') }}", dns = "{{ infra_nodes.host_list.loadbalancer[0].ip | ansible.utils.ipsubnet(24) | ansible.utils.ipaddr(''network'') | ansible.utils.ipmath(1) }}", gateway = "{{ infra_nodes.host_list.loadbalancer[0].ip | ansible.utils.ipsubnet(24) | ansible.utils.ipaddr(''network'') | ansible.utils.ipmath(1) }}", network = "{{ infra_nodes.host_list.loadbalancer[0].ip | ansible.utils.ipsubnet(24) | ansible.utils.ipaddr(''network'') }}" }' # noqa yaml[line-length]
43 | hostname: "loadbalancer"
44 | domain: "{{ domain }}"
45 | cluster_name: "{{ cluster.name }}"
46 | sshkey: "{{ ssh_service_key_pub }}"
47 | state: present
48 | become: true
49 | register: output_lb
50 |
51 | - name: Add bastion and loadbalancer to in-memory inventory
52 | ansible.builtin.add_host:
53 | hostname: "{{ item.key }}"
54 | ansible_host: "{{ item.value[0].ip }}"
55 | ansible_ssh_private_key_file: "{{ playbook_dir }}/id_rsa_ocp_setup"
56 | ansible_user: ocpinstall
57 | ansible_ssh_common_args: "-o StrictHostKeyChecking=no"
58 | domain: "{{ domain }}"
59 | cluster_name: "{{ cluster.name }}"
60 | ocp_domain: "{{ ocp_domain }}"
61 | timezone: "{{ dhcp.timezone }}"
62 | ntp_server: "{{ dhcp.ntp }}"
63 | loop: "{{ infra_nodes.host_list | dict2items }}"
64 | delegate_to: localhost
65 |
66 | - name: Check connection to infra VMs and set facts
67 | hosts: bastion,loadbalancer
68 | gather_facts: false
69 | tasks:
70 | - name: Wait up to 120 seconds for target connection to become reachable/usable
71 | ansible.builtin.wait_for_connection:
72 | timeout: 120
73 | delay: 0
74 | - name: Verify the host can be reached
75 | ansible.builtin.ping:
76 |
77 | - name: Save host facts
78 | ansible.builtin.setup:
79 | register: machine_facts
80 |
81 | - name: Fetch specific facts for further use
82 | ansible.builtin.set_fact:
83 | host_ip: "{{ machine_facts.ansible_facts.ansible_default_ipv4.address }}"
84 | host_interface: "{{ machine_facts.ansible_facts.ansible_default_ipv4.interface }}"
85 | host_mac: "{{ machine_facts.ansible_facts.ansible_default_ipv4.macaddress }}"
86 | host_fqdn: "{{ machine_facts.ansible_facts.ansible_fqdn }}"
87 |
88 | - name: Fetch specific facts for further use
89 | ansible.builtin.set_fact:
90 | host_api_fqdn: "api.{{ ocp_domain }}"
91 | host_api_int_fqdn: "api-int.{{ ocp_domain }}"
92 | host_apps_fqdn: "apps.{{ ocp_domain }}"
93 | when: inventory_hostname == 'loadbalancer'
94 |
--------------------------------------------------------------------------------
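
The network_data strings above lean on the ansible.utils filter chain; for a sample bastion IP of 192.168.100.4 it resolves as follows:

  '192.168.100.4' | ansible.utils.ipsubnet(24)   -> '192.168.100.0/24'
  ... | ansible.utils.ipaddr('broadcast')        -> '192.168.100.255'
  ... | ansible.utils.ipaddr('network')          -> '192.168.100.0'
  ... | ansible.utils.ipmath(1)                  -> '192.168.100.1'   (used for both gateway and dns)
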
/07_vm_provisioning_ocp.yml:
--------------------------------------------------------------------------------
1 | - name: This play provisions OCP VMs based on initial config
2 | hosts: vm_host
3 | vars_files:
4 | - vars/cluster_vars.yml
5 | - vars/infra_vars.yml
6 | tasks:
7 | - name: Provision bootstrap node with Terraform
8 | community.general.terraform:
9 | force_init: true
10 | project_path: "{{ workspace_directory.base_path }}/{{ cluster.name }}/terraform/bootstrap"
11 | variables:
12 | hostname: "bootstrap"
13 | libvirt_network: "{{ cluster.name }}"
14 | libvirt_pool: "{{ cluster.name }}"
15 | cpu: "{{ cluster_nodes.specs.bootstrap.vcpu }}"
16 | vm_volume_size: "{{ cluster_nodes.specs.bootstrap.disk }}"
17 | memory: "{{ cluster_nodes.specs.bootstrap.mem }}"
18 | state: present
19 | become: true
20 | register: output_bootstrap
21 |
22 | - name: Provision master nodes with Terraform
23 | community.general.terraform:
24 | project_path: "{{ workspace_directory.base_path }}/{{ cluster.name }}/terraform/masters"
25 | force_init: true
26 | variables:
27 | hostname: "master"
28 | libvirt_network: "{{ cluster.name }}"
29 | libvirt_pool: "{{ cluster.name }}"
30 | cpu: "{{ cluster_nodes.specs.masters.vcpu }}"
31 | memory: "{{ cluster_nodes.specs.masters.mem }}"
32 | vm_volume_size: "{{ cluster_nodes.specs.masters.disk }}"
33 | vm_count: "{{ cluster_nodes.host_list.masters | count }}"
34 | vm_block_device: "{{ cluster.additional_block_device.enabled if three_node else false }}"
35 | vm_block_device_size: "{{ cluster.additional_block_device.size }}"
36 | vm_additional_nic: "{{ cluster.additional_nic.enabled | default(false, true) }}"
37 | vm_additional_nic_network: "{{ cluster.additional_nic.network | default(cluster.name, true) }}"
38 | state: present
39 | become: true
40 | register: output_masters
41 |
42 | - name: Provision worker nodes with Terraform
43 | community.general.terraform:
44 | project_path: "{{ workspace_directory.base_path }}/{{ cluster.name }}/terraform/workers"
45 | force_init: true
46 | variables:
47 | hostname: "worker"
48 | libvirt_network: "{{ cluster.name }}"
49 | libvirt_pool: "{{ cluster.name }}"
50 | cpu: "{{ cluster_nodes.specs.workers.vcpu }}"
51 | memory: "{{ cluster_nodes.specs.workers.mem }}"
52 | vm_volume_size: "{{ cluster_nodes.specs.workers.disk }}"
53 | vm_count: "{{ cluster_nodes.host_list.workers | count }}"
54 | vm_block_device: "{{ cluster.additional_block_device.enabled | default(false, true) }}"
55 | vm_block_device_size: "{{ cluster.additional_block_device.size }}"
56 | vm_additional_nic: "{{ cluster.additional_nic.enabled | default(false, true) }}"
57 | vm_additional_nic_network: "{{ cluster.additional_nic.network | default(cluster.name, true) }}"
58 | state: present
59 | become: true
60 | register: output_workers
61 | when: not three_node
62 |
63 | - name: Add bootstrap node and set vars
64 | ansible.builtin.add_host:
65 | hostname: "bootstrap"
66 | node_hostname: "bootstrap"
67 | node_mac: "{{ output_bootstrap.outputs.macs.value[0] | lower }}"
68 | node_ip: "{{ item.ip }}"
69 | node_reversedns: "{{ item.ip | ansible.utils.ipaddr('revdns') }}" # noqa jinja[invalid]
70 | node_role: "bootstrap"
71 | node_fqdn: bootstrap.{{ ocp_domain }}
72 | groups:
73 | - "bootstrap"
74 | loop: "{{ cluster_nodes.host_list.bootstrap }}"
75 |
76 | - name: Add masters to correct group and set facts
77 | ansible.builtin.add_host:
78 | hostname: "master-{{ master_idx }}"
79 | node_hostname: "master-{{ master_idx }}"
80 | etcd_fqdn: "etcd-{{ master_idx }}.{{ ocp_domain }}"
81 | etcd_ip: "{{ item.ip }}"
82 | node_mac: "{{ output_masters.outputs.macs.value[master_idx] | lower }}" # noqa jinja[invalid]
83 | node_ip: "{{ item.ip }}"
84 | node_reversedns: "{{ item.ip | ansible.utils.ipaddr('revdns') }}"
85 | node_role: "master"
86 | node_fqdn: master-{{ master_idx }}.{{ ocp_domain }}
87 | groups:
88 | - "masters"
89 | loop: "{{ cluster_nodes.host_list.masters }}"
90 | loop_control:
91 | index_var: master_idx
92 | when: not three_node
93 |
94 | - name: Add masters to correct group and set facts
95 | ansible.builtin.add_host:
96 | hostname: "master-{{ master_idx }}"
97 | node_hostname: "master-{{ master_idx }}"
98 | etcd_fqdn: "etcd-{{ master_idx }}.{{ ocp_domain }}"
99 | etcd_ip: "{{ item.ip }}"
100 | node_mac: "{{ output_masters.outputs.macs.value[master_idx] | lower }}" # noqa jinja[invalid]
101 | node_ip: "{{ item.ip }}"
102 | node_reversedns: "{{ item.ip | ansible.utils.ipaddr('revdns') }}"
103 | node_role: "master"
104 | node_fqdn: master-{{ master_idx }}.{{ ocp_domain }}
105 | groups:
106 | - "masters"
107 | - "workers"
108 | loop: "{{ cluster_nodes.host_list.masters }}"
109 | loop_control:
110 | index_var: master_idx
111 | when: three_node
112 |
113 | - name: Add workers to correct group and set facts
114 | ansible.builtin.add_host:
115 | hostname: "worker-{{ worker_idx }}"
116 | node_hostname: "worker-{{ worker_idx }}"
117 | node_mac: "{{ output_workers.outputs.macs.value[worker_idx] | lower }}" # noqa jinja[invalid]
118 | node_ip: "{{ item.ip }}"
119 | node_reversedns: "{{ item.ip | ansible.utils.ipaddr('revdns') }}"
120 | node_role: "worker"
121 | ocp_role: "{{ item.role | default('worker') }}"
122 | node_fqdn: worker-{{ worker_idx }}.{{ ocp_domain }}
123 | groups:
124 | - "workers"
125 | loop: "{{ cluster_nodes.host_list.workers }}"
126 | loop_control:
127 | index_var: worker_idx
128 | when: not three_node
129 |
130 | - name: Run dynamic inventory creation for infra nodes
131 | when: not three_node
132 | block:
133 | - name: Add infra nodes to correct group and set facts
134 | ansible.builtin.add_host:
135 | hostname: "{{ item }}"
136 | groups:
137 | - "infra"
138 | loop: "{{ groups['workers'] }}"
139 | when:
140 | - hostvars[item].ocp_role == 'infra'
141 |
--------------------------------------------------------------------------------
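
After this play the in-memory inventory contains roughly the following groups, derived from the add_host calls above (hostnames are the generated ones):

  bootstrap: [bootstrap]
  masters:   [master-0, master-1, master-2]
  workers:   [worker-0, worker-1, ...]   # in three_node mode the masters land here instead
  infra:     the subset of workers whose 'role' var is set to 'infra'
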
/10_infra_packages.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Bastion packages installer
3 | hosts: bastion
4 | become: true
5 | tasks:
6 | - name: Installing required packages for Bastion VM (Fedora)
7 | ansible.builtin.yum:
8 | name: "{{ packages.fedora }}"
9 | state: present
10 | when: ansible_distribution == "Fedora"
11 |
12 | - name: Installing required packages for Bastion VM (CentOS)
13 | ansible.builtin.yum:
14 | name: "{{ packages.centos }}"
15 | state: present
16 | when: ansible_distribution == "CentOS"
17 |
18 | - name: Loadbalancer packages installer
19 | hosts: loadbalancer
20 | become: true
21 | tasks:
22 | - name: Installing required packages
23 | ansible.builtin.yum:
24 | name: "{{ packages }}"
25 | state: present
26 |
--------------------------------------------------------------------------------
/13_bastion_networking.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Bastion networking playbook
3 | hosts: bastion
4 | become: true
5 | tasks:
6 | - name: Checking Bastion internal network interfaces
7 | ansible.builtin.fail:
8 | msg: "{{ host_interface }} not found in {{ ansible_facts.interfaces }}"
9 | when: host_interface not in ansible_facts.interfaces
10 |
11 | - name: Ensure firewalld is enabled and running
12 | ansible.builtin.service:
13 | name: firewalld
14 | state: started
15 | enabled: true
16 |
17 | # - name: Adding interface {{ host_interface }} to internal zone
18 | # community.general.nmcli:
19 | # conn_name: "{{ host_interface }}"
20 | # zone: internal
21 | # state: present
22 | - name: Adding interface to internal zone # noqa no-changed-when
23 | ansible.builtin.command: nmcli con mod {{ host_interface }} connection.zone internal
24 |
25 | - name: Adding Bastion interface to firewall internal zone
26 | ansible.posix.firewalld:
27 | zone: internal
28 | interface: "{{ host_interface }}"
29 | permanent: true
30 | state: enabled
31 |
32 | - name: Allow required service for internal zone
33 | ansible.posix.firewalld:
34 | zone: internal
35 | state: enabled
36 | permanent: true
37 | service: "{{ item }}"
38 | loop: "{{ services }}"
39 |
40 | - name: Allow tftp and pxe ports
41 | ansible.posix.firewalld:
42 | zone: internal
43 | state: enabled
44 | permanent: true
45 | port: "{{ item }}"
46 | loop: "{{ ports }}"
47 |
48 | - name: Reload Bastion firewalld service
49 | ansible.builtin.service:
50 | name: firewalld
51 | state: restarted
52 |
--------------------------------------------------------------------------------
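
The services and ports loops read host_vars/bastion/fw_bastion.yml. A hypothetical shape consistent with the tftp/pxe comment above (the service and port values are assumptions, not the file's verbatim content):

services:
  - dns
  - dhcp
  - http
  - tftp
ports:
  - 4011/udp   # PXE proxyDHCP
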
/15_bastion_workspace.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Creating a workspace on Bastion
3 | hosts: bastion
4 | vars_files:
5 | - vars/cluster_vars.yml
6 | tasks:
7 | - name: Erasing dnsmasq lease
8 | ansible.builtin.file:
9 | path: /var/lib/dnsmasq/dnsmasq.leases
10 | state: absent
11 | become: true
12 |
13 | - name: Creating workspace
14 | ansible.builtin.file:
15 | state: directory
16 | path: "{{ item }}"
17 | mode: "0755"
18 | loop:
19 | - "{{ workspace_directory.base_path }}/{{ cluster.name }}/{{ workspace_directory.config_dir }}"
20 | - "~/.ssh"
21 |
22 | - name: Creating ssh keys if they do not exist
23 | community.crypto.openssh_keypair:
24 | path: "~/.ssh/id_rsa"
25 |
26 | - name: Creating tftpboot and nginx workspace
27 | ansible.builtin.file:
28 | state: directory
29 | path: "{{ item }}"
30 | mode: "0755"
31 | loop:
32 | - "{{ tftp_boot_root }}/{{ tftp_workspace_dir }}"
33 | - "{{ nginx_document_root }}/{{ nginx_workspace_dir }}"
34 | become: true
35 |
36 | - name: Downloading Openshift installer and CLI
37 | ansible.builtin.unarchive:
38 | src: "{{ item }}"
39 | dest: /usr/bin
40 | remote_src: true
41 | become: true
42 | loop:
43 | - "{{ downloads.ocp.base_url }}/{{ cluster.version }}/openshift-client-linux-{{ cluster.version }}.tar.gz"
44 | - "{{ downloads.ocp.base_url }}/{{ cluster.version }}/openshift-install-linux-{{ cluster.version }}.tar.gz"
45 | when:
46 | - not (skip_download | bool)
47 | - cluster.version != "latest"
48 | - cluster.version != "fast"
49 | - cluster.version != "stable"
50 | - cluster.version != "candidate"
51 |
52 | - name: Downloading Openshift installer and CLI
53 | ansible.builtin.unarchive:
54 | src: "{{ item }}"
55 | dest: /usr/bin
56 | remote_src: true
57 | become: true
58 | loop:
59 | - "{{ downloads.ocp.base_url }}/{{ cluster.version }}/openshift-client-linux.tar.gz"
60 | - "{{ downloads.ocp.base_url }}/{{ cluster.version }}/openshift-install-linux.tar.gz"
61 | when:
62 | - not (skip_download | bool)
63 | - cluster.version == "latest" or cluster.version == "fast" or cluster.version == "stable" or cluster.version == "candidate"
64 |
65 | - name: Checking for openshift-install tool # noqa no-changed-when
66 | ansible.builtin.command: openshift-install version
67 | register: output
68 | failed_when: output.rc != 0
69 |
70 | - name: Checking for OCP cli tool # noqa no-changed-when
71 | ansible.builtin.command: oc
72 | register: output
73 | failed_when: output.rc != 0
74 |
75 | - name: Take care of retrieving packages for CoreOS
76 | when: not (skip_download | bool)
77 | become: true
78 | block:
79 | - name: Retrieve the minor version
80 | ansible.builtin.set_fact:
81 | version_check: "{{ cluster.version.split('.') }}"
82 | when: cluster.version != "stable"
83 |
84 | - name: Retrieve the minor version
85 | ansible.builtin.set_fact:
86 | version: "{{ (version_check.0 + '.' + version_check.1) | default('', true) }}"
87 | when: version_check is defined
88 |
89 | - name: Set fact for files
90 | ansible.builtin.set_fact:
91 | rhcos_kernel: "{{ downloads.rhcos.boot_files.kernel }}"
92 | rhcos_initramfs: "{{ downloads.rhcos.boot_files.initramfs }}"
93 | rhcos_os: "{{ downloads.rhcos.boot_files.rootfs }}"
94 | rhcos_download_url: "{{ (downloads.rhcos.base_url + version + '/latest/') if cluster.version != 'stable' else (downloads.rhcos.base_url + 'latest/') }}" # noqa yaml[line-length]
95 |
96 | - name: Download initramfs and kernel
97 | ansible.builtin.get_url:
98 | url: "{{ rhcos_download_url + item }}"
99 | dest: "{{ tftp_boot_root }}/{{ tftp_workspace_dir }}/{{ item }}"
100 | mode: "0755"
101 | loop:
102 | - "{{ rhcos_initramfs }}"
103 | - "{{ rhcos_kernel }}"
104 |
105 | - name: Download Red Hat CoreOS for bare metal
106 | ansible.builtin.get_url:
107 | url: "{{ rhcos_download_url + rhcos_os }}"
108 | dest: "{{ nginx_document_root }}/{{ nginx_workspace_dir }}/{{ rhcos_os }}"
109 | mode: "0755"
110 |
--------------------------------------------------------------------------------
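
For a pinned version, the CoreOS URL assembly above works out like this (example with cluster.version = "4.12.9", an assumed value):

  version_check      -> ['4', '12', '9']
  version            -> '4.12'
  rhcos_download_url -> downloads.rhcos.base_url + '4.12/latest/'

With cluster.version = "stable", the URL falls back to downloads.rhcos.base_url + 'latest/'.
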
/17_bastion_services.yml:
--------------------------------------------------------------------------------
1 | - name: Bastion services configuration
2 | hosts: bastion
3 | become: true
4 | vars_files:
5 | - vars/infra_vars.yml
6 | - vars/cluster_vars.yml
7 | tasks:
8 | - name: Ensuring tftp boot directory exists
9 | ansible.builtin.file:
10 | state: directory
11 | path: "{{ tftp_boot_root }}/pxelinux.cfg"
12 | mode: "0755"
13 |
14 | - name: Copy pxelinux.0 file
15 | ansible.builtin.copy:
16 | src: /usr/share/syslinux/{{ item }}
17 | dest: "{{ tftp_boot_root }}/"
18 | mode: "0755"
19 | remote_src: true
20 | loop: "{{ pxe_files }}"
21 |
22 | - name: Creating SELinux rules for dnsmasq service
23 | ansible.builtin.copy:
24 | src: files/my-dnsmasq.pp
25 | dest: /tmp
26 | mode: "0755"
27 |
28 | - name: Apply SELinux rules for dnsmasq service # noqa no-changed-when
29 | ansible.builtin.command: semodule -X 300 -i /tmp/my-dnsmasq.pp
30 |
31 | - name: Delete selinux temp file
32 | ansible.builtin.file:
33 | path: "{{ item }}"
34 | state: absent
35 | loop:
36 | - /tmp/my-dnsmasq.pp
37 | - /tmp/my-dnsmasq.te
38 |
39 | - name: Firing dnsmasq template
40 | ansible.builtin.template:
41 | src: templates/dnsmasq.j2
42 | dest: /etc/dnsmasq.conf
43 | mode: "0755"
44 |
45 | - name: Firing pxe boot template for master nodes
46 | ansible.builtin.template:
47 | src: templates/pxeboot_mac.j2
48 | dest: "{{ tftp_boot_root }}/pxelinux.cfg/01-{{ hostvars[item].node_mac | replace(':', '-') }}"
49 | mode: "0755"
50 | loop: "{{ groups['masters'] }}"
51 |
52 | - name: Firing pxe boot template for the bootstrap node
53 | ansible.builtin.template:
54 | src: templates/pxeboot_mac.j2
55 | dest: "{{ tftp_boot_root }}/pxelinux.cfg/01-{{ hostvars[item].node_mac | replace(':', '-') }}"
56 | mode: "0755"
57 | loop: "{{ groups['bootstrap'] }}"
58 |
59 | - name: Firing pxe boot template for worker nodes
60 | ansible.builtin.template:
61 | src: templates/pxeboot_mac.j2
62 | dest: "{{ tftp_boot_root }}/pxelinux.cfg/01-{{ hostvars[item].node_mac | replace(':', '-') }}"
63 | mode: "0755"
64 | loop: "{{ groups['workers'] }}"
65 |
66 | - name: Configure interface to use bastion as DNS server # noqa no-changed-when
67 | ansible.builtin.command: nmcli con mod {{ host_interface }} ipv4.dns "{{ host_ip }}"
68 |
69 | - name: Refreshing interface # noqa no-changed-when
70 | ansible.builtin.shell: nmcli con down "{{ host_interface }}"; nmcli con up "{{ host_interface }}"
71 |
72 | - name: Enable services
73 | ansible.builtin.service:
74 | name: "{{ item }}"
75 | enabled: true
76 | state: started
77 | loop:
78 | - dnsmasq
79 | - nginx
80 |
81 | - name: Rebooting bastion
82 | ansible.builtin.reboot:
83 |
--------------------------------------------------------------------------------
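
The pxeboot destination follows the standard PXELINUX MAC-file convention: the ARP hardware type prefix 01- plus the node's MAC address, lowercased with colons replaced by dashes. For example:

  node_mac: 52:54:00:ab:cd:ef
  file:     {{ tftp_boot_root }}/pxelinux.cfg/01-52-54-00-ab-cd-ef
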
/19_lb_services.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Loadbalancer configuration play
3 | hosts: loadbalancer
4 | become: true
5 | tasks:
6 | - name: Checking internal network interfaces
7 | ansible.builtin.fail:
8 | msg: "{{ host_interface }} not found in {{ ansible_facts.interfaces }}"
9 | when: host_interface not in ansible_facts.interfaces
10 |
11 | - name: Adding interface to internal zone # noqa no-changed-when
12 | ansible.builtin.shell: nmcli con mod {{ host_interface }} connection.zone internal && nmcli con mod {{ host_interface }} ipv4.dns {{ hostvars['bastion']['host_ip'] }} # noqa yaml[line-length]
13 |
14 | - name: Enable firewalld and haproxy
15 | ansible.builtin.service:
16 | name: "{{ item }}"
17 | enabled: true
18 | state: started
19 | loop:
20 | - firewalld
21 | - haproxy
22 |
23 | # https://access.redhat.com/discussions/1455033
24 |
25 | # - name: Adding interface {{ host_interface }} to internal zone
26 | # community.general.nmcli:
27 | # conn_name: "{{ host_interface }}"
28 | # zone: internal
29 | # dns4: "{{ hostvars['bastion']['host_ip'] }}"
30 | # gw4:
31 | # state: present
32 |
33 | - name: Setting bastion's IP as DNS for our interface # noqa no-changed-when
34 | ansible.builtin.command: nmcli con mod {{ host_interface }} connection.zone internal ipv4.dns {{ hostvars['bastion']['host_ip'] }}
35 |
36 | - name: Refreshing {{ host_interface }} # noqa no-changed-when
37 | ansible.builtin.shell: nmcli con down "{{ host_interface }}"; nmcli con up "{{ host_interface }}"
38 |
39 | - name: Adding interface to firewall internal zone
40 | ansible.posix.firewalld:
41 | zone: internal
42 | interface: "{{ host_interface }}"
43 | permanent: true
44 | state: enabled
45 |
46 | - name: Allow service for internal zone
47 | ansible.posix.firewalld:
48 | zone: internal
49 | state: enabled
50 | permanent: true
51 | service: "{{ item }}"
52 | loop: "{{ services }}"
53 |
54 | - name: Allow ports for internal zone
55 | ansible.posix.firewalld:
56 | zone: internal
57 | state: enabled
58 | permanent: true
59 | port: "{{ item }}"
60 | loop: "{{ internal_zone_port }}"
61 |
62 | - name: Reload firewalld service
63 | ansible.builtin.service:
64 | name: firewalld
65 | state: restarted
66 |
67 | - name: Enabling selinux boolean for haproxy
68 | ansible.posix.seboolean:
69 | name: haproxy_connect_any
70 | state: true
71 | persistent: true
72 |
73 | - name: Firing haproxy template
74 | ansible.builtin.template:
75 | src: templates/haproxy.j2
76 | dest: /etc/haproxy/haproxy.cfg
77 | mode: "0755"
78 |
79 | - name: Reload haproxy service
80 | ansible.builtin.service:
81 | name: haproxy
82 | state: restarted
83 | enabled: true
84 |
--------------------------------------------------------------------------------
/20_prepare_ocp_install.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Install config
3 | hosts: bastion
4 | vars_files:
5 | - vars/cluster_vars.yml
6 | tasks:
7 | - name: Removing config directory
8 | ansible.builtin.file:
9 | path: "{{ workspace_directory.base_path }}/{{ cluster.name }}/{{ workspace_directory.config_dir }}"
10 | state: absent
11 | become: true
12 |
13 | - name: Creating config directory
14 | ansible.builtin.file:
15 | path: "{{ workspace_directory.base_path }}/{{ cluster.name }}/{{ workspace_directory.config_dir }}"
16 | state: directory
17 | mode: "0755"
18 |
19 | - name: Getting ssh public key
20 | ansible.builtin.slurp:
21 | src: ~/.ssh/id_rsa.pub
22 | register: key
23 |
24 | - name: Save SSH key as fact
25 | ansible.builtin.set_fact:
26 | sshkey: "{{ key['content'] | b64decode }}"
27 |
28 | - name: Firing up install-config.yaml
29 | ansible.builtin.template:
30 | src: templates/install-config.j2
31 | dest: "{{ workspace_directory.base_path }}/{{ cluster.name }}/{{ workspace_directory.config_dir }}/install-config.yaml"
32 | mode: "0755"
33 |
34 | - name: Create backup dir
35 | ansible.builtin.file:
36 | state: directory
37 | path: /tmp/ocpinstall
38 | mode: "0755"
39 |
40 | - name: Backup install directory
41 | ansible.builtin.copy:
42 | src: "{{ workspace_directory.base_path }}/{{ cluster.name }}/{{ workspace_directory.config_dir }}"
43 | dest: /tmp/ocpinstall
44 | remote_src: true
45 | mode: "0755"
46 |
47 | - name: Creating ignition manifest # noqa command-instead-of-shell no-changed-when
48 | ansible.builtin.shell: openshift-install create manifests --dir {{ workspace_directory.base_path }}/{{ cluster.name }}/{{ workspace_directory.config_dir }} # noqa yaml[line-length]
49 |
50 | - name: Setting master unschedulable
51 | ansible.builtin.lineinfile:
52 | path: "{{ workspace_directory.base_path }}/{{ cluster.name }}/{{ workspace_directory.config_dir }}/manifests/cluster-scheduler-02-config.yml"
53 | regexp: " mastersSchedulable"
54 | line: " mastersSchedulable: False"
55 | when:
56 | - not three_node
57 | - hostvars['bastion'].version.1 | int > 1
58 |
59 | - name: Backup install directory
60 | ansible.builtin.copy:
61 | src: "{{ workspace_directory.base_path }}/{{ cluster.name }}/{{ workspace_directory.config_dir }}"
62 | dest: /tmp/ocpinstall
63 | remote_src: true
64 | mode: "0755"
65 |
66 | - name: Creating ignition files # noqa command-instead-of-shell yaml[line-length] no-changed-when
67 | ansible.builtin.shell: openshift-install create ignition-configs --dir {{ workspace_directory.base_path }}/{{ cluster.name }}/{{ workspace_directory.config_dir }} # noqa yaml[line-length]
68 |
69 | - name: Backup install directory
70 | ansible.builtin.copy:
71 | src: "{{ workspace_directory.base_path }}/{{ cluster.name }}/{{ workspace_directory.config_dir }}"
72 | dest: /tmp/ocpinstall
73 | remote_src: true
74 | mode: "0755"
75 |
76 | - name: Copying ignition files to webserver
77 | ansible.builtin.copy:
78 | src: "{{ workspace_directory.base_path }}/{{ cluster.name }}/{{ workspace_directory.config_dir }}/{{ item }}"
79 | dest: "{{ nginx_document_root }}/{{ nginx_workspace_dir }}"
80 | remote_src: true
81 | mode: "0644"
82 | loop:
83 | - bootstrap.ign
84 | - master.ign
85 | - worker.ign
86 | become: true
87 |
88 | - name: Restart nginx
89 | ansible.builtin.service:
90 | name: nginx
91 | state: restarted
92 | become: true
93 |
94 | - name: Creating csr approver script
95 | ansible.builtin.template:
96 | src: templates/csr.j2
97 | dest: "{{ workspace_directory.base_path }}/{{ cluster.name }}/csr.sh"
98 | mode: "0755"
99 |
100 | - name: Installing csr cronjob
101 | ansible.builtin.cron:
102 | name: "Openshift csr approver"
103 | job: "{{ workspace_directory.base_path }}/{{ cluster.name }}/csr.sh >> /tmp/aJob.log 2>&1"
104 | state: present
105 |
--------------------------------------------------------------------------------
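
templates/csr.j2 is likewise not included here; a minimal sketch of what such a cron-driven approver usually boils down to, assuming the kubeconfig generated under the config dir above:

#!/bin/bash
export KUBECONFIG={{ workspace_directory.base_path }}/{{ cluster.name }}/config/auth/kubeconfig
# Approve any pending node CSRs so masters/workers can join during install
oc get csr -o name | xargs --no-run-if-empty oc adm certificate approve
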
/25_pxeboot_vms.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: PXE boot vms
3 | hosts: vm_host
4 | become: true
5 | vars_files:
6 | - vars/cluster_vars.yml
7 | tasks:
8 | - name: Restart NetworkManager service
9 | ansible.builtin.service:
10 | name: NetworkManager
11 | state: restarted
12 |
13 | - name: Restart OCP VMs sequentially
14 | block:
15 | - name: Shutdown bootstrap node
16 | community.libvirt.virt:
17 | name: "{{ hostvars[item].node_hostname }}"
18 | state: destroyed
19 | loop: "{{ groups['bootstrap'] }}"
20 |
21 | - name: Shutdown master nodes
22 | community.libvirt.virt:
23 | name: "{{ hostvars[item].node_hostname }}"
24 | state: destroyed
25 | loop: "{{ groups['masters'] }}"
26 |
27 | - name: Shutdown worker nodes
28 | community.libvirt.virt:
29 | name: "{{ hostvars[item].node_hostname }}"
30 | state: destroyed
31 | loop: "{{ groups['workers'] }}"
32 | when: not three_node
33 |
34 | - name: Restart bootstrap node
35 | community.libvirt.virt:
36 | name: "{{ hostvars[item].node_hostname }}"
37 | state: running
38 | loop: "{{ groups['bootstrap'] }}"
39 |
40 | - name: Restart master nodes
41 | community.libvirt.virt:
42 | name: "{{ hostvars[item].node_hostname }}"
43 | state: running
44 | loop: "{{ groups['masters'] }}"
45 |
46 | - name: Restart worker nodes
47 | community.libvirt.virt:
48 | name: "{{ hostvars[item].node_hostname }}"
49 | state: running
50 | loop: "{{ groups['workers'] }}"
51 | when: not three_node
52 |
53 | - name: State message
54 | ansible.builtin.debug:
55 | msg: All VMs are booting...
56 |
--------------------------------------------------------------------------------
/27_ocp_install.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Let's install OCP
3 | hosts: bastion
4 | vars_files:
5 | - vars/cluster_vars.yml
6 | tasks:
7 | - name: State message
8 | ansible.builtin.debug:
9 | msg: All VMs booted up, using the MAC-tailored pxeboot configuration
10 |
11 | - name: Waiting for bootstrap to complete # noqa command-instead-of-shell no-changed-when
12 | ansible.builtin.shell: openshift-install wait-for bootstrap-complete --dir {{ workspace_directory.base_path }}/{{ cluster.name }}/{{ workspace_directory.config_dir }} # noqa yaml[line-length]
13 | retries: 10
14 | delay: 5
15 | register: result
16 | until: result.rc == 0
17 |
18 | - name: Waiting for install to complete # noqa command-instead-of-shell no-changed-when
19 | ansible.builtin.shell: openshift-install wait-for install-complete --dir {{ workspace_directory.base_path }}/{{ cluster.name }}/{{ workspace_directory.config_dir }} # noqa yaml[line-length]
20 | retries: 10
21 | delay: 5
22 | register: result
23 | until: result.rc == 0
24 |
25 | - name: Jedi
26 | ansible.builtin.debug:
27 | msg: May OCP be with you
28 |
--------------------------------------------------------------------------------
/29_remove_bootstrap.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Remove cronjob on bastion for csr
3 | hosts: bastion
4 | become: true
5 | tasks:
6 | - name: Removing csr cronjob
7 | ansible.builtin.cron:
8 | name: "Openshift csr approver"
9 | state: absent
10 |
11 | - name: Clean up haproxy config on loadbalancer
12 | hosts: loadbalancer
13 | become: true
14 | tasks:
15 | - name: Remove bootstrap from HAProxy
16 | ansible.builtin.lineinfile:
17 | regexp: "bootstrap"
18 | state: absent
19 | path: /etc/haproxy/haproxy.cfg
20 | notify: Restart haproxy
21 |
22 | handlers:
23 | - name: Restart haproxy
24 | ansible.builtin.service:
25 | name: haproxy
26 | state: restarted
27 |
28 | - name: Destroy bootstrap VM
29 | hosts: vm_host
30 | vars_files:
31 | - vars/cluster_vars.yml
32 | tasks:
33 | - name: Destroy bootstrap VM
34 | community.general.terraform:
35 | project_path: "{{ workspace_directory.base_path }}/{{ cluster.name }}/terraform/bootstrap"
36 | state: absent
37 | become: true
38 |
--------------------------------------------------------------------------------
/30_create_users.yml:
--------------------------------------------------------------------------------
1 | - name: Create admin with cluster admin privileges
2 | hosts: bastion
3 | vars_files: vars/cluster_vars.yml
4 | tasks:
5 | - name: Ensuring httpd-tools is present
6 | ansible.builtin.dnf:
7 | name: httpd-tools
8 | state: present
9 | become: true
10 |
11 | - name: Ensure pip is up to date
12 | ansible.builtin.pip:
13 | name: pip
14 | state: latest # noqa package-latest
15 | become: true
16 |
17 | - name: Ensuring passlib is present
18 | ansible.builtin.pip:
19 | name:
20 | - passlib
21 | - bcrypt
22 | become: true
23 |
24 | - name: Firing yaml configuration template for htpasswd identity provider
25 | ansible.builtin.template:
26 | src: templates/htpasswd_provider.j2
27 | dest: "{{ workspace_directory.base_path }}/{{ cluster.name }}/htpasswd_provider.yaml"
28 | mode: "0755"
29 |
30 | - name: Firing configuration script template for user creation
31 | ansible.builtin.template:
32 | src: templates/ocp_user_script.j2
33 | dest: /tmp/ocp_user.sh
34 | mode: +x
35 |
36 | - name: Creating htpasswd identity and user # noqa command-instead-of-shell no-changed-when
37 | ansible.builtin.shell: /tmp/ocp_user.sh
38 |
39 | - name: Sleeping 450 seconds...
40 | ansible.builtin.pause:
41 | seconds: 450
42 |
43 | - name: Try to login with admin # noqa command-instead-of-shell no-changed-when
44 | ansible.builtin.shell: /usr/bin/oc login --insecure-skip-tls-verify -u {{ cluster.ocp_user }} -p {{ cluster.ocp_pass }} https://api.{{ hostvars['bastion'].ocp_domain }}:6443 # noqa yaml[line-length]
45 | retries: 10
46 |
47 | - name: Remove kubeadmin user # noqa command-instead-of-shell no-changed-when
48 | ansible.builtin.shell: /usr/bin/oc delete secrets kubeadmin -n kube-system
49 |
--------------------------------------------------------------------------------
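
The htpasswd_provider.j2 template renders the standard OpenShift htpasswd identity provider; a sketch of the OAuth custom resource it would produce (the secret name is an assumption):

apiVersion: config.openshift.io/v1
kind: OAuth
metadata:
  name: cluster
spec:
  identityProviders:
    - name: htpasswd_provider
      mappingMethod: claim
      type: HTPasswd
      htpasswd:
        fileData:
          name: htpass-secret
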
/33_patch_nodes.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Patch worker nodes
3 | hosts: bastion
4 | vars_files:
5 | - vars/cluster_vars.yml
6 | tasks:
7 | - name: Patch nodes if needed
8 | when: not three_node
9 | block:
10 | - name: Set fact if infra patching is needed
11 | ansible.builtin.set_fact:
12 | infra_present: false
13 |
14 | - name: Set fact if infra patching is needed
15 | ansible.builtin.set_fact:
16 | infra_present: true
17 | when: "'infra' in groups"
18 |
19 | - name: Create Infra machine config pools if needed
20 | when: infra_present
21 | block:
22 | - name: Create machine config pool
23 | ansible.builtin.copy:
24 | src: files/machineconfigpool.yml
25 | dest: "{{ workspace_directory.base_path }}/{{ cluster.name }}/machineconfigpool.yml"
26 | mode: "0755"
27 |
28 | - name: Creating machine config pool for infra nodes # noqa command-instead-of-shell no-changed-when
29 | ansible.builtin.shell: /usr/bin/oc create -f {{ workspace_directory.base_path }}/{{ cluster.name }}/machineconfigpool.yml --kubeconfig={{ workspace_directory.base_path }}/{{ cluster.name }}/config/auth/kubeconfig # noqa yaml[line-length]
30 |
31 | - name: Copy script for node labeling
32 | ansible.builtin.template:
33 | src: templates/label_nodes.j2
34 | dest: /tmp/label_nodes.sh
35 | mode: +x
36 |
37 | - name: Applying patch # noqa command-instead-of-shell no-changed-when
38 | ansible.builtin.shell: /tmp/label_nodes.sh
39 |
40 | - name: Deleting label node script
41 | ansible.builtin.file:
42 | path: /tmp/label_nodes.sh
43 | state: absent
44 |
45 | - name: Set default nodeSelector
46 | ansible.builtin.template:
47 | src: templates/patch_default_selector.j2
48 | dest: /tmp/patch_default_selector.sh
49 | mode: +x
50 |
51 | - name: Execute script # noqa command-instead-of-shell no-changed-when
52 | ansible.builtin.shell: /tmp/patch_default_selector.sh
53 |
54 | - name: Delete node selector patch
55 | ansible.builtin.file:
56 | path: /tmp/patch_default_selector.sh
57 | state: absent
58 |
--------------------------------------------------------------------------------
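
files/machineconfigpool.yml is referenced above but not shown; the conventional infra MachineConfigPool it would contain looks like this (a sketch based on the documented pattern, not the file's verbatim content):

apiVersion: machineconfiguration.openshift.io/v1
kind: MachineConfigPool
metadata:
  name: infra
spec:
  machineConfigSelector:
    matchExpressions:
      - key: machineconfiguration.openshift.io/role
        operator: In
        values: [worker, infra]
  nodeSelector:
    matchLabels:
      node-role.kubernetes.io/infra: ""
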
/34_move_services_to_infra_nodes.yml:
--------------------------------------------------------------------------------
1 | - name: Patch configs to use infra nodes
2 | hosts: bastion
3 | become: true
4 | vars_files:
5 | - vars/cluster_vars.yml
6 | tasks:
7 | - name: Take care of moving core operators to infra nodes
8 | when: not three_node
9 | block:
10 | - name: Take care of moving core operators to infra nodes
11 | when: infra_present
12 | block:
13 | - name: Copy ingress and registry patcher templates
14 | ansible.builtin.template:
15 | src: templates/{{ item }}.j2
16 | dest: /tmp/{{ item }}.sh
17 | mode: +x
18 | loop:
19 | - patch_ingress_selector
20 | - patch_registry_selector
21 |
22 | - name: Run scripts # noqa command-instead-of-shell no-changed-when
23 | ansible.builtin.shell: /tmp/{{ item }}.sh
24 | loop:
25 | - patch_ingress_selector
26 | - patch_registry_selector
27 |
28 | - name: Delete scripts
29 | ansible.builtin.file:
30 | path: /tmp/{{ item }}.sh
31 | state: absent
32 | loop:
33 | - patch_ingress_selector
34 | - patch_registry_selector
35 |
36 | - name: Copy monitoring CM
37 | ansible.builtin.copy:
38 | src: files/patch_monitoring.yml
39 | dest: "{{ workspace_directory.base_path }}/{{ cluster.name }}/patch_monitoring.yml"
40 | mode: "0755"
41 |
42 | - name: Patch monitoring # noqa command-instead-of-shell no-changed-when
43 | ansible.builtin.shell: /usr/bin/oc create -f {{ workspace_directory.base_path }}/{{ cluster.name }}/patch_monitoring.yml --kubeconfig={{ workspace_directory.base_path }}/{{ cluster.name }}/config/auth/kubeconfig # noqa yaml[line-length]
44 |
--------------------------------------------------------------------------------
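
patch_ingress_selector.j2 and patch_registry_selector.j2 are not shown; the documented oc patch commands they presumably wrap look like the following (a sketch, not the templates' verbatim content):

oc patch ingresscontroller default -n openshift-ingress-operator --type merge \
  -p '{"spec":{"nodePlacement":{"nodeSelector":{"matchLabels":{"node-role.kubernetes.io/infra":""}}}}}'
oc patch configs.imageregistry.operator.openshift.io cluster --type merge \
  -p '{"spec":{"nodeSelector":{"node-role.kubernetes.io/infra":""}}}'
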
/70_setup_sno_cluster.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Play for preparing the OCP4 single-node setup
3 | hosts: vm_host
4 | become: true
5 | vars_files:
6 | - vars/sno_vars.yml
7 | - vars/infra_vars.yml
8 | tasks:
9 | - name: Preparing workspace
10 | ansible.builtin.file:
11 | state: directory
12 | path: "{{ workspace_directory.base_path }}/{{ cluster.name }}/{{ workspace_directory.config_dir }}"
13 | mode: "0755"
14 |
15 | - name: Creating ssh keys if they do not exist
16 | community.crypto.openssh_keypair:
17 | path: "~/.ssh/id_rsa_{{ cluster.name }}"
18 |
19 | - name: Fetch information about the release
20 | ansible.builtin.uri:
21 | url: "{{ downloads.ocp.base_url }}/{{ cluster.version }}/release.txt"
22 | return_content: true
23 | register: image_version
24 |
25 | - name: Save version as fact
26 | ansible.builtin.set_fact:
27 | release_image: "{{ image_version.content | regex_search('Pull From: ([^\\s]+)') | replace('Pull From: ', '') }}"
28 |
29 | - name: Downloading Openshift CLI
30 | ansible.builtin.unarchive:
31 | src: "{{ downloads.ocp.base_url }}/{{ cluster.version }}/openshift-client-linux-{{ cluster.version }}.tar.gz"
32 | dest: /usr/bin
33 | remote_src: true
34 | when:
35 | - cluster.version != "latest"
36 | - cluster.version != "fast"
37 | - cluster.version != "stable"
38 | - cluster.version != "candidate"
39 |
40 | - name: Downloading Openshift CLI
41 | ansible.builtin.unarchive:
42 | src: "{{ downloads.ocp.base_url }}/{{ cluster.version }}/openshift-client-linux.tar.gz"
43 | dest: /usr/bin
44 | remote_src: true
45 | when: cluster.version == "latest" or cluster.version == "fast" or cluster.version == "stable" or cluster.version == "candidate"
46 |
47 | - name: Checking for OCP cli tool # noqa no-changed-when
48 | ansible.builtin.command: oc
49 | register: output
50 | failed_when: output.rc != 0
51 |
52 | - name: Download CoreOS ISO and save it locally
53 | ansible.builtin.get_url:
54 | url: "{{ downloads.coreos.live_media }}"
55 | dest: "{{ workspace_directory.base_path }}/{{ cluster.name }}/coreos.iso"
56 | mode: "0755"
57 |
58 | - name: Download coreos-installer and save it locally
59 | ansible.builtin.get_url:
60 | url: "{{ downloads.coreos.installer }}"
61 | dest: /usr/bin/coreos-installer
62 | mode: +x
63 |
64 | - name: Ensure NM configuration directory exists
65 | ansible.builtin.file:
66 | path: /etc/NetworkManager/conf.d
67 | state: directory
68 | mode: "0755"
69 |
70 | - name: Ensure NM dnsmasq directory exists
71 | ansible.builtin.file:
72 | path: /etc/NetworkManager/dnsmasq.d
73 | state: directory
74 | mode: "0755"
75 |
76 | - name: Configure NetworkManager for local DNS
77 | ansible.builtin.copy:
78 | src: files/localdns.conf
79 | dest: /etc/NetworkManager/conf.d/{{ cluster.name }}-localdns.conf
80 | mode: "0755"
81 |
82 | - name: Configure NetworkManager for libvirt network
83 | ansible.builtin.template:
84 | src: templates/libvirt_dnsmasq_sno.j2
85 | dest: "/etc/NetworkManager/dnsmasq.d/{{ cluster.name }}-libvirt_dnsmasq.conf"
86 | mode: "0755"
87 |
88 | - name: Take care of systemd-resolved on Fedora 33+ hosts
89 | when: (ansible_distribution == 'Fedora' and ansible_distribution_major_version | int >= 33)
90 | block:
91 | - name: Ensure systemd-resolved config dir is present
92 | ansible.builtin.file:
93 | path: /etc/systemd/resolved.conf.d/
94 | state: directory
95 | mode: "0755"
96 |
97 | - name: Enable localdns if systemd-resolved is present
98 | ansible.builtin.template:
99 | src: systemd-resolved.j2
100 | dest: /etc/systemd/resolved.conf.d/{{ cluster.name }}-local.conf
101 | mode: "0755"
102 |
103 | - name: Restart systemd-resolved
104 | ansible.builtin.service:
105 | name: systemd-resolved
106 | state: restarted
107 |
108 |         - name: Back up resolv.conf for later debugging
109 | ansible.builtin.copy:
110 | src: /etc/resolv.conf
111 | dest: /etc/resolv.conf.bak
112 | remote_src: true
113 | mode: "0755"
114 |
115 |         - name: Point /etc/resolv.conf at the systemd-resolved stub
116 | ansible.builtin.file:
117 | src: /run/systemd/resolve/resolv.conf
118 | dest: /etc/resolv.conf
119 | state: link
120 | force: true
121 |
122 | - name: Copy pull secret to a file
123 | ansible.builtin.copy:
124 | content: "{{ cluster.pullSecret }}"
125 | dest: "{{ workspace_directory.base_path }}/{{ cluster.name }}/pull-secret"
126 | mode: "0755"
127 |
128 | - name: Extract openshift-baremetal-install from release image # noqa command-instead-of-shell no-changed-when
129 | ansible.builtin.shell: "oc adm release extract --registry-config {{ workspace_directory.base_path }}/{{ cluster.name }}/pull-secret --command=openshift-baremetal-install --to {{ workspace_directory.base_path }}/{{ cluster.name }}/openshift-baremetal-install {{ release_image }}" # noqa yaml[line-length]
130 |
131 | - name: Move openshift installer to PATH
132 | ansible.builtin.copy:
133 | src: "{{ workspace_directory.base_path }}/{{ cluster.name }}/openshift-baremetal-install/openshift-baremetal-install"
134 | dest: /usr/bin/openshift-baremetal-install
135 | remote_src: true
136 | mode: +x
137 |
138 | - name: Getting ssh public key
139 | ansible.builtin.slurp:
140 | src: "~/.ssh/id_rsa_{{ cluster.name }}.pub"
141 | register: key
142 |
143 | - name: Set SSH Keys as fact
144 | ansible.builtin.set_fact:
145 | sshkey: "{{ key['content'] | b64decode }}"
146 |
147 | - name: Firing up install-config.yaml
148 | ansible.builtin.template:
149 | src: templates/install-config-sno.j2
150 | dest: "{{ workspace_directory.base_path }}/{{ cluster.name }}/{{ workspace_directory.config_dir }}/install-config.yaml"
151 | mode: "0755"
152 |
153 | - name: Generate ignition config # noqa command-instead-of-shell no-changed-when
154 | ansible.builtin.shell: openshift-baremetal-install --dir {{ workspace_directory.base_path }}/{{ cluster.name }}/{{ workspace_directory.config_dir }} create single-node-ignition-config # noqa yaml[line-length]
155 |
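   |     # coreos-installer "iso ignition embed" options: -f overwrites any previously embedded config,
   |     # -i points at the ignition file to embed into the live ISO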
156 |     - name: Patch live ISO with the generated ignition file # noqa command-instead-of-shell no-changed-when
157 | ansible.builtin.shell: coreos-installer iso ignition embed -fi {{ workspace_directory.base_path }}/{{ cluster.name }}/{{ workspace_directory.config_dir }}/bootstrap-in-place-for-live-iso.ign {{ workspace_directory.base_path }}/{{ cluster.name }}/coreos.iso # noqa yaml[line-length]
158 |
159 | - name: Restart net-services
160 | ansible.builtin.service:
161 | name: "{{ item }}"
162 | state: restarted
163 | loop:
164 | - NetworkManager
165 | - dnsmasq
166 |
167 |     - name: Ensure libvirtd is restarted and enabled
168 | ansible.builtin.service:
169 | name: libvirtd
170 | state: restarted
171 | enabled: true
172 | when:
173 | - ansible_distribution == 'CentOS'
174 |
175 |     - name: Ensure virtqemud is restarted and enabled (CentOS 9)
176 | ansible.builtin.service:
177 | name: virtqemud
178 | state: restarted
179 | enabled: true
180 | when:
181 | - ansible_distribution == 'CentOS'
182 | - ansible_distribution_major_version | int == 9
183 |
184 | - name: Provision OCP node with Terraform
185 | community.general.terraform:
186 | project_path: "{{ workspace_directory.base_path }}/{{ cluster.name }}/terraform/sno"
187 | force_init: true
188 | variables:
189 | hostname: "master-sno"
190 | libvirt_network: "{{ cluster.name }}"
191 | libvirt_pool: "{{ cluster.name }}"
192 | vm_net_ip: "{{ cluster_nodes.host_list.sno.ip }}"
193 | coreos_iso_path: "{{ workspace_directory.base_path }}/{{ cluster.name }}/coreos.iso"
194 | cpu: "{{ cluster_nodes.specs.sno.vcpu }}"
195 | memory: "{{ cluster_nodes.specs.sno.mem }}"
196 | vm_volume_size: "{{ cluster_nodes.specs.sno.disk }}"
197 | local_volume_enabled: "{{ local_storage.enabled | default(false, true) }}"
198 | local_volume_size: "{{ local_storage.volume_size | default(omit, true) }}"
199 | vm_additional_nic: "{{ additional_nic.enabled | default(false, true) }}"
200 | vm_additional_nic_network: "{{ additional_nic.network | default(cluster.name, true) }}"
201 | state: present
202 | register: output_sno
203 |
204 | - name: Start Openshift install # noqa command-instead-of-shell no-changed-when
205 | ansible.builtin.shell: openshift-baremetal-install wait-for install-complete --dir {{ workspace_directory.base_path }}/{{ cluster.name }}/{{ workspace_directory.config_dir }} # noqa yaml[line-length]
206 |
207 | - name: Ensuring httpd-tools is present
208 | ansible.builtin.yum:
209 | name: httpd-tools
210 | state: present
211 |
212 | - name: Ensuring passlib is present
213 | ansible.builtin.pip:
214 | name:
215 | - passlib
216 | - bcrypt
217 |
218 | - name: Firing yaml configuration template for htpasswd identity provider
219 | ansible.builtin.template:
220 | src: templates/htpasswd_provider.j2
221 | dest: "{{ workspace_directory.base_path }}/{{ cluster.name }}/htpasswd_provider.yaml"
222 | mode: "0755"
223 |
224 | - name: Firing configuration script template for user creation
225 | ansible.builtin.template:
226 | src: templates/ocp_user_script.j2
227 | dest: /tmp/ocp_user.sh
228 | mode: +x
229 |
230 | - name: Creating htpasswd identity and user # noqa command-instead-of-shell no-changed-when
231 | ansible.builtin.shell: /tmp/ocp_user.sh
232 |
233 | - name: Sleeping 180 seconds...
234 | ansible.builtin.pause:
235 | seconds: 180
236 |
237 | - name: Your cluster is ready
238 | ansible.builtin.debug:
239 | msg:
240 | - "Cluster setup finished"
241 | - "Console URL: https://console-openshift-console.apps.{{ cluster.name }}.{{ domain }}"
242 | - "Kubeconfig available at {{ workspace_directory.base_path }}/{{ cluster.name }}/{{ workspace_directory.config_dir }}/auth or log in the console with the credentials you chose" # noqa yaml[line-length]
243 |
--------------------------------------------------------------------------------
/99_cleanup.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Cleanup playbook
3 | hosts: vm_host
4 | vars_files:
5 | - vars/infra_vars.yml
6 | - vars/cluster_vars.yml
7 | tasks:
8 | - name: Set home directory as fact
9 | ansible.builtin.set_fact:
10 | home_dir: "{{ ansible_env.HOME }}"
11 |
12 | - name: Destroy support VM
13 | community.general.terraform:
14 | force_init: true
15 | project_path: "{{ workspace_directory.base_path }}/{{ cluster.name }}/terraform/{{ item }}"
16 | state: absent
17 | variables:
18 | sshkey: "{{ playbook_dir }}"
19 | loop:
20 | - bastion
21 | - loadbalancer
22 | become: true
23 |
24 | - name: Destroy cluster VM
25 | community.general.terraform:
26 | force_init: true
27 | project_path: "{{ workspace_directory.base_path }}/{{ cluster.name }}/terraform/{{ item }}"
28 | state: absent
29 | loop:
30 | - bootstrap
31 | - workers
32 | - masters
33 | - sno
34 | become: true
35 |
36 | - name: Use TF project to ensure pool and network are removed
37 | community.general.terraform:
38 | project_path: "{{ workspace_directory.base_path }}/{{ cluster.name }}/terraform/libvirt-resources"
39 | variables:
40 | domain: "{{ domain }}"
41 | network_cidr: ' ["{{ network_cidr }}"]'
42 | cluster_name: "{{ cluster.name }}"
43 | force_init: true
44 | state: absent
45 | become: true
46 |
47 | - name: Delete all created paths
48 | ansible.builtin.file:
49 | path: "{{ item }}"
50 | state: absent
51 | loop:
52 | - "{{ workspace_directory.base_path }}/{{ cluster.name }}"
53 | - /etc/NetworkManager/conf.d/{{ cluster.name }}-localdns.conf
54 | - /etc/NetworkManager/dnsmasq.d/{{ cluster.name }}-libvirt_dnsmasq.conf
55 | become: true
56 |
--------------------------------------------------------------------------------
/99_cleanup_sno.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Cleanup playbook
3 | hosts: vm_host
4 | vars_files:
5 | - vars/sno_vars.yml
6 | tasks:
7 | - name: Set home directory as fact
8 | ansible.builtin.set_fact:
9 | home_dir: "{{ ansible_env.HOME }}"
10 |
11 | - name: Destroy cluster VM
12 | community.general.terraform:
13 | force_init: true
14 | project_path: "{{ workspace_directory.base_path }}/{{ cluster.name }}/terraform/{{ item }}"
15 | state: absent
16 | loop:
17 | - sno
18 | become: true
19 |
20 | - name: Use TF project to ensure pool and network are removed
21 | community.general.terraform:
22 | project_path: "{{ workspace_directory.base_path }}/{{ cluster.name }}/terraform/libvirt-resources-sno"
23 | force_init: true
24 | state: absent
25 | become: true
26 |
27 | - name: Delete all created paths
28 | ansible.builtin.file:
29 | path: "{{ item }}"
30 | state: absent
31 | loop:
32 | - /usr/bin/terraform
33 | - /usr/bin/openshift-baremetal-install
34 | - /usr/bin/coreos-installer
35 | - "{{ workspace_directory.base_path }}/{{ cluster.name }}"
36 | - /etc/NetworkManager/conf.d/{{ cluster.name }}-localdns.conf
37 | - /etc/NetworkManager/dnsmasq.d/{{ cluster.name }}-libvirt_dnsmasq.conf
38 | become: true
39 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2020 Alessandro Rossi
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/Makefile:
--------------------------------------------------------------------------------
1 | .PHONY: help
2 | help:
3 | @echo "Usage for libvirt-ocp4-provisioner:"
4 | @echo " setup to install required collections"
5 | @echo " create-ha to create the cluster using HA setup"
6 | @echo " create-sno to create the cluster using Single Node setup"
7 | @echo " destroy to destroy the cluster"
8 | .PHONY: setup
9 | setup:
10 | @ansible-galaxy collection install -r requirements.yml
11 | .PHONY: create-ha
12 | create-ha:
13 | @ansible-playbook main.yml
14 | .PHONY: create-sno
15 | create-sno:
16 | @ansible-playbook main-sno.yml -vv
17 | .PHONY: destroy
18 | destroy:
19 | @ansible-playbook 99_cleanup.yml
20 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | [![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
2 |
3 | # libvirt-ocp4-provisioner - Automate your cluster provisioning from 0 to OCP!
4 |
5 | Welcome to the home of the project!
6 | This project has been inspired by [@ValentinoUberti](https://github.com/ValentinoUberti), who did a GREAT job creating the playbooks to provision existing infrastructure nodes on oVirt and preparing for cluster installation.
7 |
8 | I wanted to play around with terraform and port his great work to libvirt, and so, here we are! I adapted his playbooks to libvirt's needs, making heavy use of in-memory inventory creation for the provisioned VMs, to keep the customizable variables to a minimum.
9 |
10 | - [Project Overview](#project-overview)
11 | - [Quickstart](#quickstart)
12 | - [HA Clusters](#ha-clusters)
13 | - [Single Node Openshift (SNO)](#single-node-openshift--sno-)
14 | - [Quickstart with Execution Environment](#quickstart-with-execution-environment)
15 | - [Build EE image](#build-ee-image)
16 | - [Run playbooks](#run-playbooks)
17 | - [Common vars](#common-vars)
18 | - [HA Configuration vars](#ha-configuration-vars)
19 | - [Single Node Openshift vars](#single-node-openshift-vars)
20 | - [Cleanup](#cleanup)
21 | - [Full deployment cleanup](#full-deployment-cleanup)
22 | - [SNO deployment cleanup](#sno-deployment-cleanup)
23 |
24 | ## Project Overview
25 |
26 | To give a quick overview, this project will allow you to provision a **fully working** and **stable** OCP environment, consisting of:
27 |
28 | - Bastion machine provisioned with:
29 | - dnsmasq (with SELinux module, compiled and activated)
30 | - dhcp based on dnsmasq
31 | - nginx (for ignition files and rhcos pxe-boot)
32 | - pxeboot
33 | - Loadbalancer machine provisioned with:
34 | - haproxy
35 | - OCP Bootstrap VM
36 | - OCP Master VM(s)
37 | - OCP Worker VM(s)
38 |
39 | It also takes care of preparing the host machine with needed packages, configuring:
40 |
41 | - dedicated libvirt network (fully customizable)
42 | - dedicated libvirt storage pool (fully customizable)
43 | - terraform
44 | - libvirt-terraform-provider (compiled and initialized based on [https://github.com/dmacvicar/terraform-provider-libvirt](https://github.com/dmacvicar/terraform-provider-libvirt))
45 |
46 | PXE boot is automatic, based on MAC binding for the different OCP node roles, so there is no need to choose anything from the boot menus: you can just run the playbook, grab a beer, and come back to a fully running OCP cluster.
47 |
48 | The version can be selected freely by specifying the desired one (e.g. 4.10.x, 4.13.2) or the latest stable release with "stable". **Versions before 4.10 are not supported anymore!!**
49 |
50 | Now support for **Single Node Openshift - SNO** has been added!
51 |
52 | **bastion** and **loadbalancer** VMs spec:
53 |
54 | - OS: CentOS Stream 9 GenericCloud base image [https://cloud.centos.org/centos/9-stream/x86_64/images/](https://cloud.centos.org/centos/9-stream/x86_64/images/)
55 | - cloud-init:
56 | - user: ocpinstall
57 | - pass: ocprocks
58 |   - ssh-key: generated during VM provisioning and stored in the project folder
59 |
60 | The user can also log in via SSH.
61 |
62 | ## Quickstart
63 |
64 | First of all, you need to install required collections to get started:
65 |
66 | ```bash
67 | ansible-galaxy collection install -r requirements.yml
68 | ```
69 |
70 | The playbooks are meant to run against local host(s), defined under the **vm_host** group in your inventory, depending on how many clusters you want to configure at once.
71 |
72 | ### HA Clusters
73 |
74 | ```bash
75 | ansible-playbook main.yml
76 | ```
77 |
78 | ### Single Node Openshift (SNO)
79 |
80 | ```bash
81 | ansible-playbook main-sno.yml
82 | ```
83 |
84 | You can quickly make it work by configuring the needed vars, but you can go straight with the defaults!
85 |
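   | The bundled Makefile wraps the same commands, if you prefer make targets:
   |
   | ```bash
   | make setup       # install required collections
   | make create-ha   # HA cluster
   | make create-sno  # Single Node Openshift
   | make destroy     # tear the cluster down
   | ```
   |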
86 | ## Quickstart with Execution Environment
87 |
88 | The playbooks are compatible with the newly introduced **Execution environments (EE)**. To use them with an execution environment you need to have [ansible-builder](https://ansible-builder.readthedocs.io/en/stable/) and [ansible-navigator](https://ansible-navigator.readthedocs.io/en/latest/) installed.
89 |
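   | Both tools can be installed via pip, for example:
   |
   | ```bash
   | pip install ansible-builder ansible-navigator
   | ```
   |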
90 | ### Build EE image
91 |
92 | To build the EE image, run ansible-builder from the repository root:
93 |
94 | ```bash
95 | ansible-builder build -f execution-environment/execution-environment.yml -t ocp-ee
96 | ```
97 |
98 | ### Run playbooks
99 |
100 | To run the playbooks, use ansible-navigator:
101 |
102 | ```bash
103 | ansible-navigator run main.yml -m stdout
104 | ```
105 |
106 | Or, in case of Single Node Openshift:
107 |
108 | ```bash
109 | ansible-navigator run main-sno.yml -m stdout
110 | ```
111 |
112 | ## Common vars
113 |
114 | The network created is a simple NAT configuration without DHCP, since DHCP is served by the **bastion** VM. The defaults are fine if you don't have an overlapping network.
115 |
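   | After provisioning, you can inspect the resulting libvirt objects with virsh (assuming the default cluster name, ocp4):
   |
   | ```bash
   | sudo virsh net-dumpxml ocp4    # NAT network definition
   | sudo virsh pool-info ocp4      # storage pool backing the cluster volumes
   | ```
   |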
116 | ### HA Configuration vars
117 |
118 | **vars/infra_vars.yml**
119 |
120 | ```yaml
121 | infra_nodes:
122 | host_list:
123 | bastion:
124 | - ip: 192.168.100.4
125 | loadbalancer:
126 | - ip: 192.168.100.5
127 | dhcp:
128 | timezone: "Europe/Rome"
129 | ntp: 204.11.201.10
130 | ```
131 |
132 | **vars/cluster_vars.yml**
133 |
134 | ```yaml
135 | three_node: false
136 | network_cidr: 192.168.100.0/24
137 | domain: hetzner.lab
138 | additional_block_device:
139 | enabled: false
140 | size: 100
141 | additional_nic:
142 | enabled: false
143 | network:
144 | cluster:
145 | version: stable
146 | name: ocp4
147 | ocp_user: admin
148 | ocp_pass: openshift
149 | pullSecret: ""
150 | cluster_nodes:
151 | host_list:
152 | bootstrap:
153 | - ip: 192.168.100.6
154 | masters:
155 | - ip: 192.168.100.7
156 | - ip: 192.168.100.8
157 | - ip: 192.168.100.9
158 | workers:
159 | - ip: 192.168.100.10
160 | role: infra
161 | - ip: 192.168.100.11
162 | - ip: 192.168.100.12
163 | specs:
164 | bootstrap:
165 | vcpu: 4
166 | mem: 16
167 | disk: 40
168 | masters:
169 | vcpu: 4
170 | mem: 16
171 | disk: 40
172 | workers:
173 | vcpu: 2
174 | mem: 8
175 | disk: 40
176 | ```
177 |
178 | Where **domain** is the DNS domain assigned to the nodes and **cluster.name** is the name chosen for the OCP cluster installation.
179 |
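   | With the defaults above (cluster.name: ocp4, domain: hetzner.lab), for example, the main endpoints resolve to names like:
   |
   | ```
   | api.ocp4.hetzner.lab                              # Kubernetes API
   | console-openshift-console.apps.ocp4.hetzner.lab   # web console
   | *.apps.ocp4.hetzner.lab                           # application routes
   | ```
   |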
180 | **mem** and **disk** are expressed in GB
181 |
182 | **cluster.version** allows you to choose a particular version to be installed (e.g. 4.13.2, stable)
183 |
184 | **additional_block_device** controls whether an additional disk of the given size should be added to worker nodes, or to control plane nodes in case of a compact (3-node) setup
185 |
186 | **additional_nic** allows the creation of an additional network interface on all nodes. It is possible to customize the libvirt network to attach to it.
187 |
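   | For example, to give every node an extra 100 GB disk and a second NIC attached to a custom libvirt network (hypothetical network name shown):
   |
   | ```yaml
   | additional_block_device:
   |   enabled: true
   |   size: 100
   | additional_nic:
   |   enabled: true
   |   network: my-second-net
   | ```
   |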
188 | The **role** for workers is used for node labelling. Omitting it leaves a node with the default role, **worker**
189 |
190 | The VM count is derived from the elements of each list; in this example we get:
191 |
192 | - 3 master nodes with 4vcpu and 16G memory
193 | - 3 worker nodes with 2vcpu and 8G memory
194 |
195 | Recommended values are:
196 |
197 | | Role | vCPU | RAM | Storage |
198 | | --------- | ---- | --- | ------- |
199 | | bootstrap | 4 | 16G | 120G |
200 | | master | 4 | 16G | 120G |
201 | | worker | 2 | 8G | 120G |
202 |
203 | For testing purposes, the minimum storage value is set to **60GB**.
204 |
205 | **The playbook now supports a three-node setup (3 masters holding both the master and worker roles), intended purely for testing. You can enable it with the three_node boolean var, ONLY FOR 4.6+**
206 |
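   | For example, in **vars/cluster_vars.yml**:
   |
   | ```yaml
   | three_node: true
   | ```
   |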
207 | ### Single Node Openshift vars
208 |
209 | **vars/sno_vars.yml**
210 |
211 | ```yaml
212 | domain: hetzner.lab
213 | network_cidr: 192.168.100.0/24
214 | cluster:
215 | version: stable
216 | name: ocp4
217 | ocp_user: admin
218 | ocp_pass: openshift
219 | pullSecret: ""
220 | cluster_nodes:
221 | host_list:
222 | sno:
223 | ip: 192.168.100.7
224 | specs:
225 | sno:
226 | vcpu: 8
227 | mem: 32
228 | disk: 120
229 | local_storage:
230 | enabled: true
231 | volume_size: 50
232 | additional_nic:
233 | enabled: false
234 | network:
235 | ```
236 |
237 | The **local_storage** field can be used to attach an additional disk to the VM, in order to provision volumes using, for instance, rook-ceph or the Local Storage Operator.
238 |
239 | **additional_nic** allows the creation of an additional network interface on the node. It is possible to customize the libvirt network to attach to it.
240 |
241 | In both cases, the Pull Secret can be retrieved easily at [https://cloud.redhat.com/openshift/install/pull-secret](https://cloud.redhat.com/openshift/install/pull-secret)
242 |
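   | The downloaded secret is a single JSON document; paste it into the pullSecret var as a quoted string (placeholder value shown):
   |
   | ```yaml
   | pullSecret: '{"auths":{"cloud.openshift.com":{"auth":"<base64-token>","email":"you@example.com"}}}'
   | ```
   |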
243 | **HTPasswd** provider is created after the installation, you can use **ocp_user** and **ocp_pass** to login!
244 |
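   | Once the provider is active, you can also log in from the CLI, assuming the default cluster name, domain, and credentials:
   |
   | ```bash
   | oc login https://api.ocp4.hetzner.lab:6443 -u admin -p openshift
   | ```
   |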
245 | ## Cleanup
246 |
247 | To clean all resources, you can simply run the cleanup playbooks.
248 |
249 | ### Full deployment cleanup
250 |
251 | ```bash
252 | ansible-playbook -i inventory 99_cleanup.yml
253 | ```
254 |
255 | ### SNO deployment cleanup
256 |
257 | ```bash
258 | ansible-playbook -i inventory 99_cleanup_sno.yml
259 | ```
260 |
261 | **DISCLAIMER**
262 | This project is intended for testing/lab use only; it is not supported or endorsed in any way by Red Hat.
263 |
264 | Feel free to suggest modifications/improvements.
265 |
266 | Alex
267 |
--------------------------------------------------------------------------------
/_config.yml:
--------------------------------------------------------------------------------
1 | theme: jekyll-theme-midnight
--------------------------------------------------------------------------------
/ansible-navigator.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | ansible-navigator:
3 | ansible:
4 | cmdline: "--forks 15"
5 | inventory:
6 | help: False
7 | entries:
8 | - ./inventory
9 | execution-environment:
10 | container-engine: podman
11 | enabled: true
12 | image: ocp-ee:latest
13 | pull:
14 | policy: never
15 | logging:
16 | level: debug
17 | # mode: stdout
18 |
19 | playbook-artifact:
20 | enable: false
21 |
--------------------------------------------------------------------------------
/ansible.cfg:
--------------------------------------------------------------------------------
1 | [defaults]
2 | inventory=./inventory
3 | log_path=./ansible.log
4 | [privilege_escalation]
5 | become_ask_pass=false
6 | become_user=root
7 | become_method=sudo
8 | become=false
9 |
--------------------------------------------------------------------------------
/execution-environment/execution-environment.yml:
--------------------------------------------------------------------------------
1 | ---
2 | version: 1
3 |
4 | #ansible_config: 'ansible.cfg'
5 |
6 | dependencies:
7 | galaxy: requirements.yml
8 |
9 |
--------------------------------------------------------------------------------
/execution-environment/requirements.yml:
--------------------------------------------------------------------------------
1 | collections:
2 | - name: ansible.posix
3 | - name: ansible.utils
4 | - name: community.crypto
5 | - name: community.general
6 | - name: community.libvirt
--------------------------------------------------------------------------------
/files/localdns.conf:
--------------------------------------------------------------------------------
1 | [main]
2 | dns=dnsmasq
3 |
--------------------------------------------------------------------------------
/files/machineconfigpool.yml:
--------------------------------------------------------------------------------
1 | apiVersion: machineconfiguration.openshift.io/v1
2 | kind: MachineConfigPool
3 | metadata:
4 | name: infra
5 | spec:
6 | machineConfigSelector:
7 | matchExpressions:
8 | - {key: machineconfiguration.openshift.io/role, operator: In, values: [worker,infra]}
9 | nodeSelector:
10 | matchLabels:
11 | node-role.kubernetes.io/infra: ""
12 |
--------------------------------------------------------------------------------
/files/my-dnsmasq.pp:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/kubealex/libvirt-ocp4-provisioner/50fd9aef10ae48ad1acdcde58c18c44a4c6deda4/files/my-dnsmasq.pp
--------------------------------------------------------------------------------
/files/my-dnsmasq.te:
--------------------------------------------------------------------------------
1 |
2 | module my-dnsmas 1.0;
3 |
4 | require {
5 | type dnsmasq_t;
6 | type var_lib_t;
7 | type dnsmasq_exec_t;
8 | class file { getattr open read };
9 | class dir search;
10 | }
11 |
12 | #============= dnsmasq_t ==============
13 |
14 | #!!!! This avc is allowed in the current policy
15 | allow dnsmasq_t dnsmasq_exec_t:dir search;
16 |
17 | #!!!! This avc is allowed in the current policy
18 | allow dnsmasq_t var_lib_t:file { getattr open read };
19 |
--------------------------------------------------------------------------------
/files/patch_monitoring.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ConfigMap
3 | metadata:
4 | name: cluster-monitoring-config
5 | namespace: openshift-monitoring
6 | data:
7 | config.yaml: |+
8 | alertmanagerMain:
9 | nodeSelector:
10 | node-role.kubernetes.io/infra: ""
11 | prometheusK8s:
12 | nodeSelector:
13 | node-role.kubernetes.io/infra: ""
14 | prometheusOperator:
15 | nodeSelector:
16 | node-role.kubernetes.io/infra: ""
17 | grafana:
18 | nodeSelector:
19 | node-role.kubernetes.io/infra: ""
20 | k8sPrometheusAdapter:
21 | nodeSelector:
22 | node-role.kubernetes.io/infra: ""
23 | kubeStateMetrics:
24 | nodeSelector:
25 | node-role.kubernetes.io/infra: ""
26 | telemeterClient:
27 | nodeSelector:
28 | node-role.kubernetes.io/infra: ""
29 | openshiftStateMetrics:
30 | nodeSelector:
31 | node-role.kubernetes.io/infra: ""
32 |
--------------------------------------------------------------------------------
/group_vars/vm_host/packages.yml:
--------------------------------------------------------------------------------
1 | virtualization_packages:
2 | rhel:
3 | - unzip
4 | - git
5 | - gcc
6 | - make
7 | - python3-lxml
8 | - python3-netaddr
9 | - python3-libvirt
10 | - python3-devel
11 | - libvirt-devel
12 | - libvirt
13 | - qemu-kvm
14 | - virt-install
15 | - virt-manager
16 | - edk2-ovmf
17 | centos:
18 | - unzip
19 | - git
20 | - gcc
21 | - make
22 | - python3-lxml
23 | - python3-netaddr
24 | - python3-libvirt
25 | - python3-devel
26 | - libvirt
27 | - qemu-kvm
28 | - virt-install
29 | - virt-manager
30 | - edk2-ovmf
31 | fedora:
32 | - unzip
33 | - git
34 | - gcc
35 | - make
36 | - python3-lxml
37 | - python3-netaddr
38 | - python3-libvirt
39 |     - python3-devel
40 | - libvirt-devel
41 | - libvirt
42 | - qemu-kvm
43 | - virt-install
44 | - virt-manager
45 | - edk2-ovmf
46 |
--------------------------------------------------------------------------------
/group_vars/vm_host/sno-vars.yml:
--------------------------------------------------------------------------------
1 | downloads:
2 | ocp:
3 | base_url: https://mirror.openshift.com/pub/openshift-v4/clients/ocp
4 | ocp_oc_cli: openshift-client-linux
5 | coreos:
6 | installer: https://mirror.openshift.com/pub/openshift-v4/clients/coreos-installer/latest/coreos-installer
7 | live_media: https://mirror.openshift.com/pub/openshift-v4/dependencies/rhcos/latest/rhcos-live.x86_64.iso
8 | workspace_directory:
9 | base_path: "{{ home_dir }}/ocpsetup"
10 | config_dir: config
11 |
--------------------------------------------------------------------------------
/group_vars/vm_host/terraform.yml:
--------------------------------------------------------------------------------
1 | terraform_release_url: https://releases.hashicorp.com/terraform/1.9.8/terraform_1.9.8_linux_amd64.zip
2 |
--------------------------------------------------------------------------------
/host_vars/bastion/downloads.yml:
--------------------------------------------------------------------------------
1 | skip_download: false
2 | downloads:
3 | ocp:
4 | base_url: https://mirror.openshift.com/pub/openshift-v4/clients/ocp
5 | ocp_oc_cli: openshift-client-linux
6 | ocp_installer: openshift-install-linux
7 | rhcos:
8 | base_url: https://mirror.openshift.com/pub/openshift-v4/dependencies/rhcos/
9 | boot_files:
10 | initramfs: rhcos-live-initramfs.x86_64.img
11 | kernel: rhcos-live-kernel-x86_64
12 | rootfs: rhcos-live-rootfs.x86_64.img
13 |
--------------------------------------------------------------------------------
/host_vars/bastion/fw_bastion.yml:
--------------------------------------------------------------------------------
1 | ports:
2 | - "69/udp"
3 | - "69/tcp"
4 | - "68/tcp"
5 | - "68/udp"
6 | - "4011/tcp"
7 | - "4011/udp"
8 |
9 | services:
10 | - http
11 | - dns
12 | - dhcp
13 | - mountd
14 | - rpc-bind
15 |
--------------------------------------------------------------------------------
/host_vars/bastion/packages.yml:
--------------------------------------------------------------------------------
1 | packages:
2 | fedora:
3 | - syslinux
4 | - dnsmasq
5 | - nginx
6 | - cronie
7 | - cronie-anacron
8 | - python-netaddr
9 | - firewalld
10 |
11 | centos:
12 | - python39
13 | - syslinux
14 | - dnsmasq
15 | - nginx
16 | - cronie
17 | - cronie-anacron
18 | - tar
19 | - bind-utils
20 | - firewalld
21 | - python3-pip
22 |
--------------------------------------------------------------------------------
/host_vars/bastion/pxe.yml:
--------------------------------------------------------------------------------
1 | nginx_document_root: /usr/share/nginx/html
2 | tftp_boot_root: /var/lib/tftpboot
3 | tftp_workspace_dir: rhcos
4 | nginx_workspace_dir: metal
5 | pxe_files:
6 | - menu.c32
7 | - chain.c32
8 | - ldlinux.c32
9 | - libcom32.c32
10 | - ldlinux.c32
11 | - pxelinux.0
12 | - libutil.c32
13 |
--------------------------------------------------------------------------------
/host_vars/bastion/workspace.yml:
--------------------------------------------------------------------------------
1 | workspace_directory:
2 | base_path: /home/ocpinstall/ocpInstallerFile
3 | config_dir: config
4 |
--------------------------------------------------------------------------------
/host_vars/loadbalancer/fw_loadbalancer.yml:
--------------------------------------------------------------------------------
1 | services:
2 | - http
3 | - https
4 |
5 | internal_zone_port:
6 | - "22623/tcp"
7 | - "6443/tcp"
8 |
--------------------------------------------------------------------------------
/host_vars/loadbalancer/packages.yml:
--------------------------------------------------------------------------------
1 | packages:
2 | - haproxy
3 | - policycoreutils-python-utils
4 | - firewalld
5 |
--------------------------------------------------------------------------------
/inventory:
--------------------------------------------------------------------------------
1 | [vm_host]
2 | localhost ansible_connection=local
3 |
--------------------------------------------------------------------------------
/main-sno.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - import_playbook: 00_sno_prerequisites.yml
3 | - import_playbook: 02_sno_setup_working_directory.yml
4 | - import_playbook: 01_sno_install_virtualization_tools.yml
5 | - import_playbook: 70_setup_sno_cluster.yml
6 |
--------------------------------------------------------------------------------
/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - import_playbook: 00_prerequisites.yml
3 | - import_playbook: 02_setup_working_directory.yml
4 | - import_playbook: 01_install_virtualization_tools.yml
5 | - import_playbook: 05_vm_provisioning_infra.yml
6 | - import_playbook: 07_vm_provisioning_ocp.yml
7 | - import_playbook: 10_infra_packages.yml
8 | - import_playbook: 13_bastion_networking.yml
9 | - import_playbook: 15_bastion_workspace.yml
10 | - import_playbook: 17_bastion_services.yml
11 | - import_playbook: 19_lb_services.yml
12 | - import_playbook: 20_prepare_ocp_install.yml
13 | - import_playbook: 25_pxeboot_vms.yml
14 | - import_playbook: 27_ocp_install.yml
15 | - import_playbook: 29_remove_bootstrap.yml
16 | - import_playbook: 30_create_users.yml
17 | - import_playbook: 33_patch_nodes.yml
18 | - import_playbook: 34_move_services_to_infra_nodes.yml
19 |
--------------------------------------------------------------------------------
/requirements.yml:
--------------------------------------------------------------------------------
1 | collections:
2 | - name: community.general
3 | - name: community.crypto
4 | - name: community.libvirt
5 | - name: ansible.posix
6 | - name: ansible.utils
7 |
--------------------------------------------------------------------------------
/templates/csr.j2:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | export KUBECONFIG={{ workspace_directory.base_path }}/{{ cluster.name }}/{{ workspace_directory.config_dir }}/auth/kubeconfig
3 | /usr/bin/oc get csr -oname | xargs /usr/bin/oc adm certificate approve
4 |
--------------------------------------------------------------------------------
/templates/dnsmasq.j2:
--------------------------------------------------------------------------------
1 | ## External dns ##
2 |
3 | server={{ hostvars['bastion']['host_ip'] | ansible.utils.ipsubnet(24) | ansible.utils.ipaddr('net') | ansible.utils.ipmath(1) }}
4 |
5 | ## External dns end ##
6 |
7 | ## Bastion ##
8 |
9 | address=/{{ hostvars['bastion']['host_fqdn'] }}/{{ hostvars['bastion']['host_ip'] }}
10 | dhcp-host={{ hostvars['bastion']['host_mac'] }},{{ hostvars['bastion']['host_ip'] }}
11 |
12 | ## Bastion end ##
13 |
14 | ## LoadBalancer ##
15 |
16 | address=/{{ hostvars['loadbalancer']['host_fqdn'] }}/{{ hostvars['loadbalancer']['host_ip'] }}
17 | dhcp-host={{ hostvars['loadbalancer']['host_mac'] }},{{ hostvars['loadbalancer']['host_ip'] }}
18 |
19 | ## LoadBalancer end ##
20 |
21 | ## Required fqdn and wildcard for OCP ##
22 |
23 | address=/{{ hostvars['loadbalancer']['host_api_fqdn'] }}/{{ hostvars['loadbalancer']['host_ip'] }}
24 | address=/{{ hostvars['loadbalancer']['host_apps_fqdn'] }}/{{ hostvars['loadbalancer']['host_ip'] }}
25 | address=/{{ hostvars['loadbalancer']['host_api_int_fqdn'] }}/{{ hostvars['loadbalancer']['host_ip'] }}
26 |
27 | ## Required fqdn and wildcard for OCP end ##
28 |
29 | ## Bootstrap ##
30 |
31 | {% for node in groups['bootstrap'] %}
32 | address=/{{ hostvars[node].node_fqdn }}/{{ hostvars[node].node_ip }}
33 | ptr-record={{ hostvars[node].node_reversedns[:-1] }},{{ hostvars[node].node_fqdn }}
34 | dhcp-host={{ hostvars[node].node_mac }},{{ hostvars[node].node_ip }}
35 | {%endfor%}
36 |
37 | ## Bootstrap end ##
38 |
39 | ## Etcd ##
40 |
41 | {% for node in groups['masters'] %}
42 | address=/{{ hostvars[node].etcd_fqdn }}/{{ hostvars[node].etcd_ip }}
43 | {%endfor%}
44 |
45 | ## Etcd end ##
46 |
47 | ## Masters ##
48 | {% for node in groups['masters'] %}
49 | address=/{{ hostvars[node].node_fqdn }}/{{ hostvars[node].node_ip }}
50 | ptr-record={{ hostvars[node].node_reversedns[:-1] }},{{ hostvars[node].node_fqdn }}
51 | dhcp-host={{ hostvars[node].node_mac }},{{ hostvars[node].node_ip }}
52 | {%endfor%}
53 |
54 | ## Masters end ##
55 |
56 | {% if not three_node %}
57 | ## Workers ##
58 | {% for node in groups['workers'] %}
59 | address=/{{ hostvars[node].node_fqdn }}/{{ hostvars[node].node_ip }}
60 | ptr-record={{ hostvars[node].node_reversedns[:-1] }},{{ hostvars[node].node_fqdn }}
61 | dhcp-host={{ hostvars[node].node_mac }},{{ hostvars[node].node_ip }}
62 | {%endfor%}
63 |
64 | ## Workers end ##
65 | {%endif%}
66 |
67 | ## SRV records for etcd service. Priority must be 0 and Weight must be 10 ###
68 |
69 | {% for node in groups['masters'] %}
70 | srv-host=_etcd-server-ssl._tcp.{{ hostvars['bastion'].ocp_domain }},{{ hostvars[node].etcd_fqdn }},2380,0,10
71 | {%endfor%}
72 |
73 | ## SRV records end ##
74 |
75 | ## PXE ##
76 |
77 | enable-tftp
78 | tftp-root={{ tftp_boot_root }},{{ hostvars['bastion'].host_interface }}
79 | dhcp-boot=pxelinux.0
80 |
81 | ## PXE end ##
82 |
83 | ## DHCP ##
84 |
85 | dhcp-option=101,"{{ hostvars['bastion'].timezone }}"
86 | domain={{ hostvars['bastion'].ocp_domain }}
87 | interface={{ hostvars['bastion'].host_interface }}
88 | dhcp-option={{ hostvars['bastion'].host_interface }},3,{{ hostvars['bastion'].host_ip | ansible.utils.ipsubnet(24) | ansible.utils.ipaddr('net') | ansible.utils.ipmath(1) }}
89 | dhcp-option=option:netmask,{{ hostvars['bastion'].host_ip | ansible.utils.ipsubnet(24) | ansible.utils.ipaddr('netmask') }}
90 | dhcp-option=option:dns-server,{{ hostvars['bastion'].host_ip }}
91 | dhcp-option=option:ntp-server,{{ hostvars['bastion'].ntp_server }}
92 | dhcp-range={{ hostvars['bastion'].host_interface }},{{ hostvars['bastion'].host_ip | ansible.utils.ipmath(1) }},{{ hostvars['bastion'].host_ip | ansible.utils.ipsubnet(24) | ansible.utils.ipaddr('last_usable') }},12h
93 |
94 | ## DHCP end ##
95 |
96 |
--------------------------------------------------------------------------------
/templates/haproxy.j2:
--------------------------------------------------------------------------------
1 | global
2 | log 127.0.0.1 local0
3 | chroot /var/lib/haproxy
4 | pidfile /var/run/haproxy.pid
5 | maxconn 4000
6 | user haproxy
7 | group haproxy
8 | daemon
9 | stats socket /var/lib/haproxy/stats
10 |
11 | defaults
12 | mode http
13 | log global
14 | option httplog
15 | option dontlognull
16 | #option http-server-close
17 | option forwardfor except 127.0.0.0/8
18 | option redispatch
19 | retries 3
20 | timeout http-request 10s
21 | timeout queue 1m
22 | timeout connect 10s
23 | timeout client 1m
24 | timeout server 1m
25 | timeout http-keep-alive 10s
26 | timeout check 10s
27 | maxconn 3000
28 |
29 | frontend openshift-api-server
30 | bind *:6443
31 | default_backend openshift-api-server
32 | mode tcp
33 | option tcplog
34 |
35 | backend openshift-api-server
36 | balance source
37 | mode tcp
38 |
39 | {% for node in groups['masters'] %}
40 | server {{ hostvars[node].node_fqdn }} {{ hostvars[node].node_ip }}:6443 check
41 | {%endfor%}
42 | {% for node in groups['bootstrap'] %}
43 | server {{ hostvars[node].node_fqdn }} {{ hostvars[node].node_ip }}:6443 check
44 | {%endfor%}
45 |
46 | frontend machine-config-server
47 | bind *:22623
48 | default_backend machine-config-server
49 | mode tcp
50 | option tcplog
51 |
52 | backend machine-config-server
53 | balance source
54 | mode tcp
55 | {% for node in groups['masters'] %}
56 | server {{ hostvars[node].node_fqdn }} {{ hostvars[node].node_ip }}:22623 check
57 | {%endfor%}
58 | {% for node in groups['bootstrap'] %}
59 | server {{ hostvars[node].node_fqdn }} {{ hostvars[node].node_ip }}:22623 check
60 | {%endfor%}
61 |
62 | frontend ingress-http
63 | bind *:80
64 | default_backend ingress-http
65 | mode tcp
66 | option tcplog
67 |
68 | backend ingress-http
69 | balance source
70 | mode tcp
71 | {% for node in groups['workers'] %}
72 | server {{ hostvars[node].node_fqdn }} {{ hostvars[node].node_ip }}:80 check
73 | {%endfor%}
74 |
75 | frontend ingress-https
76 | bind *:443
77 | default_backend ingress-https
78 | mode tcp
79 | option tcplog
80 |
81 | backend ingress-https
82 | balance source
83 | mode tcp
84 | {% for node in groups['workers'] %}
85 | server {{ hostvars[node].node_fqdn }} {{ hostvars[node].node_ip }}:443 check
86 | {%endfor%}
87 |
--------------------------------------------------------------------------------
/templates/htpasswd_provider.j2:
--------------------------------------------------------------------------------
1 | apiVersion: config.openshift.io/v1
2 | kind: OAuth
3 | metadata:
4 | name: cluster
5 | spec:
6 | identityProviders:
7 | - name: htpasswd_provider
8 | mappingMethod: claim
9 | type: HTPasswd
10 | htpasswd:
11 | fileData:
12 | name: htpass-secret
13 |
--------------------------------------------------------------------------------
/templates/install-config-sno.j2:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | baseDomain: {{ domain }}
3 | metadata:
4 | name: {{ cluster.name }}
5 | networking:
6 | networkType: OVNKubernetes
7 | machineCIDR: {{ network_cidr }}
8 | compute:
9 | - name: worker
10 | replicas: 0
11 | controlPlane:
12 | name: master
13 | replicas: 1
14 | platform:
15 | none: {}
16 | bootstrapInPlace:
17 |   installationDisk: /dev/vda
18 | pullSecret: '{{ cluster.pullSecret }}'
19 | sshKey: '{{ sshkey }}'
20 |
--------------------------------------------------------------------------------
/templates/install-config.j2:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | baseDomain: {{ hostvars['bastion'].domain }}
3 | compute:
4 | - hyperthreading: Enabled
5 | name: worker
6 | replicas: 0
7 | controlPlane:
8 | hyperthreading: Enabled
9 | name: master
10 | replicas: {{ cluster_nodes.host_list.masters | count }}
11 | metadata:
12 | name: {{ hostvars['bastion'].cluster_name }}
13 | networking:
14 | clusterNetwork:
15 | - cidr: 10.128.0.0/14
16 | hostPrefix: 23
17 | networkType: {{ cluster.cni_plugin | default('OVNKubernetes', true) }}
18 | serviceNetwork:
19 | - 172.30.0.0/16
20 | platform:
21 | none: {}
22 | pullSecret: '{{ cluster.pullSecret }}'
23 | sshKey: '{{ sshkey }}'
24 |
--------------------------------------------------------------------------------
/templates/label_nodes.j2:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | export KUBECONFIG={{ workspace_directory.base_path }}/{{ cluster.name }}/config/auth/kubeconfig
3 | {% for node in groups['infra'] %}
4 | oc label node {{ hostvars[node].node_fqdn }} node-role.kubernetes.io/worker-
5 | oc label node {{ hostvars[node].node_fqdn }} node-role.kubernetes.io/{{ hostvars[node].ocp_role }}=""
6 | {% endfor %}
7 |
--------------------------------------------------------------------------------
/templates/libvirt_dnsmasq.j2:
--------------------------------------------------------------------------------
1 | server=/{{ cluster.name }}.{{ domain }}/{{ infra_vars.host_list.bastion[0].ip }}
2 |
--------------------------------------------------------------------------------
/templates/libvirt_dnsmasq_sno.j2:
--------------------------------------------------------------------------------
1 | server={{ network_cidr | ansible.utils.next_nth_usable(1) }}
2 | address=/{{ cluster.name }}.{{ domain }}/{{ cluster_nodes.host_list.sno.ip }}
3 |
--------------------------------------------------------------------------------
/templates/ocp_user_script.j2:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | export KUBECONFIG={{ workspace_directory.base_path }}/{{ cluster.name }}/config/auth/kubeconfig
3 | htpasswd -c -B -b {{ workspace_directory.base_path }}/{{ cluster.name }}/user.htpasswd {{ cluster.ocp_user }} {{ cluster.ocp_pass }}
4 | /usr/bin/oc create secret generic htpass-secret --from-file=htpasswd={{ workspace_directory.base_path }}/{{ cluster.name }}/user.htpasswd -n openshift-config
5 | /usr/bin/oc apply -f {{ workspace_directory.base_path }}/{{ cluster.name }}/htpasswd_provider.yaml
6 | /usr/bin/oc adm policy add-cluster-role-to-user cluster-admin {{ cluster.ocp_user }}
7 |
--------------------------------------------------------------------------------
/templates/patch_default_selector.j2:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | export KUBECONFIG={{ workspace_directory.base_path }}/{{ cluster.name }}/config/auth/kubeconfig
3 | oc patch scheduler/cluster --type=merge -p '{"spec":{"defaultNodeSelector": "node-role.kubernetes.io/worker="}}'
4 |
--------------------------------------------------------------------------------
/templates/patch_ingress_selector.j2:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | export KUBECONFIG={{ workspace_directory.base_path }}/{{ cluster.name }}/config/auth/kubeconfig
3 | /usr/bin/oc patch ingresscontroller/default --type=merge -p '{"spec":{"nodePlacement":{"nodeSelector":{"matchLabels":{"node-role.kubernetes.io/infra": ""}}}}}' -n openshift-ingress-operator
4 | /usr/bin/oc patch ingresscontroller/default -n openshift-ingress-operator --type=merge -p '{"spec":{"replicas": {{ groups['infra'] | count }} }}'
5 |
6 |
--------------------------------------------------------------------------------
/templates/patch_registry_selector.j2:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 | export KUBECONFIG={{ workspace_directory.base_path }}/{{ cluster.name }}/config/auth/kubeconfig
3 | /usr/bin/oc patch config/cluster --type=merge -p '{"spec":{"nodeSelector":{"node-role.kubernetes.io/infra": ""}}}'
4 |
--------------------------------------------------------------------------------
/templates/pxeboot_mac.j2:
--------------------------------------------------------------------------------
1 | DEFAULT {{ hostvars[item].node_role }}
2 | SAY Now booting the kernel from SYSLINUX...
3 | LABEL {{ hostvars[item].node_role }}
4 | KERNEL {{ tftp_workspace_dir }}/{{ hostvars['bastion'].rhcos_kernel }}
5 | APPEND initrd=rhcos/{{ hostvars['bastion'].rhcos_initramfs }} coreos.live.rootfs_url=http://{{ hostvars['bastion'].host_ip }}/{{ nginx_workspace_dir }}/{{ hostvars['bastion'].rhcos_os }} coreos.inst.install_dev=/dev/vda coreos.inst.ignition_url=http://{{ hostvars['bastion'].host_ip }}/{{ nginx_workspace_dir }}/{{ hostvars[item].node_role }}.ign {% if cluster.additional_nic.enabled %} ip=ens3:dhcp ip=ens4:none {% endif %}
6 |
--------------------------------------------------------------------------------
/templates/systemd-resolved.j2:
--------------------------------------------------------------------------------
1 | [Resolve]
2 | DNS={{ network_cidr | ansible.utils.next_nth_usable(1) }}
3 | Domains=~{{ domain }}
4 |
--------------------------------------------------------------------------------
/terraform/bastion/bastion.tf:
--------------------------------------------------------------------------------
1 | # variables that can be overridden
2 | variable "hostname" { default = "bastion" }
3 | variable "domain" { default = "hetzner.lab" }
4 | variable "cluster_name" { default = "ocp4" }
5 | variable "memory" { default = 6 }
6 | variable "cpu" { default = 2 }
7 | variable "iface" { default = "eth0" }
8 | variable "libvirt_network" { default = "ocp4" }
9 | variable "libvirt_pool" { default= "ocp4" }
10 | variable "vm_volume_size" { default = 20 }
11 | variable "sshkey" { default = "" }
12 | #variable "mac" { default = "FF:FF:FF:FF:FF:FF" }
13 | variable "network_data" {
14 | type = map
15 | default = {
16 | hostIP = "192.168.100.31"
17 | broadcast = "192.168.100.255"
18 | dns = "192.168.100.1"
19 | gateway = "192.168.100.1"
20 | network = "192.168.100.0"
21 | }
22 | }
23 | # instantiate the provider
24 | provider "libvirt" {
25 | uri = "qemu:///system"
26 | }
27 |
28 | # fetch the CentOS Stream 9 GenericCloud image from the CentOS mirrors
29 | resource "libvirt_volume" "os_image" {
30 | name = "${var.hostname}-os_image"
31 | pool = var.libvirt_pool
32 | source = "https://cloud.centos.org/centos/9-stream/x86_64/images/CentOS-Stream-GenericCloud-9-latest.x86_64.qcow2"
33 | format = "qcow2"
34 | }
35 | # Use CloudInit ISO to add ssh-key to the instance
36 | resource "libvirt_cloudinit_disk" "commoninit" {
37 | name = "${var.hostname}-commoninit.iso"
38 | pool = var.libvirt_pool
39 | user_data = data.template_file.user_data.rendered
40 | meta_data = data.template_file.meta_data.rendered
41 | }
42 |
43 |
44 | data "template_file" "user_data" {
45 | template = file("${path.module}/cloud_init.cfg")
46 | vars = {
47 | hostname = "${var.hostname}.${var.cluster_name}.${var.domain}"
48 | fqdn = "${var.hostname}.${var.cluster_name}.${var.domain}"
49 | iface = "${var.iface}"
50 | sshkey = var.sshkey
51 | }
52 | }
53 |
54 | # Fix for CentOS
55 | data "template_file" "meta_data" {
56 | template = file("${path.module}/network_config.cfg")
57 | vars = {
58 | domain = "${var.cluster_name}.${var.domain}"
59 | hostIP = var.network_data["hostIP"]
60 | dns = var.network_data["dns"]
61 | gateway = var.network_data["gateway"]
62 | network = var.network_data["network"]
63 | broadcast = var.network_data["broadcast"]
64 | iface = var.iface
65 | }
66 | }
67 |
68 |
69 | # Create the machine
70 | resource "libvirt_domain" "bastion" {
71 | # domain name in libvirt, not hostname
72 | name = var.hostname
73 | memory = var.memory*1024
74 | vcpu = var.cpu
75 | machine = "q35"
76 |
77 | cpu {
78 | mode = "host-passthrough"
79 | }
80 |
81 | disk {
82 | volume_id = libvirt_volume.os_image.id
83 | }
84 |
85 | network_interface {
86 | network_name = var.libvirt_network
87 | }
88 |
89 | cloudinit = libvirt_cloudinit_disk.commoninit.id
90 |
91 | # IMPORTANT
92 |   # The VM can hang if an isa-serial console is not present at boot time.
93 |   # If you see the CPU at 100% and the VM never becomes available, this is why.
94 | console {
95 | type = "pty"
96 | target_port = "0"
97 | target_type = "serial"
98 | }
99 |
100 | graphics {
101 | type = "vnc"
102 | listen_type = "address"
103 | autoport = "true"
104 | }
105 |
106 | xml {
107 | xslt = file("${path.module}/uefi-patch.xsl")
108 | }
109 | }
110 |
111 | terraform {
112 | required_version = ">= 1.0"
113 | required_providers {
114 | libvirt = {
115 | source = "dmacvicar/libvirt"
116 | version = "0.7.1"
117 | }
118 | }
119 | }
120 |
121 | output "ips" {
122 | value = "${flatten(libvirt_domain.bastion.*.network_interface.0.addresses)}"
123 | }
124 |
125 | output "macs" {
126 | value = "${flatten(libvirt_domain.bastion.*.network_interface.0.mac)}"
127 | }
128 |
--------------------------------------------------------------------------------
/terraform/bastion/cloud_init.cfg:
--------------------------------------------------------------------------------
1 | #cloud-config
2 | hostname: ${hostname}
3 | fqdn: ${fqdn}
4 | manage_etc_hosts: true
5 | users:
6 | - name: ocpinstall
7 | sudo: ALL=(ALL) NOPASSWD:ALL
8 | groups: users, admin
9 | home: /home/ocpinstall
10 | shell: /bin/bash
11 | lock_passwd: false
12 | ssh-authorized-keys:
13 | - ${sshkey}
14 | # password auth over ssh is enabled as well (console access can still login)
15 | ssh_pwauth: true
16 | disable_root: false
17 | chpasswd:
18 | list: |
19 | ocpinstall:ocprocks
20 | expire: False
21 | packages:
22 | - qemu-guest-agent
23 | # every boot
24 | bootcmd:
25 | - [ sh, -c, 'echo $(date) | sudo tee -a /root/bootcmd.log' ]
26 | - cloud-init-per once rename_conn nmcli con mod System\ eth0 connection.id ${iface}
27 | # run once for setup
28 | runcmd:
29 | - [ sh, -c, 'echo $(date) | sudo tee -a /root/runcmd.log' ]
30 | # written to /var/log/cloud-init-output.log
31 | final_message: "The system is finally up, after $UPTIME seconds"
32 |
33 |
--------------------------------------------------------------------------------
/terraform/bastion/network_config.cfg:
--------------------------------------------------------------------------------
1 | #for virtio nic ethX
2 | #for en100X nic ensX
3 | network-interfaces: |
4 | auto ${iface}
5 | iface ${iface} inet static
6 | address ${hostIP}
7 | network ${network}
8 | netmask 255.255.255.0
9 | broadcast ${broadcast}
10 | gateway ${gateway}
11 | dns-nameservers ${dns}
12 | #bootcmd:
13 | # - ifdown eth0
14 | # - ifup eth0
15 |
--------------------------------------------------------------------------------
/terraform/bastion/uefi-patch.xsl:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 |
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 |
19 |
20 |
--------------------------------------------------------------------------------
/terraform/bootstrap/bootstrap.tf:
--------------------------------------------------------------------------------
1 | # variables that can be overridden
2 | variable "hostname" { default = "bootstrap" }
3 | variable "memory" { default = 16 }
4 | variable "cpu" { default = 4 }
5 | variable "vm_volume_size" { default = 40 }
6 | variable "libvirt_network" { default = "ocp" }
7 | variable "libvirt_pool" { default = "default" }
8 |
9 | # instantiate the provider
10 | provider "libvirt" {
11 | uri = "qemu:///system"
12 | }
13 |
14 | # create an empty volume for the bootstrap node; the OS is installed via PXE
15 | resource "libvirt_volume" "os_image" {
16 | name = "${var.hostname}-os_image"
17 | size = var.vm_volume_size*1073741824
18 | pool = var.libvirt_pool
19 | format = "qcow2"
20 | }
21 |
22 | # Create the machine
23 | resource "libvirt_domain" "bootstrap" {
24 | name = var.hostname
25 | memory = var.memory*1024
26 | vcpu = var.cpu
27 |
28 | cpu {
29 | mode = "host-passthrough"
30 | }
31 |
32 | disk {
33 | volume_id = libvirt_volume.os_image.id
34 | }
35 | network_interface {
36 | network_name = var.libvirt_network
37 | }
38 |
39 | boot_device {
40 | dev = [ "hd", "network" ]
41 | }
42 |
43 | console {
44 | type = "pty"
45 | target_port = "0"
46 | target_type = "serial"
47 | }
48 |
49 | graphics {
50 | type = "vnc"
51 | listen_type = "address"
52 | autoport = "true"
53 | }
54 | }
55 |
56 | terraform {
57 | required_version = ">= 1.0"
58 | required_providers {
59 | libvirt = {
60 | source = "dmacvicar/libvirt"
61 | version = "0.7.1"
62 | }
63 | }
64 | }
65 |
66 | output "macs" {
67 | value = "${flatten(libvirt_domain.bootstrap.*.network_interface.0.mac)}"
68 | }
69 |
70 |
--------------------------------------------------------------------------------
/terraform/libvirt-resources-sno/libvirt-resources.tf:
--------------------------------------------------------------------------------
1 | # variables that can be overridden
2 | variable "domain" { default = "hetzner.lab" }
3 | variable "dns" { default = "192.168.100.7" }
4 | variable "network_cidr" {
5 | type = list
6 | default = ["192.168.100.0/24"]
7 | }
8 | variable "cluster_name" { default = "ocp4" }
9 | variable "libvirt_pool_path" { default = "/var/lib/libvirt/images" }
10 |
11 | # instantiate the provider
12 | provider "libvirt" {
13 | uri = "qemu:///system"
14 | }
15 |
16 | # A pool for all cluster volumes
17 | resource "libvirt_pool" "cluster" {
18 | name = var.cluster_name
19 | type = "dir"
20 | path = "${var.libvirt_pool_path}/${var.cluster_name}"
21 | }
22 |
23 | resource "libvirt_network" "ocp_network" {
24 | name = var.cluster_name
25 | mode = "nat"
26 | domain = var.domain
27 | addresses = var.network_cidr
28 | dhcp {
29 | enabled = false
30 | }
31 | dns {
32 | enabled = true
33 | local_only = true
34 | }
35 | dnsmasq_options {
36 | options {
37 | option_name = "server"
38 | option_value = "${cidrhost(var.network_cidr[0],1)}"
39 | }
40 | options {
41 | option_name = "address"
42 | option_value = "/apps.${var.domain}/${var.dns}"
43 | }
44 | options {
45 | option_name = "address"
46 | option_value = "/api.${var.domain}/${var.dns}"
47 | }
48 | options {
49 | option_name = "address"
50 | option_value = "/api-int.${var.domain}/${var.dns}"
51 | }
52 | }
53 | }
54 |
55 | terraform {
56 | required_version = ">= 1.0"
57 | required_providers {
58 | libvirt = {
59 | source = "dmacvicar/libvirt"
60 | version = "0.7.1"
61 | }
62 | }
63 | }
64 |
65 |
--------------------------------------------------------------------------------
/terraform/libvirt-resources/libvirt-resources.tf:
--------------------------------------------------------------------------------
1 | # variables that can be overridden
2 | variable "domain" { default = "hetzner.lab" }
3 | variable "dns" { default = "192.168.100.7" }
4 | variable "network_cidr" {
5 | type = list
6 | default = ["192.168.100.0/24"]
7 | }
8 | variable "cluster_name" { default = "ocp4" }
9 | variable "libvirt_pool_path" { default = "/var/lib/libvirt/images" }
10 |
11 | # instantiate the provider
12 | provider "libvirt" {
13 | uri = "qemu:///system"
14 | }
15 |
16 | # A pool for all cluster volumes
17 | resource "libvirt_pool" "cluster" {
18 | name = var.cluster_name
19 | type = "dir"
20 | path = "${var.libvirt_pool_path}/${var.cluster_name}"
21 | }
22 |
23 | resource "libvirt_network" "ocp_network" {
24 | name = var.cluster_name
25 | mode = "nat"
26 | domain = var.domain
27 | addresses = var.network_cidr
28 |
29 | dhcp {
30 | enabled = false
31 | }
32 |
33 | dns {
34 | enabled = true
35 | local_only = true
36 | }
37 |
38 | dnsmasq_options {
39 | options {
40 | option_name = "server"
41 | option_value = "/${var.domain}/${var.dns}"
42 | }
43 | }
44 | }
45 |
46 | terraform {
47 | required_version = ">= 1.0"
48 | required_providers {
49 | libvirt = {
50 | source = "dmacvicar/libvirt"
51 | version = "0.7.1"
52 | }
53 | }
54 | }
55 |
56 |
--------------------------------------------------------------------------------
/terraform/loadbalancer/cloud_init.cfg:
--------------------------------------------------------------------------------
1 | #cloud-config
2 | hostname: ${hostname}
3 | fqdn: ${fqdn}
4 | manage_etc_hosts: true
5 | users:
6 | - name: ocpinstall
7 | sudo: ALL=(ALL) NOPASSWD:ALL
8 | groups: users, admin
9 | home: /home/ocpinstall
10 | shell: /bin/bash
11 | lock_passwd: false
12 | ssh-authorized-keys:
13 | - ${sshkey}
14 | # password auth is enabled alongside key auth (see chpasswd below)
15 | ssh_pwauth: true
16 | disable_root: false
17 | chpasswd:
18 | list: |
19 | ocpinstall:ocprocks
20 | expire: False
21 | packages:
22 | - qemu-guest-agent
23 | # every boot
24 | bootcmd:
25 | - [ sh, -c, 'echo $(date) | sudo tee -a /root/bootcmd.log' ]
26 | - cloud-init-per once rename_conn nmcli con mod System\ eth0 connection.id ${iface}
27 | # run once for setup
28 | runcmd:
29 | - [ sh, -c, 'echo $(date) | sudo tee -a /root/runcmd.log' ]
30 | # written to /var/log/cloud-init-output.log
31 | final_message: "The system is finally up, after $UPTIME seconds"
32 |
33 |
--------------------------------------------------------------------------------
/terraform/loadbalancer/loadbalancer.tf:
--------------------------------------------------------------------------------
1 | # variables that can be overridden
2 | variable "hostname" { default = "test" }
3 | variable "domain" { default = "hetzner.lab" }
4 | variable "cluster_name" { default = "ocp4" }
5 | variable "memory" { default = 1024*2 } # MiB
6 | variable "cpu" { default = 1 }
7 | variable "iface" { default = "eth0" }
8 | #variable "mac" { default = "FF:FF:FF:FF:FF:FF" }
9 | variable "libvirt_network" { default = "ocp4" }
10 | variable "libvirt_pool" { default = "ocp4" }
11 | variable "sshkey" { default = "" }
12 | variable "network_data" {
13 | type = map(string)
14 | default = {
15 | hostIP = "192.168.100.31"
16 | broadcast = "192.168.100.255"
17 | dns = "192.168.100.1"
18 | gateway = "192.168.100.1"
19 | network = "192.168.100.0"
20 | }
21 | }
22 | # instantiate the provider
23 | provider "libvirt" {
24 | uri = "qemu:///system"
25 | }
26 |
27 | # fetch the latest CentOS Stream 9 cloud image
28 | resource "libvirt_volume" "os_image" {
29 | name = "${var.hostname}-os_image"
30 | pool = var.libvirt_pool
31 | source = "https://cloud.centos.org/centos/9-stream/x86_64/images/CentOS-Stream-GenericCloud-9-latest.x86_64.qcow2"
32 | format = "qcow2"
33 | }
34 |
35 | # Use CloudInit ISO to add ssh-key to the instance
36 | resource "libvirt_cloudinit_disk" "commoninit" {
37 | name = "${var.hostname}-commoninit.iso"
38 | pool = var.libvirt_pool
39 | user_data = data.template_file.user_data.rendered
40 | meta_data = data.template_file.meta_data.rendered
41 | }
42 |
43 |
44 | data "template_file" "user_data" {
45 | template = file("${path.module}/cloud_init.cfg")
46 | vars = {
47 | hostname = "${var.hostname}.${var.cluster_name}.${var.domain}"
48 | fqdn = "${var.hostname}.${var.cluster_name}.${var.domain}"
49 | iface = var.iface
50 | sshkey = var.sshkey
51 | }
52 | }
53 |
54 | # Fix for CentOS: static network settings are injected via meta_data
55 | data "template_file" "meta_data" {
56 | template = file("${path.module}/network_config.cfg")
57 | vars = {
58 | domain = "${var.cluster_name}.${var.domain}"
59 | hostIP = var.network_data["hostIP"]
60 | dns = var.network_data["dns"]
61 | gateway = var.network_data["gateway"]
62 | broadcast = var.network_data["broadcast"]
63 | network = var.network_data["network"]
64 | iface = var.iface
65 | }
66 | }
67 |
68 |
69 | # Create the machine
70 | resource "libvirt_domain" "infra-machine" {
71 | name = var.hostname
72 | memory = var.memory
73 | vcpu = var.cpu
74 | machine = "q35"
75 |
76 | cpu {
77 | mode = "host-passthrough"
78 | }
79 |
80 | disk {
81 | volume_id = libvirt_volume.os_image.id
82 | }
83 | network_interface {
84 | network_name = var.libvirt_network
85 | }
86 |
87 | cloudinit = libvirt_cloudinit_disk.commoninit.id
88 |
89 | console {
90 | type = "pty"
91 | target_port = "0"
92 | target_type = "serial"
93 | }
94 |
95 | graphics {
96 | type = "vnc"
97 | listen_type = "address"
98 | autoport = "true"
99 | }
100 |
101 | xml {
102 | xslt = file("${path.module}/uefi-patch.xsl")
103 | }
104 | }
105 |
106 | terraform {
107 | required_version = ">= 1.0"
108 | required_providers {
109 | libvirt = {
110 | source = "dmacvicar/libvirt"
111 | version = "0.7.1"
112 | }
113 | }
114 | }
115 |
116 | output "ips" {
117 | value = flatten(libvirt_domain.infra-machine[*].network_interface[0].addresses)
118 | }
119 |
120 | output "macs" {
121 | value = flatten(libvirt_domain.infra-machine[*].network_interface[0].mac)
122 | }
123 |
--------------------------------------------------------------------------------
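The two data "template_file" blocks above rely on the legacy hashicorp/template provider, which is deprecated (and has no build for some platforms, e.g. darwin/arm64). On Terraform >= 0.12 the built-in templatefile() function renders the same files without an extra provider; a sketch of the equivalent user_data, not wired into the rest of the module:

    locals {
      user_data = templatefile("${path.module}/cloud_init.cfg", {
        hostname = "${var.hostname}.${var.cluster_name}.${var.domain}"
        fqdn     = "${var.hostname}.${var.cluster_name}.${var.domain}"
        iface    = var.iface
        sshkey   = var.sshkey
      })
    }

The cloudinit disk would then reference local.user_data instead of data.template_file.user_data.rendered.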
/terraform/loadbalancer/network_config.cfg:
--------------------------------------------------------------------------------
1 | # for virtio NICs the interface name is ethX
2 | # for e1000 NICs the interface name is ensX
3 | network-interfaces: |
4 | auto ${iface}
5 | iface ${iface} inet static
6 | address ${hostIP}
7 | network ${network}
8 | netmask 255.255.255.0
9 | broadcast ${broadcast}
10 | gateway ${gateway}
11 | dns-nameservers ${dns}
12 | #bootcmd:
13 | # - ifdown eth0
14 | # - ifup eth0
15 |
--------------------------------------------------------------------------------
/terraform/loadbalancer/uefi-patch.xsl:
--------------------------------------------------------------------------------
1 | [stylesheet markup lost in extraction — an XSLT identity transform that
2 | patches the domain <os> element so the VM boots with UEFI firmware; a
3 | reconstruction sketch follows this file]
--------------------------------------------------------------------------------
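Since the stylesheet body was lost, here is a hedged reconstruction of what such a UEFI patch typically looks like: an XSLT identity transform plus one template that rewrites the domain's <os> element to request EFI firmware (a sketch, not the original file):

    <?xml version="1.0"?>
    <xsl:stylesheet version="1.0" xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
      <!-- identity transform: copy the generated domain XML unchanged -->
      <xsl:template match="@*|node()">
        <xsl:copy>
          <xsl:apply-templates select="@*|node()"/>
        </xsl:copy>
      </xsl:template>
      <!-- rewrite <os> so libvirt selects UEFI firmware automatically -->
      <xsl:template match="os">
        <os firmware="efi">
          <xsl:apply-templates select="@*|node()"/>
        </os>
      </xsl:template>
    </xsl:stylesheet>
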
/terraform/masters/masters.tf:
--------------------------------------------------------------------------------
1 | # variables that can be overridden
2 | variable "hostname" { default = "master" }
3 | variable "memory" { default = 16 }
4 | variable "cpu" { default = 4 }
5 | variable "vm_count" { default = 3 }
6 | variable "vm_volume_size" { default = 40 }
7 | variable "vm_block_device" { default = false }
8 | variable "vm_block_device_size" { default = 100 }
9 | variable "vm_additional_nic" { default = false }
10 | variable "vm_additional_nic_network" { default = "default" }
11 | variable "libvirt_network" { default = "ocp" }
12 | variable "libvirt_pool" { default = "default" }
13 |
14 | provider "libvirt" {
15 | uri = "qemu:///system"
16 | }
17 |
18 | resource "libvirt_volume" "os_image" {
19 | count = var.vm_count
20 | name = "${var.hostname}-os_image-${count.index}"
21 | size = var.vm_volume_size*1073741824 # GiB -> bytes
22 | pool = var.libvirt_pool
23 | format = "qcow2"
24 | }
25 |
26 | resource "libvirt_volume" "storage_image" {
27 | count = tobool(lower(var.vm_block_device)) ? var.vm_count : 0
28 | name = "${var.hostname}-storage_image-${count.index}"
29 | pool = var.libvirt_pool
30 | size = var.vm_block_device_size*1073741824
31 | format = "qcow2"
32 | }
33 |
34 | # Create the machine
35 | resource "libvirt_domain" "master" {
36 | count = var.vm_count
37 | name = "${var.hostname}-${count.index}"
38 | memory = var.memory*1024 # GiB -> MiB
39 | vcpu = var.cpu
40 |
41 | cpu {
42 | mode = "host-passthrough"
43 | }
44 |
45 | disk {
46 | volume_id = libvirt_volume.os_image[count.index].id
47 | }
48 |
49 | dynamic "disk" {
50 | for_each = tobool(lower(var.vm_block_device)) ? { storage = true } : {}
51 | content {
52 | volume_id = libvirt_volume.storage_image[count.index].id
53 | }
54 | }
55 |
56 | network_interface {
57 | network_name = var.libvirt_network
58 | }
59 |
60 | dynamic "network_interface" {
61 | for_each = tobool(lower(var.vm_additional_nic)) ? { nic = true } : {}
62 | content {
63 | network_name = var.vm_additional_nic_network
64 | }
65 | }
66 |
67 | boot_device {
68 | dev = [ "hd", "network" ] # PXE boot only while the disk is still empty
69 | }
70 |
71 | console {
72 | type = "pty"
73 | target_port = "0"
74 | target_type = "serial"
75 | }
76 |
77 | graphics {
78 | type = "vnc"
79 | listen_type = "address"
80 | autoport = "true"
81 | }
82 | }
83 |
84 | terraform {
85 | required_version = ">= 1.0"
86 | required_providers {
87 | libvirt = {
88 | source = "dmacvicar/libvirt"
89 | version = "0.7.1"
90 | }
91 | }
92 | }
93 |
94 | output "macs" {
95 | value = flatten(libvirt_domain.master[*].network_interface[0].mac)
96 | }
97 |
--------------------------------------------------------------------------------
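A note on the tobool(lower(...)) pattern used for vm_block_device and vm_additional_nic above: when these modules are driven from Ansible, booleans arrive as strings such as "True" or "False", so the value is lowercased first and then converted; plain HCL booleans also pass through, because Terraform coerces them to "true"/"false" for lower(). Illustrative only:

    locals {
      from_ansible = tobool(lower("True"))  # => true
      from_hcl     = tobool(lower(false))   # => false
    }
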
/terraform/sno/master-sno.tf:
--------------------------------------------------------------------------------
1 | # variables that can be overridden
2 | variable "hostname" { default = "master-sno" }
3 | variable "memory" { default = 32 }
4 | variable "cpu" { default = 4 }
5 | variable "coreos_iso_path" { default = "" }
6 | variable "vm_volume_size" { default = 40 }
7 | variable "vm_net_ip" { default = "192.168.100.7" }
8 | variable "local_volume_size" { default = 50 }
9 | variable "local_volume_enabled" { default = false }
10 | variable "vm_additional_nic" { default = false }
11 | variable "vm_additional_nic_network" { default = "default" }
12 | variable "libvirt_network" { default = "ocp" }
13 | variable "libvirt_pool" { default = "default" }
14 |
15 | provider "libvirt" {
16 | uri = "qemu:///system"
17 | }
18 |
19 | resource "libvirt_volume" "os_image" {
20 | name = "${var.hostname}-os_image"
21 | size = var.vm_volume_size*1073741824
22 | pool = var.libvirt_pool
23 | format = "qcow2"
24 | }
25 |
26 | resource "libvirt_volume" "local_disk" {
27 | count = tobool(lower(var.local_volume_enabled)) ? 1 : 0
28 | name = "${var.hostname}-local_disk"
29 | pool = var.libvirt_pool
30 | size = var.local_volume_size*1073741824
31 | format = "qcow2"
32 | }
33 |
34 | # Create the machine
35 | resource "libvirt_domain" "master" {
36 | count = 1
37 | name = var.hostname
38 | memory = var.memory*1024 # GiB -> MiB
39 | vcpu = var.cpu
40 |
41 | cpu {
42 | mode = "host-passthrough"
43 | }
44 |
45 | disk {
46 | volume_id = libvirt_volume.os_image.id
47 | }
48 |
49 | dynamic "disk" {
50 | for_each = tobool(lower(var.local_volume_enabled)) ? { storage = true } : {}
51 | content {
52 | volume_id = libvirt_volume.local_disk[count.index].id
53 | }
54 | }
55 |
56 | disk {
57 | file = var.coreos_iso_path # path to the CoreOS install ISO
58 | }
59 |
60 | network_interface {
61 | network_name = var.libvirt_network
62 | addresses = [var.vm_net_ip]
63 | }
64 |
65 | dynamic "network_interface" {
66 | for_each = tobool(lower(var.vm_additional_nic)) ? { nic = true } : {}
67 | content {
68 | network_name = var.vm_additional_nic_network
69 | }
70 | }
71 |
72 | boot_device {
73 | dev = [ "hd", "cdrom" ] # boot the install ISO only while the disk is still empty
74 | }
75 |
76 | console {
77 | type = "pty"
78 | target_port = "0"
79 | target_type = "serial"
80 | }
81 |
82 | graphics {
83 | type = "vnc"
84 | listen_type = "address"
85 | autoport = "true"
86 | }
87 |
88 | }
89 |
90 | terraform {
91 | required_version = ">= 1.0"
92 | required_providers {
93 | libvirt = {
94 | source = "dmacvicar/libvirt"
95 | version = "0.7.1"
96 | }
97 | }
98 | }
99 |
100 | output "macs" {
101 | value = flatten(libvirt_domain.master[*].network_interface[0].mac)
102 | }
103 |
--------------------------------------------------------------------------------
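Unlike the PXE-booted masters and workers, the SNO machine boots the CoreOS install ISO attached as a cdrom, and only while the OS disk is still empty; after installation the "hd" entry wins. For a standalone test of this module, the ISO path and sizing can be supplied on the command line (values illustrative):

    terraform apply \
      -var 'coreos_iso_path=/var/lib/libvirt/images/rhcos-live.iso' \
      -var 'memory=16' -var 'cpu=4'
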
/terraform/workers/workers.tf:
--------------------------------------------------------------------------------
1 | # variables that can be overridden
2 | variable "hostname" { default = "worker" }
3 | variable "memory" { default = 32 }
4 | variable "cpu" { default = 4 }
5 | variable "vm_count" { default = 3 }
6 | variable "libvirt_network" { default = "ocp4" }
7 | variable "libvirt_pool" { default = "ocp4" }
8 | variable "vm_volume_size" { default = 20 }
9 | variable "vm_block_device" { default = false }
10 | variable "vm_block_device_size" { default = 100 }
11 | variable "vm_additional_nic" { default = false }
12 | variable "vm_additional_nic_network" { default = "default" }
13 |
14 | # instantiate the provider
15 | provider "libvirt" {
16 | uri = "qemu:///system"
17 | }
18 |
19 | resource "libvirt_volume" "os_image" {
20 | count = var.vm_count
21 | name = "${var.hostname}-os_image-${count.index}"
22 | pool = var.libvirt_pool
23 | size = var.vm_volume_size*1073741824
24 | format = "qcow2"
25 | }
26 |
27 | resource "libvirt_volume" "storage_image" {
28 | count = tobool(lower(var.vm_block_device)) ? var.vm_count : 0
29 | name = "${var.hostname}-storage_image-${count.index}"
30 | pool = var.libvirt_pool
31 | size = var.vm_block_device_size*1073741824
32 | format = "qcow2"
33 | }
34 |
35 | # Create the machine
36 | resource "libvirt_domain" "worker" {
37 | count = var.vm_count
38 | name = "${var.hostname}-${count.index}"
39 | memory = var.memory*1024 # GiB -> MiB
40 | vcpu = var.cpu
41 |
42 | cpu {
43 | mode = "host-passthrough"
44 | }
45 |
46 | disk {
47 | volume_id = libvirt_volume.os_image[count.index].id
48 | }
49 |
50 | dynamic "disk" {
51 | for_each = tobool(lower(var.vm_block_device)) ? { storage = true } : {}
52 | content {
53 | volume_id = libvirt_volume.storage_image[count.index].id
54 | }
55 | }
56 |
57 | network_interface {
58 | network_name = var.libvirt_network
59 | }
60 |
61 | dynamic "network_interface" {
62 | for_each = tobool(lower(var.vm_additional_nic)) ? { nic = true } : {}
63 | content {
64 | network_name = var.vm_additional_nic_network
65 | }
66 | }
67 |
68 | boot_device {
69 | dev = [ "hd", "network" ]
70 | }
71 |
72 | console {
73 | type = "pty"
74 | target_port = "0"
75 | target_type = "serial"
76 | }
77 |
78 | graphics {
79 | type = "vnc"
80 | listen_type = "address"
81 | autoport = "true"
82 | }
83 | }
84 |
85 | terraform {
86 | required_version = ">= 1.0"
87 | required_providers {
88 | libvirt = {
89 | source = "dmacvicar/libvirt"
90 | version = "0.7.1"
91 | }
92 | }
93 | }
94 | output "macs" {
95 | value = flatten(libvirt_domain.worker[*].network_interface[0].mac)
96 | }
97 |
98 |
--------------------------------------------------------------------------------
/vars/cluster_vars.yml:
--------------------------------------------------------------------------------
1 | three_node: true
2 | domain: hetzner.lab
3 | network_cidr: 192.168.100.0/24
4 | cluster:
5 | name: ocp4
6 | version: stable
7 | ocp_user: admin
8 | ocp_pass: openshift
9 | pullSecret: ""
10 | additional_nic:
11 | enabled: false
12 | network:
13 | additional_block_device:
14 | enabled: false
15 | size: 100
16 | cluster_nodes:
17 | host_list:
18 | sno:
19 | ip: 192.168.100.7
20 | bootstrap:
21 | - ip: 192.168.100.6
22 | masters:
23 | - ip: 192.168.100.7
24 | - ip: 192.168.100.8
25 | - ip: 192.168.100.9
26 | workers:
27 | - ip: 192.168.100.10
28 | - ip: 192.168.100.11
29 | specs:
30 | bootstrap:
31 | vcpu: 4
32 | mem: 16
33 | disk: 60
34 | masters:
35 | vcpu: 4
36 | mem: 16
37 | disk: 60
38 | workers:
39 | vcpu: 2
40 | mem: 16
41 | disk: 60
42 |
--------------------------------------------------------------------------------
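cluster_vars.yml drives both DNS records and VM provisioning: each entry under cluster_nodes.host_list becomes a VM with the listed static IP, and the specs blocks set vCPU, memory (GB) and disk (GB) per role. Scaling out is a matter of appending entries; a hypothetical third worker:

    workers:
      - ip: 192.168.100.10
      - ip: 192.168.100.11
      - ip: 192.168.100.12   # new worker; the IP must be free within network_cidr
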
/vars/infra_vars.yml:
--------------------------------------------------------------------------------
1 | infra_nodes:
2 | host_list:
3 | bastion:
4 | - ip: 192.168.100.4
5 | loadbalancer:
6 | - ip: 192.168.100.5
7 |
8 | dhcp:
9 | timezone: "Europe/Rome"
10 | ntp: 204.11.201.10
11 |
--------------------------------------------------------------------------------
/vars/sno_vars.yml:
--------------------------------------------------------------------------------
1 | domain: hetzner.lab
2 | network_cidr: 192.168.100.0/24
3 | cluster:
4 | version: stable
5 | name: ocp4
6 | ocp_user: admin
7 | ocp_pass: openshift
8 | pullSecret: ""
9 | cluster_nodes:
10 | host_list:
11 | sno:
12 | ip: 192.168.100.7
13 | specs:
14 | sno:
15 | vcpu: 4
16 | mem: 32
17 | disk: 60
18 | local_storage:
19 | enabled: true
20 | volume_size: 50
21 | additional_nic:
22 | enabled: false
23 | network: ""
24 |
--------------------------------------------------------------------------------