├── terraform ├── ocp4-ai-cluster-net-pool │ ├── readme.txt │ └── ocp4.tf ├── pool-net │ └── poolnet.tf ├── ocp4-ai-cluster │ └── ocp4-lab.tf └── ai-bond │ └── ocp4-ai-bond.tf ├── scripts ├── wipe-cluster.sh ├── prepare-kvm-host.sh ├── full-deploy-ai-multinode.sh ├── full-deploy-ai-multinode-bond.sh └── full-deploy-ai-calico.sh ├── config ├── nmstate-bond-worker0.yaml ├── nmstate-bond-worker1.yaml ├── deployment-multinodes-calico.json ├── deployment-multinodes-3nodes.json └── dnsmasq.conf └── README.md /terraform/ocp4-ai-cluster-net-pool/readme.txt: -------------------------------------------------------------------------------- 1 | This terraform deploys: 2 | 3 | - images pool 4 | - ocp4-net network 5 | - Full OCP cluster VMs on ocp4-net 6 | -------------------------------------------------------------------------------- /scripts/wipe-cluster.sh: -------------------------------------------------------------------------------- 1 | CLUSTER_ID=$(curl -s -X GET "$AI_URL/api/assisted-install/v2/clusters?with_hosts=true" -H "accept: application/json" -H "get_unregistered_clusters: false"| jq -r '.[].id') 2 | 3 | echo Wiping cluster: $CLUSTER_ID 4 | 5 | curl -X DELETE "$AI_URL/api/assisted-install/v1/clusters/$CLUSTER_ID" -H "accept: application/json" 6 | 7 | echo Wiping nodes 8 | 9 | terraform -chdir=/opt/terraform/ocp4-ai-cluster destroy -auto-approve 10 | 11 | rm -rf ~/.kube 12 | 13 | rm -f deployment-multinodes.json -------------------------------------------------------------------------------- /terraform/pool-net/poolnet.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | libvirt = { 4 | source = "dmacvicar/libvirt" 5 | } 6 | } 7 | } 8 | # instance the provider 9 | provider "libvirt" { 10 | uri = "qemu:///system" 11 | } 12 | resource "libvirt_pool" "images" { 13 | name = "images" 14 | type = "dir" 15 | path = "/var/lib/libvirt/images" 16 | } 17 | resource "libvirt_network" "ocp_network" { 
18 | name = "ocp4-net" 19 | mode = "nat" 20 | autostart = true 21 | domain = "lab.local" 22 | addresses = ["192.167.124.0/24"] 23 | bridge = "virbr-ocp4" 24 | dhcp { 25 | enabled = false 26 | } 27 | } 28 | -------------------------------------------------------------------------------- /config/nmstate-bond-worker0.yaml: -------------------------------------------------------------------------------- 1 | interfaces: 2 | - name: bond0 3 | description: Bond 4 | type: bond 5 | state: up 6 | ipv4: 7 | enabled: true 8 | dhcp: true 9 | auto-dns: true 10 | auto-gateway: true 11 | auto-routes: true 12 | link-aggregation: 13 | mode: balance-rr 14 | options: 15 | miimon: '140' 16 | port: 17 | - ens3 18 | - ens4 19 | - name: ens3 20 | state: up 21 | type: ethernet 22 | - name: ens4 23 | state: up 24 | type: ethernet 25 | - name: ens5 26 | state: up 27 | type: ethernet 28 | ipv4: 29 | address: 30 | - ip: 10.17.3.3 31 | prefix-length: 24 32 | enabled: true -------------------------------------------------------------------------------- /config/nmstate-bond-worker1.yaml: -------------------------------------------------------------------------------- 1 | interfaces: 2 | - name: bond0 3 | description: Bond 4 | type: bond 5 | state: up 6 | ipv4: 7 | enabled: true 8 | dhcp: true 9 | auto-dns: true 10 | auto-gateway: true 11 | auto-routes: true 12 | link-aggregation: 13 | mode: balance-rr 14 | options: 15 | miimon: '140' 16 | port: 17 | - ens3 18 | - ens4 19 | - name: ens3 20 | state: up 21 | type: ethernet 22 | - name: ens4 23 | state: up 24 | type: ethernet 25 | - name: ens5 26 | state: up 27 | type: ethernet 28 | ipv4: 29 | address: 30 | - ip: 10.17.3.4 31 | prefix-length: 24 32 | enabled: true 33 | -------------------------------------------------------------------------------- /config/deployment-multinodes-calico.json: -------------------------------------------------------------------------------- 1 | { 2 | "kind": "Cluster", 3 | "name": "ocpd", 4 | "openshift_version": "4.8", 
5 | "base_dns_domain": "lab.local", 6 | "hyperthreading": "all", 7 | "ingress_vip": "192.167.124.8", 8 | "schedulable_masters": false, 9 | "platform": { 10 | "type": "baremetal" 11 | }, 12 | "user_managed_networking": false, 13 | "cluster_networks": [ 14 | { 15 | "cidr": "10.128.0.0/14", 16 | "host_prefix": 23 17 | } 18 | ], 19 | "service_networks": [ 20 | { 21 | "cidr": "172.31.0.0/16" 22 | } 23 | ], 24 | "machine_networks": [ 25 | { 26 | "cidr": "192.167.124.0/24" 27 | } 28 | ], 29 | "additional_ntp_source": "ntp1.hetzner.de", 30 | "vip_dhcp_allocation": false, 31 | "high_availability_mode": "Full", 32 | "hosts": [], 33 | "ssh_public_key": "Paste ssh pu key here", 34 | "pull_secret": "{PASTE pullsecret here}" 35 | } 36 | 37 | -------------------------------------------------------------------------------- /config/deployment-multinodes-3nodes.json: -------------------------------------------------------------------------------- 1 | { 2 | "kind": "Cluster", 3 | "name": "ocpd", 4 | "openshift_version": "4.8", 5 | "base_dns_domain": "lab.local", 6 | "hyperthreading": "all", 7 | "ingress_vip": "192.167.124.8", 8 | "schedulable_masters": true, 9 | "platform": { 10 | "type": "baremetal" 11 | }, 12 | "user_managed_networking": false, 13 | "cluster_networks": [ 14 | { 15 | "cidr": "10.128.0.0/14", 16 | "host_prefix": 23 17 | } 18 | ], 19 | "service_networks": [ 20 | { 21 | "cidr": "172.31.0.0/16" 22 | } 23 | ], 24 | "machine_networks": [ 25 | { 26 | "cidr": "192.167.124.0/24" 27 | } 28 | ], 29 | "network_type": "OVNKubernetes", 30 | "additional_ntp_source": "ntp1.hetzner.de", 31 | "vip_dhcp_allocation": false, 32 | "high_availability_mode": "Full", 33 | "hosts": [], 34 | "ssh_public_key": "Paste ssh pu key here", 35 | "pull_secret": "{PASTE pullsecret here}" 36 | } 37 | 38 | -------------------------------------------------------------------------------- /config/dnsmasq.conf: -------------------------------------------------------------------------------- 1 | 
domain-needed 2 | dhcp-authoritative 3 | bind-dynamic 4 | bogus-priv 5 | domain=ocpd.lab.local 6 | 7 | dhcp-range=192.167.124.114,192.167.124.114 8 | dhcp-option=3,192.167.124.1 9 | interface=eth0 10 | server=8.8.8.8 11 | 12 | #Wildcard for apps -- make changes to cluster-name (openshift) and domain (example.com) 13 | host-record=api.ocpd.lab.local,192.167.124.7 14 | address=/.apps.ocpd.lab.local/192.167.124.8 15 | 16 | #Static IPs for Masters 17 | dhcp-host=aa:bb:cc:11:42:10,ocp4-master1.ocpd.lab.local,192.167.124.10 18 | dhcp-host=aa:bb:cc:11:42:11,ocp4-master2.ocpd.lab.local,192.167.124.11 19 | dhcp-host=aa:bb:cc:11:42:12,ocp4-master3.ocpd.lab.local,192.167.124.12 20 | dhcp-host=aa:bb:cc:11:42:20,ocp4-worker1.ocpd.lab.local,192.167.124.13 21 | dhcp-host=aa:bb:cc:11:42:21,ocp4-worker2.ocpd.lab.local,192.167.124.14 22 | dhcp-host=aa:bb:cc:11:42:22,ocp4-worker3.ocpd.lab.local,192.167.124.15 23 | dhcp-host=aa:bb:cc:11:42:30,ocp4-worker1-ht.ocpd.lab.local,192.167.124.16 24 | -------------------------------------------------------------------------------- /scripts/prepare-kvm-host.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | TERRAVERSION=1.0.7 ####change me if needed 3 | dnf -y install https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm 4 | dnf install -y libvirt libvirt-devel qemu-kvm mkisofs python3-devel jq ipmitool git make bash-completion \ 5 | net-tools wget syslinux libvirt-libs tmux \ 6 | tar unzip go ipmitool virt-install libguestfs libguestfs-tools libguestfs-xfs net-tools virt-what nmap 7 | dnf group install "Development Tools" -y 8 | systemctl enable --now libvirtd 9 | wget https://releases.hashicorp.com/terraform/${TERRAVERSION}/terraform_${TERRAVERSION}_linux_amd64.zip 10 | unzip terraform_${TERRAVERSION}_linux_amd64.zip 11 | mv terraform /usr/local/sbin/ 12 | rm -f *zip 13 | 14 | sed -i 's/^# StrictHostKeyChecking ask/StrictHostKeyChecking no/g' /etc/ssh/ssh_config 15 | echo 
"UserKnownHostsFile=/dev/null" | sudo tee -a /etc/ssh/ssh_config > /dev/null 16 | sed -i 's/#UseDNS yes/UseDNS no/g' /etc/ssh/sshd_config 17 | sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/sysconfig/selinux 18 | setenforce 0 19 | ###create kvm pool and network### 20 | 21 | terraform -chdir=/opt/terraform/pool-net init 22 | terraform -chdir=/opt/terraform/pool-net apply -auto-approve 23 | -------------------------------------------------------------------------------- /scripts/full-deploy-ai-multinode.sh: -------------------------------------------------------------------------------- 1 | export AI_URL='http://192.167.124.1:8090' 2 | export CLUSTER_SSHKEY=$(cat ~/.ssh/id_ed25519.pub) 3 | export PULL_SECRET=$(cat pull-secret.txt | jq -R .) 4 | 5 | cat << EOF > ./deployment-multinodes.json 6 | { 7 | "kind": "Cluster", 8 | "name": "ocpd", 9 | "openshift_version": "4.8", 10 | "base_dns_domain": "lab.local", 11 | "hyperthreading": "all", 12 | "ingress_vip": "192.167.124.8", 13 | "schedulable_masters": false, 14 | "platform": { 15 | "type": "baremetal" 16 | }, 17 | "user_managed_networking": false, 18 | "cluster_networks": [ 19 | { 20 | "cidr": "10.128.0.0/14", 21 | "host_prefix": 23 22 | } 23 | ], 24 | "service_networks": [ 25 | { 26 | "cidr": "172.31.0.0/16" 27 | } 28 | ], 29 | "machine_networks": [ 30 | { 31 | "cidr": "192.167.124.0/24" 32 | } 33 | ], 34 | "network_type": "OVNKubernetes", 35 | "additional_ntp_source": "ntp1.hetzner.de", 36 | "vip_dhcp_allocation": false, 37 | "high_availability_mode": "Full", 38 | "hosts": [], 39 | "ssh_public_key": "${CLUSTER_SSHKEY}", 40 | "pull_secret": ${PULL_SECRET} 41 | } 42 | EOF 43 | curl -s -X POST "$AI_URL/api/assisted-install/v1/clusters" \ 44 | -d @./deployment-multinodes.json \ 45 | --header "Content-Type: application/json" \ 46 | | jq . 
47 | 48 | 49 | CLUSTER_ID=$(curl -s -X GET "$AI_URL/api/assisted-install/v2/clusters?with_hosts=true" -H "accept: application/json" -H "get_unregistered_clusters: false"| jq -r '.[].id') 50 | 51 | echo $CLUSTER_ID 52 | 53 | echo Build ISO 54 | cat << EOF > ./discovery-iso-params.json 55 | { 56 | "ssh_public_key": "$CLUSTER_SSHKEY", 57 | "pull_secret": $PULL_SECRET, 58 | "image_type": "full-iso" 59 | } 60 | EOF 61 | 62 | curl -s -X POST "$AI_URL/api/assisted-install/v1/clusters/$CLUSTER_ID/downloads/image" \ 63 | -d @discovery-iso-params.json \ 64 | --header "Content-Type: application/json" \ 65 | | jq '.' 66 | 67 | echo download ISO 68 | 69 | curl -L "$AI_URL/api/assisted-install/v1/clusters/$CLUSTER_ID/downloads/image" -o /var/lib/libvirt/images/discovery_image_ocpd.iso 70 | 71 | 72 | 73 | echo Create and start Masters 74 | 75 | terraform -chdir=/opt/terraform/ocp4-ai-cluster init 76 | terraform -chdir=/opt/terraform/ocp4-ai-cluster/ apply -auto-approve 77 | 78 | 79 | 80 | echo Done!!! 
echo Wait for discovery process to happen

# "Sleep" is not a command -- bash is case-sensitive. The original capitalized
# form failed with "command not found", silently skipping the wait and racing
# the role assignment against host discovery.
sleep 180

echo Assign Master role to discovered nodes

# Walk every discovered host id and PATCH its role to master.
for i in `curl -s -X GET "$AI_URL/api/assisted-install/v2/clusters?with_hosts=true"\
 -H "accept: application/json" -H "get_unregistered_clusters: false"| jq -r '.[].hosts[].id'| awk 'NR>0' |awk '{print $1;}'`
do curl -X PATCH "$AI_URL/api/assisted-install/v1/clusters/$CLUSTER_ID" -H "accept: application/json" -H "Content-Type: application/json" -d "{ \"hosts_roles\": [ { \"id\": \"$i\", \"role\": \"master\" } ]}"
done


echo set api IP

curl -X PATCH "$AI_URL/api/assisted-install/v1/clusters/$CLUSTER_ID" -H "accept: application/json" -H "Content-Type: application/json" -d "{ \"api_vip\": \"192.167.124.7\"}"

echo Start workers
for i in {1..3}
do virsh start ocp4-worker$i
done

sleep 180

echo Start installation

curl -X POST \
 "$AI_URL/api/assisted-install/v1/clusters/$CLUSTER_ID/actions/install" \
 -H "accept: application/json" \
 -H "Content-Type: application/json"

STATUS=$(curl -s -X GET "$AI_URL/api/assisted-install/v2/clusters?with_hosts=true" -H "accept: application/json" -H "get_unregistered_clusters: false"| jq -r '.[].progress.total_percentage')

echo Wait for install to complete

while [[ $STATUS != 100 ]]
do
  sleep 5
  STATUS=$(curl -s -X GET "$AI_URL/api/assisted-install/v2/clusters?with_hosts=true" -H "accept: application/json" -H "get_unregistered_clusters: false"| jq -r '.[].progress.total_percentage')
done

echo
# -p: do not fail if ~/.kube already exists. Use an absolute path for the
# kubeconfig -- the original redirected to the relative ".kube/config", which
# only works when the script happens to be run from $HOME.
mkdir -p ~/.kube
curl -X GET "$AI_URL/api/assisted-install/v1/clusters/$CLUSTER_ID/downloads/kubeconfig" -H "accept: application/octet-stream" > ~/.kube/config


--------------------------------------------------------------------------------
/scripts/full-deploy-ai-multinode-bond.sh:
--------------------------------------------------------------------------------
#!/bin/bash
export AI_URL='http://192.167.124.1:8090'
export CLUSTER_SSHKEY=$(cat ~/.ssh/id_ed25519.pub)
# jq -R wraps the raw pull secret in a JSON string literal (quotes included).
export PULL_SECRET=$(cat pull-secret.txt | jq -R .)

##### Create Cluster definition data file ######

cat << EOF > ./deployment-multinodes.json
{
  "kind": "Cluster",
  "name": "ocpd",
  "openshift_version": "4.9",
  "base_dns_domain": "lab.local",
  "hyperthreading": "all",
  "ingress_vip": "192.167.124.8",
  "schedulable_masters": false,
  "platform": {
    "type": "baremetal"
  },
  "user_managed_networking": false,
  "cluster_networks": [
    {
      "cidr": "10.128.0.0/14",
      "host_prefix": 23
    }
  ],
  "service_networks": [
    {
      "cidr": "172.31.0.0/16"
    }
  ],
  "machine_networks": [
    {
      "cidr": "192.167.124.0/24"
    }
  ],
  "network_type": "OVNKubernetes",
  "additional_ntp_source": "ntp1.hetzner.de",
  "vip_dhcp_allocation": false,
  "high_availability_mode": "Full",
  "hosts": [],
  "ssh_public_key": "$CLUSTER_SSHKEY",
  "pull_secret": $PULL_SECRET
}
EOF
# NOTE: pull_secret must NOT be wrapped in extra quotes -- $PULL_SECRET is
# already a quoted JSON string (jq -R above). The original wrote
# "pull_secret": "$PULL_SECRET", producing invalid JSON.


##### Create cluster definition

curl -s -X POST "$AI_URL/api/assisted-install/v1/clusters" \
  -d @./deployment-multinodes.json \
  --header "Content-Type: application/json" \
  | jq .


CLUSTER_ID=$(curl -s -X GET "$AI_URL/api/assisted-install/v2/clusters?with_hosts=true" -H "accept: application/json" -H "get_unregistered_clusters: false"| jq -r '.[].id')

echo $CLUSTER_ID



######### create definition file for bond ####
# The ssh key is passed in as a jq named argument: inside a single-quoted jq
# program "$CLUSTER_SSHKEY" is NOT expanded by the shell, so the original sent
# the literal text $CLUSTER_SSHKEY as the ssh_public_key value.
# NOTE(review): reads the nmstate files from ~/bond/ -- the repo keeps them
# under config/; confirm they are copied there by the deployment docs.
jq -n --arg SSHKEY "$CLUSTER_SSHKEY" --arg NMSTATE_YAML1 "$(cat ~/bond/nmstate-bond-worker0.yaml)" --arg NMSTATE_YAML2 "$(cat ~/bond/nmstate-bond-worker1.yaml)" '{
  "ssh_public_key": $SSHKEY,
  "image_type": "full-iso",
  "static_network_config": [
    {
      "network_yaml": $NMSTATE_YAML1,
      "mac_interface_map": [{"mac_address": "aa:bb:cc:11:42:20", "logical_nic_name": "ens3"}, {"mac_address": "aa:bb:cc:11:42:50", "logical_nic_name": "ens4"},{"mac_address": "aa:bb:cc:11:42:60", "logical_nic_name": "ens5"}]
    },
    {
      "network_yaml": $NMSTATE_YAML2,
      "mac_interface_map": [{"mac_address": "aa:bb:cc:11:42:21", "logical_nic_name": "ens3"}, {"mac_address": "aa:bb:cc:11:42:51", "logical_nic_name": "ens4"},{"mac_address": "aa:bb:cc:11:42:61", "logical_nic_name": "ens5"}]
    }
  ]
}' > data-net


##### Create image ####
curl -H "Content-Type: application/json" -X POST -d @data-net ${AI_URL}/api/assisted-install/v1/clusters/$CLUSTER_ID/downloads/image | jq .
##### Download image #####

curl -L "$AI_URL/api/assisted-install/v1/clusters/$CLUSTER_ID/downloads/image" -o /var/lib/libvirt/images/discovery_image_ocpd.iso

#### start masters ##

terraform -chdir=/opt/terraform/ai-bond apply -auto-approve



#### assign the master role to every discovered host ####

for i in `curl -s -X GET "$AI_URL/api/assisted-install/v2/clusters?with_hosts=true"\
 -H "accept: application/json" -H "get_unregistered_clusters: false"| jq -r '.[].hosts[].id'| awk 'NR>0' |awk '{print $1;}'`
do curl -X PATCH "$AI_URL/api/assisted-install/v1/clusters/$CLUSTER_ID" -H "accept: application/json" -H "Content-Type: application/json" -d "{ \"hosts_roles\": [ { \"id\": \"$i\", \"role\": \"master\" } ]}"
done


### set api IP ###

curl -X PATCH "$AI_URL/api/assisted-install/v1/clusters/$CLUSTER_ID" -H "accept: application/json" -H "Content-Type: application/json" -d "{ \"api_vip\": \"192.167.124.7\"}"

### Start workers ####
for i in {0..1}
do virsh start ocp4-worker$i
done

sleep 180

curl -X POST \
 "$AI_URL/api/assisted-install/v1/clusters/$CLUSTER_ID/actions/install" \
 -H "accept: application/json" \
 -H "Content-Type: application/json"

echo Wait for install to complete

# Seed STATUS before the first test -- it was previously unset here, which only
# entered the loop by accident ("" != 100).
STATUS=0
while [[ $STATUS != 100 ]]
do
  sleep 5
  STATUS=$(curl -s -X GET "$AI_URL/api/assisted-install/v2/clusters?with_hosts=true" -H "accept: application/json" -H "get_unregistered_clusters: false"| jq -r '.[].progress.total_percentage')
done

echo
# Absolute path: the original redirected to the relative ".kube/config", which
# only works when the script is launched from $HOME. -p tolerates reruns.
mkdir -p ~/.kube
curl -X GET "$AI_URL/api/assisted-install/v1/clusters/$CLUSTER_ID/downloads/kubeconfig" -H "accept: application/octet-stream" > ~/.kube/config
--------------------------------------------------------------------------------
/terraform/ocp4-ai-cluster/ocp4-lab.tf:
--------------------------------------------------------------------------------
terraform { 2 | required_providers { 3 | libvirt = { 4 | source = "dmacvicar/libvirt" 5 | } 6 | } 7 | } 8 | provider "libvirt" { 9 | uri = "qemu:///system" 10 | } 11 | # resource "libvirt_network" "ocp_network" { 12 | # name = "ocp4-net" 13 | # mode = "nat" 14 | # autostart = true 15 | # domain = "lab.local" 16 | # addresses = ["192.167.124/24"] 17 | # bridge = "virbr-ocp4" 18 | # dhcp { 19 | # enabled = false 20 | # } 21 | # } 22 | 23 | variable "worker" { 24 | type = list(string) 25 | default = ["ocp4-worker1", "ocp4-worker2","ocp4-worker3"] 26 | } 27 | variable "master" { 28 | type = list(string) 29 | default = ["ocp4-master1", "ocp4-master2","ocp4-master3"] 30 | } 31 | 32 | variable "worker-ht" { 33 | type = list(string) 34 | default = ["ocp4-worker1-ht"] 35 | } 36 | ####workers 37 | resource "libvirt_volume" "fatdisk-workers" { 38 | name = "fatdisk-${element(var.worker, count.index)}" 39 | pool = "images" 40 | size = 130000000000 41 | count = "${length(var.worker)}" 42 | } 43 | resource "libvirt_volume" "volume-mon-workers" { 44 | name = "volume-mon-${element(var.worker, count.index)}" 45 | pool = "images" 46 | size = "30000000000" 47 | format = "qcow2" 48 | count = "${length(var.worker)}" 49 | } 50 | resource "libvirt_volume" "volume-osd1-workers" { 51 | name = "volume-osd1-${element(var.worker, count.index)}" 52 | pool = "images" 53 | size = "30000000000" 54 | format = "qcow2" 55 | count = "${length(var.worker)}" 56 | } 57 | resource "libvirt_volume" "volume-osd2-workers" { 58 | name = "volume-osd2-${element(var.worker, count.index)}" 59 | pool = "images" 60 | size = "30000000000" 61 | format = "qcow2" 62 | count = "${length(var.worker)}" 63 | } 64 | resource "libvirt_domain" "workers" { 65 | name = "${element(var.worker, count.index)}" 66 | memory = "32000" 67 | vcpu = 8 68 | cpu { 69 | mode = "host-passthrough" 70 | } 71 | running = false 72 | boot_device { 73 | dev = ["hd","cdrom"] 74 | } 75 | network_interface { 76 | network_name = "ocp4-net" 77 | mac = 
"AA:BB:CC:11:42:2${count.index}" 78 | } 79 | console { 80 | type = "pty" 81 | target_port = "0" 82 | target_type = "serial" 83 | } 84 | 85 | console { 86 | type = "pty" 87 | target_type = "virtio" 88 | target_port = "1" 89 | } 90 | 91 | disk { 92 | volume_id = "${element(libvirt_volume.fatdisk-workers.*.id, count.index)}" 93 | } 94 | disk { 95 | file = "/var/lib/libvirt/images/discovery_image_ocpd.iso" 96 | } 97 | disk { 98 | volume_id = "${element(libvirt_volume.volume-mon-workers.*.id, count.index)}" 99 | } 100 | disk { 101 | volume_id = "${element(libvirt_volume.volume-osd1-workers.*.id, count.index)}" 102 | } 103 | disk { 104 | volume_id = "${element(libvirt_volume.volume-osd2-workers.*.id, count.index)}" 105 | } 106 | graphics { 107 | type = "spice" 108 | listen_type = "address" 109 | autoport = true 110 | } 111 | count = "${length(var.worker)}" 112 | # depends_on = [ 113 | # libvirt_network.ocp_network, 114 | # ] 115 | } 116 | ####workers-ht### 117 | resource "libvirt_volume" "fatdisk-worker-ht" { 118 | # name = "fatdisk-${element(var.worker, count.index)}" 119 | name = "fatdisk-${element(var.worker-ht, count.index)}" 120 | pool = "images" 121 | size = 130000000000 122 | count = "${length(var.worker-ht)}" 123 | } 124 | resource "libvirt_domain" "worker-ht" { 125 | name = "${element(var.worker-ht, count.index)}" 126 | memory = "32000" 127 | vcpu = 8 128 | cpu { 129 | mode = "host-passthrough" 130 | } 131 | running = false 132 | boot_device { 133 | dev = ["hd","cdrom"] 134 | } 135 | network_interface { 136 | network_name = "ocp4-net" 137 | mac = "AA:BB:CC:11:42:3${count.index}" 138 | } 139 | console { 140 | type = "pty" 141 | target_port = "0" 142 | target_type = "serial" 143 | } 144 | 145 | console { 146 | type = "pty" 147 | target_type = "virtio" 148 | target_port = "1" 149 | } 150 | 151 | disk { 152 | volume_id = "${element(libvirt_volume.fatdisk-worker-ht.*.id, count.index)}" 153 | } 154 | disk { 155 | file = 
"/var/lib/libvirt/images/discovery_image_ocpd.iso" 156 | } 157 | graphics { 158 | type = "spice" 159 | listen_type = "address" 160 | autoport = true 161 | } 162 | count = "${length(var.worker-ht)}" 163 | # depends_on = [ 164 | # libvirt_network.ocp_network, 165 | # ] 166 | } 167 | 168 | ####masters 169 | resource "libvirt_volume" "fatdisk-masters" { 170 | # name = "fatdisk-${element(var.master, count.index)}" 171 | name = "fatdisk-${element(var.master, count.index)}" 172 | pool = "images" 173 | size = 130000000000 174 | count = "${length(var.master)}" 175 | } 176 | 177 | 178 | resource "libvirt_domain" "masters" { 179 | name = "${element(var.master, count.index)}" 180 | memory = "32000" 181 | vcpu = 12 182 | cpu { 183 | mode = "host-passthrough" 184 | } 185 | running = true 186 | boot_device { 187 | dev = ["hd","cdrom"] 188 | } 189 | network_interface { 190 | network_name = "ocp4-net" 191 | mac = "AA:BB:CC:11:42:1${count.index}" 192 | } 193 | console { 194 | type = "pty" 195 | target_port = "0" 196 | target_type = "serial" 197 | } 198 | 199 | console { 200 | type = "pty" 201 | target_type = "virtio" 202 | target_port = "1" 203 | } 204 | 205 | disk { 206 | volume_id = "${element(libvirt_volume.fatdisk-masters.*.id, count.index)}" 207 | } 208 | disk { 209 | file = "/var/lib/libvirt/images/discovery_image_ocpd.iso" 210 | } 211 | graphics { 212 | type = "spice" 213 | listen_type = "address" 214 | autoport = true 215 | } 216 | count = "${length(var.master)}" 217 | # depends_on = [ 218 | # libvirt_network.ocp_network, 219 | # ] 220 | } 221 | -------------------------------------------------------------------------------- /terraform/ocp4-ai-cluster-net-pool/ocp4.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | libvirt = { 4 | source = "dmacvicar/libvirt" 5 | } 6 | } 7 | } 8 | provider "libvirt" { 9 | uri = "qemu:///system" 10 | } 11 | resource "libvirt_pool" "images" { 12 | name = "images" 13 
type = "dir"
  path = "/var/lib/libvirt/images"
}
resource "libvirt_network" "ocp_network" {
  name      = "ocp4-net"
  mode      = "nat"
  autostart = true
  domain    = "lab.local"
  # Fixed: "192.167.124/24" is not a valid CIDR (missing the final octet);
  # every other definition in this repo (poolnet.tf, dnsmasq.conf) uses
  # 192.167.124.0/24.
  addresses = ["192.167.124.0/24"]
  bridge    = "virbr-ocp4"
  dhcp {
    enabled = false
  }
  depends_on = [
    libvirt_pool.images,
  ]
}

variable "worker" {
  type    = list(string)
  default = ["ocp4-worker1", "ocp4-worker2", "ocp4-worker3"]
}
variable "master" {
  type    = list(string)
  default = ["ocp4-master1", "ocp4-master2", "ocp4-master3"]
}

variable "worker-ht" {
  type    = list(string)
  default = ["ocp4-worker1-ht"]
}
#### workers: one fat OS disk plus three Ceph-style data disks per node
resource "libvirt_volume" "fatdisk-workers" {
  name  = "fatdisk-${element(var.worker, count.index)}"
  pool  = "images"
  size  = 130000000000
  count = "${length(var.worker)}"
}
resource "libvirt_volume" "volume-mon-workers" {
  name   = "volume-mon-${element(var.worker, count.index)}"
  pool   = "images"
  size   = "30000000000"
  format = "qcow2"
  count  = "${length(var.worker)}"
}
resource "libvirt_volume" "volume-osd1-workers" {
  name   = "volume-osd1-${element(var.worker, count.index)}"
  pool   = "images"
  size   = "30000000000"
  format = "qcow2"
  count  = "${length(var.worker)}"
}
resource "libvirt_volume" "volume-osd2-workers" {
  name   = "volume-osd2-${element(var.worker, count.index)}"
  pool   = "images"
  size   = "30000000000"
  format = "qcow2"
  count  = "${length(var.worker)}"
}
resource "libvirt_domain" "workers" {
  name   = "${element(var.worker, count.index)}"
  memory = "32000"
  vcpu   = 8
  cpu {
    mode = "host-passthrough"
  }
  # Workers stay powered off; the deploy script starts them after the masters
  # are discovered.
  running = false
  boot_device {
    dev = ["hd", "cdrom"]
  }
  network_interface {
    network_name = "ocp4-net"
    # MAC matches the dhcp-host reservations in dnsmasq.conf (…:2X = workers).
    mac = "AA:BB:CC:11:42:2${count.index}"
  }
  console {
    type = "pty"
89 | target_port = "0" 90 | target_type = "serial" 91 | } 92 | 93 | console { 94 | type = "pty" 95 | target_type = "virtio" 96 | target_port = "1" 97 | } 98 | 99 | disk { 100 | volume_id = "${element(libvirt_volume.fatdisk-workers.*.id, count.index)}" 101 | } 102 | disk { 103 | file = "/var/lib/libvirt/images/discovery_image_ocpd.iso" 104 | } 105 | disk { 106 | volume_id = "${element(libvirt_volume.volume-mon-workers.*.id, count.index)}" 107 | } 108 | disk { 109 | volume_id = "${element(libvirt_volume.volume-osd1-workers.*.id, count.index)}" 110 | } 111 | disk { 112 | volume_id = "${element(libvirt_volume.volume-osd2-workers.*.id, count.index)}" 113 | } 114 | graphics { 115 | type = "spice" 116 | listen_type = "address" 117 | autoport = true 118 | } 119 | count = "${length(var.worker)}" 120 | depends_on = [ 121 | libvirt_network.ocp_network, 122 | ] 123 | } 124 | ####workers-ht### 125 | resource "libvirt_volume" "fatdisk-worker-ht" { 126 | # name = "fatdisk-${element(var.worker, count.index)}" 127 | name = "fatdisk-${element(var.worker-ht, count.index)}" 128 | pool = "images" 129 | size = 130000000000 130 | count = "${length(var.worker-ht)}" 131 | } 132 | resource "libvirt_domain" "worker-ht" { 133 | name = "${element(var.worker-ht, count.index)}" 134 | memory = "32000" 135 | vcpu = 8 136 | cpu { 137 | mode = "host-passthrough" 138 | } 139 | running = false 140 | boot_device { 141 | dev = ["hd","cdrom"] 142 | } 143 | network_interface { 144 | network_name = "ocp4-net" 145 | mac = "AA:BB:CC:11:42:3${count.index}" 146 | } 147 | console { 148 | type = "pty" 149 | target_port = "0" 150 | target_type = "serial" 151 | } 152 | 153 | console { 154 | type = "pty" 155 | target_type = "virtio" 156 | target_port = "1" 157 | } 158 | 159 | disk { 160 | volume_id = "${element(libvirt_volume.fatdisk-worker-ht.*.id, count.index)}" 161 | } 162 | disk { 163 | file = "/var/lib/libvirt/images/discovery_image_ocpd.iso" 164 | } 165 | graphics { 166 | type = "spice" 167 | listen_type = 
"address" 168 | autoport = true 169 | } 170 | count = "${length(var.worker-ht)}" 171 | depends_on = [ 172 | libvirt_network.ocp_network, 173 | ] 174 | } 175 | 176 | ####masters 177 | resource "libvirt_volume" "fatdisk-masters" { 178 | # name = "fatdisk-${element(var.master, count.index)}" 179 | name = "fatdisk-${element(var.master, count.index)}" 180 | pool = "images" 181 | size = 130000000000 182 | count = "${length(var.master)}" 183 | } 184 | 185 | 186 | resource "libvirt_domain" "masters" { 187 | name = "${element(var.master, count.index)}" 188 | memory = "32000" 189 | vcpu = 12 190 | cpu { 191 | mode = "host-passthrough" 192 | } 193 | running = true 194 | boot_device { 195 | dev = ["hd","cdrom"] 196 | } 197 | network_interface { 198 | network_name = "ocp4-net" 199 | mac = "AA:BB:CC:11:42:1${count.index}" 200 | } 201 | console { 202 | type = "pty" 203 | target_port = "0" 204 | target_type = "serial" 205 | } 206 | 207 | console { 208 | type = "pty" 209 | target_type = "virtio" 210 | target_port = "1" 211 | } 212 | 213 | disk { 214 | volume_id = "${element(libvirt_volume.fatdisk-masters.*.id, count.index)}" 215 | } 216 | disk { 217 | file = "/var/lib/libvirt/images/discovery_image_ocpd.iso" 218 | } 219 | graphics { 220 | type = "spice" 221 | listen_type = "address" 222 | autoport = true 223 | } 224 | count = "${length(var.master)}" 225 | depends_on = [ 226 | libvirt_network.ocp_network, 227 | ] 228 | } 229 | -------------------------------------------------------------------------------- /terraform/ai-bond/ocp4-ai-bond.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | libvirt = { 4 | source = "dmacvicar/libvirt" 5 | } 6 | } 7 | } 8 | # instance the provider 9 | provider "libvirt" { 10 | # uri = "qemu+ssh://root@kvm-ovh/system" 11 | uri = "qemu:///system" 12 | } 13 | resource "libvirt_network" "kube_network" { 14 | name = "ocp4ai-net" 15 | mode = "nat" 16 | autostart = true 17 | domain 
= "lab.local"
  addresses = ["10.17.3.0/24"]
  bridge    = "br7"
  dhcp {
    enabled = false
  }
  dns {
    enabled    = true
    local_only = false
    forwarders {
      address = "192.167.124.5"
    }
  }
}
# Renamed from "kube_network": this resource was declared with the same
# type+name as the one above, which is a hard "duplicate resource" error in
# Terraform. depends_on references to libvirt_network.kube_network elsewhere
# in this file still resolve to the resource above.
resource "libvirt_network" "ocp_network" {
  name      = "ocp4-net"
  mode      = "nat"
  autostart = true
  domain    = "lab.local"
  addresses = ["192.167.124.0/24"]
  bridge    = "br-bond"
  dhcp {
    enabled = false
  }
  dns {
    enabled    = true
    local_only = false
    forwarders {
      address = "192.167.124.5"
    }
  }
}

variable "worker" {
  type    = list(string)
  default = ["ocp4-worker0", "ocp4-worker1", "ocp4-worker2"]
}
variable "master" {
  type    = list(string)
  default = ["ocp4-master1", "ocp4-master2", "ocp4-master3"]
}

variable "worker-ht" {
  type    = list(string)
  default = ["ocp4-worker1-ht"]
}
#### workers: one fat OS disk plus three data disks per node
resource "libvirt_volume" "fatdisk-workers" {
  name  = "fatdisk-${element(var.worker, count.index)}"
  pool  = "images"
  size  = 130000000000
  count = "${length(var.worker)}"
}
resource "libvirt_volume" "volume-mon-workers" {
  name   = "volume-mon-${element(var.worker, count.index)}"
  pool   = "images"
  size   = "30000000000"
  format = "qcow2"
  count  = "${length(var.worker)}"
}
resource "libvirt_volume" "volume-osd1-workers" {
  name   = "volume-osd1-${element(var.worker, count.index)}"
  pool   = "images"
  size   = "30000000000"
  format = "qcow2"
  count  = "${length(var.worker)}"
}
resource "libvirt_volume" "volume-osd2-workers" {
  name   = "volume-osd2-${element(var.worker, count.index)}"
  pool   = "images"
  size   = "30000000000"
  format = "qcow2"
  count  = "${length(var.worker)}"
}
resource "libvirt_domain" "workers" {
  name = "${element(var.worker,
count.index)}" 96 | memory = "32000" 97 | vcpu = 8 98 | cpu { 99 | mode = "host-passthrough" 100 | } 101 | running = false 102 | boot_device { 103 | dev = ["hd","cdrom"] 104 | } 105 | network_interface { 106 | network_name = "ocp4-net" 107 | mac = "AA:BB:CC:11:42:2${count.index}" 108 | } 109 | network_interface { 110 | network_name = "ocp4-net" 111 | mac = "AA:BB:CC:11:42:5${count.index}" 112 | } 113 | network_interface { 114 | network_name = "ocp4ai-net" 115 | mac = "AA:BB:CC:11:42:6${count.index}" 116 | } 117 | 118 | console { 119 | type = "pty" 120 | target_port = "0" 121 | target_type = "serial" 122 | } 123 | 124 | console { 125 | type = "pty" 126 | target_type = "virtio" 127 | target_port = "1" 128 | } 129 | 130 | disk { 131 | volume_id = "${element(libvirt_volume.fatdisk-workers.*.id, count.index)}" 132 | } 133 | disk { 134 | file = "/var/lib/libvirt/images/discovery_image_ocpd.iso" 135 | } 136 | disk { 137 | volume_id = "${element(libvirt_volume.volume-mon-workers.*.id, count.index)}" 138 | } 139 | disk { 140 | volume_id = "${element(libvirt_volume.volume-osd1-workers.*.id, count.index)}" 141 | } 142 | disk { 143 | volume_id = "${element(libvirt_volume.volume-osd2-workers.*.id, count.index)}" 144 | } 145 | graphics { 146 | type = "spice" 147 | listen_type = "address" 148 | autoport = true 149 | } 150 | count = "${length(var.worker)}" 151 | depends_on = [ 152 | libvirt_network.kube_network, 153 | ] 154 | } 155 | ####workers-ht### 156 | resource "libvirt_volume" "fatdisk-worker-ht" { 157 | # name = "fatdisk-${element(var.worker, count.index)}" 158 | name = "fatdisk-${element(var.worker-ht, count.index)}" 159 | pool = "images" 160 | size = 130000000000 161 | count = "${length(var.worker-ht)}" 162 | } 163 | resource "libvirt_domain" "worker-ht" { 164 | name = "${element(var.worker-ht, count.index)}" 165 | memory = "32000" 166 | vcpu = 8 167 | cpu { 168 | mode = "host-passthrough" 169 | } 170 | running = false 171 | boot_device { 172 | dev = ["hd","cdrom"] 173 | 
} 174 | network_interface { 175 | network_name = "ocp4-net" 176 | mac = "AA:BB:CC:11:42:3${count.index}" 177 | } 178 | network_interface { 179 | network_name = "ocp4-net" 180 | mac = "AA:BB:CC:11:42:A${count.index}" 181 | } 182 | network_interface { 183 | network_name = "ocp4ai-net" 184 | mac = "AA:BB:CC:11:42:B${count.index}" 185 | } 186 | 187 | console { 188 | type = "pty" 189 | target_port = "0" 190 | target_type = "serial" 191 | } 192 | 193 | console { 194 | type = "pty" 195 | target_type = "virtio" 196 | target_port = "1" 197 | } 198 | 199 | disk { 200 | volume_id = "${element(libvirt_volume.fatdisk-worker-ht.*.id, count.index)}" 201 | } 202 | disk { 203 | file = "/var/lib/libvirt/images/discovery_image_ocpd.iso" 204 | } 205 | graphics { 206 | type = "spice" 207 | listen_type = "address" 208 | autoport = true 209 | } 210 | count = "${length(var.worker-ht)}" 211 | depends_on = [ 212 | libvirt_network.kube_network, 213 | ] 214 | } 215 | 216 | ####masters 217 | resource "libvirt_volume" "fatdisk-masters" { 218 | # name = "fatdisk-${element(var.master, count.index)}" 219 | name = "fatdisk-${element(var.master, count.index)}" 220 | pool = "images" 221 | size = 130000000000 222 | count = "${length(var.master)}" 223 | } 224 | 225 | 226 | resource "libvirt_domain" "masters" { 227 | name = "${element(var.master, count.index)}" 228 | memory = "32000" 229 | vcpu = 12 230 | cpu { 231 | mode = "host-passthrough" 232 | } 233 | running = true 234 | boot_device { 235 | dev = ["hd","cdrom"] 236 | } 237 | network_interface { 238 | network_name = "ocp4-net" 239 | mac = "AA:BB:CC:11:42:1${count.index}" 240 | } 241 | console { 242 | type = "pty" 243 | target_port = "0" 244 | target_type = "serial" 245 | } 246 | 247 | console { 248 | type = "pty" 249 | target_type = "virtio" 250 | target_port = "1" 251 | } 252 | 253 | disk { 254 | volume_id = "${element(libvirt_volume.fatdisk-masters.*.id, count.index)}" 255 | } 256 | disk { 257 | file = 
"/var/lib/libvirt/images/discovery_image_ocpd.iso" 258 | } 259 | graphics { 260 | type = "spice" 261 | listen_type = "address" 262 | autoport = true 263 | } 264 | count = "${length(var.master)}" 265 | depends_on = [ 266 | libvirt_network.kube_network, 267 | ] 268 | } 269 | -------------------------------------------------------------------------------- /scripts/full-deploy-ai-calico.sh: -------------------------------------------------------------------------------- 1 | export AI_URL='http://192.167.124.1:8090' 2 | export CLUSTER_SSHKEY=$(cat ~/.ssh/id_ed25519.pub) 3 | export PULL_SECRET=$(cat pull-secret.txt | jq -R .) 4 | 5 | curl -s -X POST "$AI_URL/api/assisted-install/v1/clusters" \ 6 | -d @./deployment-multinodes-calico.json \ 7 | --header "Content-Type: application/json" \ 8 | | jq . 9 | 10 | 11 | CLUSTER_ID=$(curl -s -X GET "$AI_URL/api/assisted-install/v2/clusters?with_hosts=true" -H "accept: application/json" -H "get_unregistered_clusters: false"| jq -r '.[].id') 12 | 13 | echo $CLUSTER_ID 14 | 15 | ####patch cluster and set networking to Calico### 16 | 17 | 18 | curl \ 19 | --header "Content-Type: application/json" \ 20 | --request PATCH \ 21 | --data '"{\"networking\":{\"networkType\":\"Calico\"}}"' \ 22 | "$AI_URL/api/assisted-install/v1/clusters/$CLUSTER_ID/install-config" 23 | 24 | curl -s -X GET \ 25 | --header "Content-Type: application/json" \ 26 | "$AI_URL/api/assisted-install/v1/clusters/$CLUSTER_ID/install-config" | jq -r . 27 | 28 | #Build the deployment ISO: 29 | curl -s -X POST "$AI_URL/api/assisted-install/v1/clusters/$CLUSTER_ID/downloads/image" \ 30 | -d @iso-params.json \ 31 | --header "Content-Type: application/json" \ 32 | | jq '.' 
# Download the discovery ISO into the libvirt images directory so the VMs can boot from it
curl -fL "$AI_URL/api/assisted-install/v1/clusters/$CLUSTER_ID/downloads/image" \
  -o /var/lib/libvirt/images/discovery_image_ocpd.iso

#### start masters ####
terraform -chdir=/opt/terraform/ocp4-ai-cluster init
terraform -chdir=/opt/terraform/ocp4-ai-cluster apply -auto-approve

echo Done!!!

echo Download Calico manifests

CALICO_BASE=https://docs.projectcalico.org/manifests/ocp

# -p: do not fail when the directory is left over from a previous run
mkdir -p manifests

# Tigera operator CRDs
# NOTE: -f makes curl fail on HTTP errors instead of saving an HTML error page as a manifest
for f in 01-crd-apiserver 01-crd-installation 01-crd-imageset 01-crd-tigerastatus
do
  curl -fsSL "$CALICO_BASE/crds/$f.yaml" -o "manifests/$f.yaml"
done

# Calico KDD CRDs
for f in bgpconfigurations bgppeers blockaffinities clusterinformations \
         felixconfigurations globalnetworkpolicies globalnetworksets hostendpoints \
         ipamblocks ipamconfigs ipamhandles ippools kubecontrollersconfigurations \
         networkpolicies networksets
do
  curl -fsSL "$CALICO_BASE/crds/calico/kdd/crd.projectcalico.org_$f.yaml" \
    -o "manifests/crd.projectcalico.org_$f.yaml"
done

# Tigera operator deployment resources
for f in 00-namespace-tigera-operator 02-rolebinding-tigera-operator 02-role-tigera-operator \
         02-serviceaccount-tigera-operator 02-configmap-calico-resources 02-tigera-operator
do
  curl -fsSL "$CALICO_BASE/tigera-operator/$f.yaml" -o "manifests/$f.yaml"
done

# Installation and API-server custom resources
for f in 01-cr-installation 01-cr-apiserver
do
  curl -fsSL "$CALICO_BASE/$f.yaml" -o "manifests/$f.yaml"
done

# Upload every downloaded manifest (base64-encoded) to the cluster's "manifests" folder.
# Glob instead of `ls` output so filenames are never word-split.
for path in manifests/*
do
  i=$(basename "$path")
  j=$(base64 -w 0 < "$path")
  curl -X POST "$AI_URL/api/assisted-install/v1/clusters/$CLUSTER_ID/manifests" \
    -H "accept: application/json" -H "Content-Type: application/json" \
    -d "{ \"folder\": \"manifests\", \"file_name\": \"$i\", \"content\": \"$j\"}"
done

##### Realtime kernel for workers ####
echo Create a machine config for the real-time kernel

cat << EOF > 99-worker-realtime.yaml
apiVersion: machineconfiguration.openshift.io/v1
kind: MachineConfig
metadata:
  labels:
    machineconfiguration.openshift.io/role: "worker"
  name: 99-worker-realtime
spec:
  kernelType: realtime
EOF

i=99-worker-realtime.yaml
j=$(base64 -w 0 < "$i")
# MachineConfigs go in the "openshift" folder so they are applied at install time
curl -X POST "$AI_URL/api/assisted-install/v1/clusters/$CLUSTER_ID/manifests" \
  -H "accept: application/json" -H "Content-Type: application/json" \
  -d "{ \"folder\": \"openshift\", \"file_name\": \"$i\", \"content\": \"$j\"}"

echo Wait for the discovery process to happen

sleep 180  # adjust to your infra
echo Get cluster info
curl -s -X GET --header "Content-Type: application/json" "$AI_URL/api/assisted-install/v1/clusters" | jq .
107 | 108 | echo Assign Master role to discovered nodes 109 | 110 | for i in `curl -s -X GET "$AI_URL/api/assisted-install/v2/clusters?with_hosts=true"\ 111 | -H "accept: application/json" -H "get_unregistered_clusters: false"| jq -r '.[].hosts[].id'| awk 'NR>0' |awk '{print $1;}'` 112 | do curl -X PATCH "$AI_URL/api/assisted-install/v1/clusters/$CLUSTER_ID" -H "accept: application/json" -H "Content-Type: application/json" -d "{ \"hosts_roles\": [ { \"id\": \"$i\", \"role\": \"master\" } ]}" 113 | done 114 | 115 | 116 | echo Set API IP 117 | 118 | curl -X PATCH "$AI_URL/api/assisted-install/v1/clusters/$CLUSTER_ID" -H "accept: application/json" -H "Content-Type: application/json" -d "{ \"api_vip\": \"192.167.124.7\"}" 119 | 120 | echo Start workers 121 | 122 | for i in {1..2} 123 | do virsh start ocp4-worker$i 124 | done 125 | 126 | sleep 180 ####adjust to ur infra 127 | 128 | 129 | echo Start instalation 130 | curl -X POST \ 131 | "$AI_URL/api/assisted-install/v1/clusters/$CLUSTER_ID/actions/install" \ 132 | -H "accept: application/json" \ 133 | -H "Content-Type: application/json" 134 | 135 | STATUS=$(curl -s -X GET "$AI_URL/api/assisted-install/v2/clusters?with_hosts=true" -H "accept: application/json" -H "get_unregistered_clusters: false"| jq -r '.[].progress.total_percentage') 136 | 137 | echo Wait for install to complete 138 | 139 | while [[ $STATUS != 100 ]] 140 | do 141 | sleep 5 142 | STATUS=$(curl -s -X GET "$AI_URL/api/assisted-install/v2/clusters?with_hosts=true" -H "accept: application/json" -H "get_unregistered_clusters: false"| jq -r '.[].progress.total_percentage') 143 | done 144 | 145 | echo 146 | mkdir ~/.kube 147 | curl -X GET "$AI_URL/api/assisted-install/v1/clusters/$CLUSTER_ID/downloads/kubeconfig" -H "accept: application/octet-stream" > .kube/config 148 | 149 | 150 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # 
Assisted Installer on premise deep dive 2 | 3 | ## Introduction 4 | 5 | In this series of blog posts, we will demonstrate how Infrastructure as Code becomes a reality with OpenShift Assisted Installer on premise. This post will leverage KVM to show how to use Assisted Installer to deploy OpenShift, but the concepts here can extend to bare-metal or vSphere deployments just as easily. 6 | 7 | ## Lab Preparation 8 | 9 | In this lab we will simulate bare-metal nodes with KVM VMs. Terraform will be used to orchestrate this virtual infrastructure. 10 | A minimum of 256 GB of RAM and a 500 GB SSD drive is recommended. The scripts and install steps below are based around the use of a CentOS 8 machine as your host machine. 11 | In order to have everything set and all the bits installed, run the following commands: 12 | 13 | ```bash 14 | git clone https://github.com/latouchek/assisted-installer-deepdive.git 15 | cd assisted-installer-deepdive 16 | cp -r terraform /opt/ 17 | cd scripts 18 | sh prepare-kvm-host.sh 19 | ``` 20 | 21 | The script creates a dedicated ocp network. It is mandatory to have a DNS server and a static DHCP server on that network. 22 | A `dnsmasq.conf` template is provided in `assisted-installer-deepdive/config/` with MAC addresses matching the OCP VMs that we will deploy later. It can be run on the host or on a dedicated VM/container. 23 | 24 | ## Part I : Deploying the OpenShift Assisted Installer service on premise 25 | 26 | ### 1. 
Get the bits and build the service 27 | 28 | ```bash 29 | sed -i 's/SELINUX=enforcing/SELINUX=disabled/g' /etc/sysconfig/selinux 30 | setenforce 0 31 | dnf install -y @container-tools 32 | dnf group install "Development Tools" -y 33 | dnf -y install python3-pip socat make tmux git jq crun 34 | git clone https://github.com/openshift/assisted-service 35 | cd assisted-service 36 | IP=192.167.124.1 37 | AI_URL=http://$IP:8090 38 | ``` 39 | 40 | Modify **onprem-environment** and **Makefile** to set proper URL and port forwarding 41 | 42 | ```bash 43 | sed -i "s@SERVICE_BASE_URL=.*@SERVICE_BASE_URL=$AI_URL@" onprem-environment 44 | sed -i "s/5432,8000,8090,8080/5432:5432 -p 8000:8000 -p 8090:8090 -p 8080:8080/" Makefile 45 | make deploy-onprem 46 | . 47 | . 48 | . 49 | ``` 50 | 51 | If everything went well, we should see 4 containers running inside a pod 52 | 53 | ```bash 54 | [root@kvm-host ~]podman ps 55 | CONTAINER ID IMAGE COMMAND CREATED STATUS PORTS NAMES 56 | a940818185cb k8s.gcr.io/pause:3.5 3 minutes ago Up 2 minutes ago 0.0.0.0:5432->5432/tcp, 0.0.0.0:8000->8000/tcp, 0.0.0.0:8080->8080/tcp, 0.0.0.0:8090->8090/tcp 59b56cb07140-infra 57 | d94a46c8b515 quay.io/ocpmetal/postgresql-12-centos7:latest run-postgresql 2 minutes ago Up 2 minutes ago 0.0.0.0:5432->5432/tcp, 0.0.0.0:8000->8000/tcp, 0.0.0.0:8080->8080/tcp, 0.0.0.0:8090->8090/tcp db 58 | 8c0e90d8c4fa quay.io/ocpmetal/ocp-metal-ui:latest /opt/bitnami/scri... 
About a minute ago Up About a minute ago 0.0.0.0:5432->5432/tcp, 0.0.0.0:8000->8000/tcp, 0.0.0.0:8080->8080/tcp, 0.0.0.0:8090->8090/tcp ui 59 | e98627cdc5f8 quay.io/ocpmetal/assisted-service:latest /assisted-service 42 seconds ago Up 43 seconds ago 0.0.0.0:5432->5432/tcp, 0.0.0.0:8000->8000/tcp, 0.0.0.0:8080->8080/tcp, 0.0.0.0:8090->8090/tcp installer 60 | ``` 61 | 62 | ```bash 63 | [root@kvm-host ~] podman pod ps 64 | POD ID NAME STATUS CREATED INFRA ID # OF CONTAINERS 65 | 59b56cb07140 assisted-installer Running 4 minutes ago a940818185cb 4 66 | ``` 67 | 68 | API should be accessible at and GUI at 69 | 70 | API documentation can be found [here](https://generator.swagger.io/?url=https://raw.githubusercontent.com/openshift/assisted-service/master/swagger.yaml) 71 | 72 | ### 2. How does it work 73 | 74 | In order to provision a cluster the following process must be followed: 75 | 76 | - Create a new OpenShift cluster definition in a json file 77 | - Register the new cluster by presenting the definition data to the API 78 | - Create a discovery boot media the nodes will boot from in order to be introspected and validated 79 | - Assign roles to introspected nodes and complete the cluster definition 80 | - Trigger the deployment 81 | 82 | ## Part II : Using the Assisted Installer API 83 | 84 | In this part we will show how to deploy a 5 nodes OCP cluster by following the steps we mentioned above. 85 | Even though this lab is purely cli based it is recommended to have the [UI](http://192.167.124.1:8080/) on sight to understand the whole process. 86 | 87 | ### 1. Deploy our first cluster with AI API 88 | 89 | - Create a cluster definition file 90 | 91 | ```bash 92 | export CLUSTER_SSHKEY=$(cat ~/.ssh/id_ed25519.pub) 93 | export PULL_SECRET=$(cat pull-secret.txt | jq -R .) 
94 | cat << EOF > ./deployment-multinodes.json 95 | { 96 | "kind": "Cluster", 97 | "name": "ocpd", 98 | "openshift_version": "4.8", 99 | "ocp_release_image": "quay.io/openshift-release-dev/ocp-release:4.8.5-x86_64", 100 | "base_dns_domain": "lab.local", 101 | "hyperthreading": "all", 102 | "ingress_vip": "192.167.124.8", 103 | "schedulable_masters": false, 104 | "high_availability_mode": "Full", 105 | "user_managed_networking": false, 106 | "platform": { 107 | "type": "baremetal" 108 | }, 109 | "cluster_networks": [ 110 | { 111 | "cidr": "10.128.0.0/14", 112 | "host_prefix": 23 113 | } 114 | ], 115 | "service_networks": [ 116 | { 117 | "cidr": "172.31.0.0/16" 118 | } 119 | ], 120 | "machine_networks": [ 121 | { 122 | "cidr": "192.167.124.0/24" 123 | } 124 | ], 125 | "network_type": "OVNKubernetes", 126 | "additional_ntp_source": "ntp1.hetzner.de", 127 | "vip_dhcp_allocation": false, 128 | "ssh_public_key": "$CLUSTER_SSHKEY", 129 | "pull_secret": $PULL_SECRET 130 | } 131 | EOF 132 | ``` 133 | 134 | **high_availability_mode** and **schedulable_masters** parameters let you decide what type of cluster you want to install. Here is how to set those parameters: 135 | 136 | - 3 nodes clusters: **"high_availability_mode": "Full"** and **"schedulable_masters": true** 137 | - 3+ nodes clusters: **"high_availability_mode": "Full"** and **"schedulable_masters": false** 138 | - Single Node: **"high_availability_mode": "None"** 139 | 140 | You can choose if you want to handle **loadbalancing** in house or leave it to OCP by setting **user_managed_networking** to **true**. In both case, DHCP and DNS server are mandatory (Only DNS in the case of a static IP deployment). 141 | 142 | - Use deployment-multinodes.json to register the new cluster 143 | 144 | ```bash 145 | AI_URL='http://192.167.124.1:8090' 146 | curl -s -X POST "$AI_URL/api/assisted-install/v1/clusters" \ 147 | -d @./deployment-multinodes.json --header "Content-Type: application/json" | jq . 
148 | ``` 149 | 150 | - Check cluster is registered 151 | Once the cluster definition has been sent to an the API we should be able to retrieve its unique id 152 | 153 | ```bash 154 | CLUSTER_ID=$(curl -s -X GET "$AI_URL/api/assisted-install/v2/clusters?with_hosts=true" -H "accept: application/json" -H "get_unregistered_clusters: false"| jq -r '.[].id') 155 | [root@kvm-host ~] echo $CLUSTER_ID 156 | 43b9c2f0-218e-4e76-8889-938fd52d6290 157 | ``` 158 | 159 | - Check the new cluster status 160 | 161 | ```bash 162 | [root@kvm-host ~] curl -s -X GET "$AI_URL/api/assisted-install/v2/clusters?with_hosts=true" -H "accept: application/json" -H "get_unregistered_clusters: false"| jq -r '.[].status' 163 | pending-for-input 164 | ``` 165 | 166 | When registering a cluster, the assisted installer runs a series of validation tests to assess if the cluster is ready to be deployed. 167 | 'pending-for-input' tells us we need to take some actions. Let's take a look at validations_info: 168 | 169 | ```bash 170 | curl -s -X GET "$AI_URL/api/assisted-install/v2/clusters?with_hosts=true" -H "accept: application/json" -H "get_unregistered_clusters: false"| jq -r '.[].validations_info'|jq . 171 | ``` 172 | 173 | We can see below that the installer is waiting for the hosts . Before building the hosts, we need to create the Discovery ISO. 174 | 175 | ```json 176 | { 177 | "id": "sufficient-masters-count", 178 | "status": "failure", 179 | "message": "Clusters must have exactly 3 dedicated masters. Please either add hosts, or disable the worker host" 180 | } 181 | ], 182 | 183 | { 184 | "id": "cluster-cidr-defined", 185 | "status": "success", 186 | "message": "The Cluster Network CIDR is defined." 187 | }, 188 | ``` 189 | 190 | - Build the discovery boot ISO 191 | The discovery boot ISO is a live CoreOS image that the nodes will boot from. Once booted an introspection will be performed by the discovery agent and data sent to the assisted service. 
If the node passes the validation tests its **status_info** will be **"Host is ready to be installed"**. 192 | We need some extra parameters to be injected into the ISO . To do so, we create a data file as described bellow: 193 | 194 | ```bash 195 | cat << EOF > ./discovery-iso-params.json 196 | { 197 | "ssh_public_key": "$CLUSTER_SSHKEY", 198 | "pull_secret": $PULL_SECRET, 199 | "image_type": "full-iso" 200 | } 201 | EOF 202 | ``` 203 | 204 | ISO is now ready to be built! Let's make the API call! As you can see we use the data file so pull-secret and ssh public key are injected into the live ISO. 205 | 206 | ```bash 207 | curl -s -X POST "$AI_URL/api/assisted-install/v1/clusters/$CLUSTER_ID/downloads/image" \ 208 | -d @discovery-iso-params.json \ 209 | --header "Content-Type: application/json" \ 210 | | jq '.' 211 | ``` 212 | 213 | In real world we would need to present this ISO to our hosts so they can boot from it. Because we are using KVM, we are going to download the ISO in the libvirt images directory and later create the VMs 214 | 215 | ```bash 216 | curl \ 217 | -L "$AI_URL/api/assisted-install/v1/clusters/$CLUSTER_ID/downloads/image" \ 218 | -o /var/lib/libvirt/images/discovery_image_ocpd.iso 219 | ``` 220 | 221 | - Start the nodes and the discovery process 222 | In this lab, BM nodes are virtual and need to be provisioned first. A Terraform file is provided and will build 3 Masters, 4 workers. All the VMS are using the previously generated ISO to boot. Run the following commands inside the Terraform folder 223 | 224 | ```bash 225 | [root@kvm-host terraform-ocp4-cluster-ai] terraform init ; terraform apply -auto-approve 226 | Apply complete! Resources: 24 added, 0 changed, 0 destroyed. 
227 | ``` 228 | 229 | ```bash 230 | [root@kvm-host terraform-ocp4-cluster-ai] virsh list --all 231 | Id Name State 232 | ----------------------------------- 233 | 59 ocp4-master3 running 234 | 60 ocp4-master1 running 235 | 61 ocp4-master2 running 236 | - ocp4-worker1 shut off 237 | - ocp4-worker1-ht shut off 238 | - ocp4-worker2 shut off 239 | - ocp4-worker3 shut off 240 | ``` 241 | 242 | Only the master nodes will start for now. Wait 1 mn for them to be discovered and check validations_info 243 | 244 | ```bash 245 | curl -s -X GET "$AI_URL/api/assisted-install/v2/clusters?with_hosts=true" \ 246 | -H "accept: application/json" \ 247 | -H "get_unregistered_clusters: false"| jq -r '.[].progress' 248 | ``` 249 | 250 | ```json 251 | ........ 252 | "hosts-data": [ 253 | { 254 | "id": "all-hosts-are-ready-to-install", 255 | "status": "success", 256 | "message": "All hosts in the cluster are ready to install." 257 | }, 258 | { 259 | "id": "sufficient-masters-count", 260 | "status": "success", 261 | "message": "The cluster has a sufficient number of master candidates." 262 | } 263 | ......... 264 | ``` 265 | 266 | Our hosts have been validated and are ready to be installed. Let's take a closer look at the discovery data. 
267 | - Retrieve the discovery hosts data with an API call 268 | 269 | ```bash 270 | [root@kvm-host ~]curl -s -X GET "$AI_URL/api/assisted-install/v2/clusters?with_hosts=true" \ 271 | -H "accept: application/json" \ 272 | -H "get_unregistered_clusters: false"| jq -r '.[].hosts' 273 | ``` 274 | 275 | ```bash 276 | { 277 | "checked_in_at": "2021-09-15T22:57:25.484Z", 278 | "cluster_id": "71db492e-207e-47eb-af7b-c7c716c7e09d", 279 | "connectivity": "{\"remote_hosts\":[{\"host_id\":\"2121a000-d27e-4596-a408-6813d3114caf\",\"l2_connectivity\":[{\"outgoing_ip_address\":\"192.167.124.12\",\"outgoing_nic\":\"ens3\",\"remote_ip_address\":\"192.167.124.13\",\"remote_mac\":\"aa:bb:cc:11:42:11\",\"successful\":true}],\"l3_connectivity\":[{\"average_rtt_ms\":0.304,\"outgoing_nic\":\"ens3\",\"remote_ip_address\":\"192.167.124.13\",\"successful\":true}]},{\"host_id\":\"84083091-8c0c-470b-a157-d002dbeed785\",\"l2_connectivity\":[{\"outgoing_ip_address\":\"192.167.124.12\",\"outgoing_nic\":\"ens3\",\"remote_ip_address\":\"192.167.124.14\",\"remote_mac\":\"aa:bb:cc:11:42:12\",\"successful\":true}],\"l3_connectivity\":[{\"average_rtt_ms\":0.237,\"outgoing_nic\":\"ens3\",\"remote_ip_address\":\"192.167.124.14\",\"successful\":true}]}]}", 280 | "created_at": "2021-09-15T19:23:23.614Z", 281 | "discovery_agent_version": "latest", 282 | "domain_name_resolutions": "{\"resolutions\":[{\"domain_name\":\"api.ocpd.lab.local\",\"ipv4_addresses\":[\"192.167.124.7\"],\"ipv6_addresses\":[]},{\"domain_name\":\"api-int.ocpd.lab.local\",\"ipv4_addresses\":[],\"ipv6_addresses\":[]},{\"domain_name\":\"console-openshift-console.apps.ocpd.lab.local\",\"ipv4_addresses\":[\"192.167.124.8\"],\"ipv6_addresses\":[]},{\"domain_name\":\"validateNoWildcardDNS.ocpd.lab.local\",\"ipv4_addresses\":[],\"ipv6_addresses\":[]}]}", 283 | "href": "/api/assisted-install/v2/infra-envs/71db492e-207e-47eb-af7b-c7c716c7e09d/hosts/fa89d7cd-c2d9-4f26-bd78-155647a32b04", 284 | "id": "fa89d7cd-c2d9-4f26-bd78-155647a32b04", 285 | 
"infra_env_id": "71db492e-207e-47eb-af7b-c7c716c7e09d", 286 | "installation_disk_id": "/dev/disk/by-path/pci-0000:00:05.0", 287 | "installation_disk_path": "/dev/vda", 288 | "inventory": "{\"bmc_address\":\"0.0.0.0\",\"bmc_v6address\":\"::/0\",\"boot\":{\"current_boot_mode\":\"bios\"},\"cpu\":{\"architecture\":\"x86_64\",\"count\":12,\"flags\":[\"fpu\",\"vme\",\"de\",\"pse\",\"tsc\",\"msr\",\"pae\",\"mce\",\"cx8\",\"apic\",\"sep\",\"mtrr\",\"pge\",\"mca\",\"cmov\",\"pat\",\"pse36\",\"clflush\",\"mmx\",\"fxsr\",\"sse\",\"sse2\",\"ss\",\"syscall\",\"nx\",\"pdpe1gb\",\"rdtscp\",\"lm\",\"constant_tsc\",\"arch_perfmon\",\"rep_good\",\"nopl\",\"xtopology\",\"cpuid\",\"tsc_known_freq\",\"pni\",\"pclmulqdq\",\"vmx\",\"ssse3\",\"fma\",\"cx16\",\"pdcm\",\"pcid\",\"sse4_1\",\"sse4_2\",\"x2apic\",\"movbe\",\"popcnt\",\"tsc_deadline_timer\",\"aes\",\"xsave\",\"avx\",\"f16c\",\"rdrand\",\"hypervisor\",\"lahf_lm\",\"abm\",\"cpuid_fault\",\"invpcid_single\",\"pti\",\"ssbd\",\"ibrs\",\"ibpb\",\"stibp\",\"tpr_shadow\",\"vnmi\",\"flexpriority\",\"ept\",\"vpid\",\"ept_ad\",\"fsgsbase\",\"tsc_adjust\",\"bmi1\",\"avx2\",\"smep\",\"bmi2\",\"erms\",\"invpcid\",\"xsaveopt\",\"arat\",\"umip\",\"md_clear\",\"arch_capabilities\"],\"frequency\":3491.914,\"model_name\":\"Intel(R) Xeon(R) CPU E5-1650 v3 @ 3.50GHz\"},\"disks\":[ 289 | 290 | {\"bootable\":true,\"by_path\":\"/dev/disk/by-path/pci-0000:00:01.1-ata-1\",\"drive_type\":\"ODD\",\"hctl\":\"0:0:0:0\",\"id\":\"/dev/ 291 | 292 | 293 | "progress": { 294 | "current_stage": "", 295 | . 
296 | }, 297 | "progress_stages": null, 298 | "role": "auto-assign", 299 | 300 | "user_name": "admin", 301 | "validations_info": "{\"hardware\":[{\"id\":\"has-inventory\",\"status\":\"success\",\"message\":\"Valid inventory exists for the host\"},{\"id\":\"has-min-cpu-cores\",\"status\":\"success\",\"message\":\"Sufficient CPU cores\"},{\"id\":\"has-min-memory\",\"status\":\"success\",\"message\":\"Sufficient minimum RAM\"},{\"id\":\"has-min-valid-disks\",\"status\":\"success\",\"message\":\"Sufficient disk capacity\"},{\"id\":\"has-cpu-cores-for-role\",\"status\":\"success\",\"message\":\"Sufficient CPU cores for role auto-assign\"},{\"id\":\"has-memory-for-role\",\"status\":\"success\",\"message\":\"Sufficient RAM for role auto-assign\"},{\"id\":\"hostname-unique\",\"status\":\"success\",\"message\":\"Hostname ocp4-master0.ocpd.lab.local is unique in cluster\"},{\"id\":\"hostname-valid\",\"status\":\"success\",\"message\":\"Hostname ocp4-master0.ocpd.lab.local is allowed\"},{\"id\":\"valid-platform\",\"status\":\"success\",\"message\":\"Platform KVM is allowed\"}, 302 | ............................................................................. 303 | {\"id\":\"sufficient-installation-disk-speed\",\"status\":\"success\",\"message\":\"Speed of installation disk has not yet been measured\"},{\"id\":\"compatible-with-cluster-platform\",\"status\":\"success\",\"message\":\"Host is compatible with cluster platform \"message\":\"lso is disabled\"},{\"id\":\"ocs-requirements-satisfied\",\"status\":\"success\",\"message\":\"ocs is disabled\"}]}" 304 | } 305 | ``` 306 | 307 | This is a truncated version of the full ouput as it contains quite a lot of informations. Basically the agent provides all hardware info to the assisted service so it can have a precise inventory of the host hardware and eventually validate the nodes. 
308 | To get more info about validation and hardware inventory, you can use these 2 one liners 309 | 310 | ```bash 311 | curl -s -X GET "$AI_URL/api/assisted-install/v2/clusters?with_hosts=true" -H "accept: application/json" \ 312 | -H "get_unregistered_clusters: false"| jq -r '.[].validations_info'|jq . 313 | ``` 314 | 315 | ```bash 316 | curl -s -X GET "$AI_URL/api/assisted-install/v2/clusters?with_hosts=true" -H "accept: application/json" \ 317 | -H "get_unregistered_clusters: false"| jq -r '.[].hosts[].inventory'|jq -r . 318 | ``` 319 | 320 | One important point to notice is that each hosts gets its own id after this process. We can extract these with the following call: 321 | 322 | ```bash 323 | [root@kvm-host ~] curl -s -X GET "$AI_URL/api/assisted-install/v2/clusters?with_hosts=true"\ 324 | -H "accept: application/json" -H "get_unregistered_clusters: false"| jq -r '.[].hosts[].id' 325 | 326 | 2121a000-d27e-4596-a408-6813d3114caf 327 | 84083091-8c0c-470b-a157-d002dbeed785 328 | fa89d7cd-c2d9-4f26-bd78-155647a32b04 329 | ``` 330 | 331 | - Assign role to discovered Nodes 332 | 333 | After validation, each node gets the 'auto-assign' role. We can check with this API call: 334 | 335 | ```bash 336 | [root@kvm-host ~]curl -s -X GET "$AI_URL/api/assisted-install/v2/clusters?with_hosts=true" -H "accept: application/json" -H "get_unregistered_clusters: false"| jq -r '.[].hosts[].role' 337 | auto-assign 338 | auto-assign 339 | auto-assign 340 | ``` 341 | 342 | If you want something a bit more predictable, you can assign roles based on nodes id. 
Since only our master nodes have been discovered, we will assign them the master role: 343 | 344 | ```bash 345 | for i in `curl -s -X GET "$AI_URL/api/assisted-install/v2/clusters?with_hosts=true"\ 346 | -H "accept: application/json" -H "get_unregistered_clusters: false"| jq -r '.[].hosts[].id'| awk 'NR>0' |awk '{print $1;}'` 347 | do curl -X PATCH "$AI_URL/api/assisted-install/v1/clusters/$CLUSTER_ID" -H "accept: application/json" -H "Content-Type: application/json" -d "{ \"hosts_roles\": [ { \"id\": \"$i\", \"role\": \"master\" } ]}" 348 | done 349 | 350 | ``` 351 | 352 | Check the result: 353 | 354 | ```bash 355 | [root@kvm-host ~]curl -s -X GET "$AI_URL/api/assisted-install/v2/clusters?with_hosts=true"\ 356 | -H "accept: application/json" \ 357 | -H "get_unregistered_clusters: false"| jq -r '.[].hosts[].role' 358 | master 359 | master 360 | master 361 | ``` 362 | 363 | - Add workers, complete configuration and trigger the installation 364 | 365 | It's now time to start our workers. The same discovery process will take place and the new nodes will get the **auto-assign** role. Because a cluster cannot have more than 3 masters, we are sure **auto-assign=worker** this time. 366 | Because we set **vip_dhcp_allocation** to **false** in the cluster definition file, we need to set the **api_vip** parameter before we can trigger the installation. 367 | 368 | ```bash 369 | curl -X PATCH "$AI_URL/api/assisted-install/v1/clusters/$CLUSTER_ID" \ 370 | -H "accept: application/json"\ 371 | -H "Content-Type: application/json" -d "{ \"api_vip\": \"192.167.124.7\"}" 372 | ``` 373 | 374 | And finally start installation: 375 | 376 | ```bash 377 | curl -X POST "$AI_URL/api/assisted-install/v1/clusters/$CLUSTER_ID/actions/install" \ 378 | -H "accept: application/json" \ 379 | -H "Content-Type: application/json" 380 | ``` 381 | 382 | During the installation process, disks will be written and nodes will reboot. 
One of the masters will also play the bootstrap role until the control plane is ready then the installation will continue as usual. 383 | 384 | - Monitoring the installation progress 385 | We can closely monitor the nodes states during the installation process: 386 | 387 | ```bash 388 | [root@kvm-host ~]curl -s -X GET "$AI_URL/api/assisted-install/v2/clusters?with_hosts=true"\ 389 | -H "accept: application/json" \ 390 | -H "get_unregistered_clusters: false"| jq -r '.[].hosts[].progress' 391 | { 392 | "current_stage": "Writing image to disk", 393 | "installation_percentage": 42, 394 | "progress_info": "92%", 395 | "stage_started_at": "2021-09-16T15:56:39.275Z", 396 | "stage_updated_at": "2021-09-16T15:57:31.215Z" 397 | } 398 | { 399 | "current_stage": "Writing image to disk", 400 | "installation_percentage": 42, 401 | "progress_info": "93%", 402 | "stage_started_at": "2021-09-16T15:56:38.290Z", 403 | "stage_updated_at": "2021-09-16T15:57:31.217Z" 404 | } 405 | { 406 | "current_stage": "Writing image to disk", 407 | "installation_percentage": 30, 408 | "progress_info": "92%", 409 | "stage_started_at": "2021-09-16T15:56:38.698Z", 410 | "stage_updated_at": "2021-09-16T15:57:31.218Z" 411 | } 412 | { 413 | "current_stage": "Waiting for control plane", 414 | "installation_percentage": 44, 415 | "stage_started_at": "2021-09-16T15:56:32.053Z", 416 | "stage_updated_at": "2021-09-16T15:56:32.053Z" 417 | } 418 | { 419 | "current_stage": "Waiting for control plane", 420 | "installation_percentage": 44, 421 | "stage_started_at": "2021-09-16T15:56:42.398Z", 422 | "stage_updated_at": "2021-09-16T15:56:42.398Z" 423 | } 424 | ``` 425 | 426 | To monitor the whole installation progress: 427 | 428 | ```bash 429 | [root@kvm-host ~]curl -s -X GET "$AI_URL/api/assisted-install/v2/clusters?with_hosts=true" \ 430 | -H "accept: application/json" \ 431 | -H "get_unregistered_clusters: false"| jq -r '.[].progress' 432 | { 433 | "finalizing_stage_percentage": 100, 434 | 
"installing_stage_percentage": 100, 435 | "preparing_for_installation_stage_percentage": 100, 436 | "total_percentage": 100 437 | } 438 | 439 | ``` 440 | 441 | - Retrieve kubeconfig and credentials 442 | 443 | ```bash 444 | [root@kvm-host ~] curl -X GET "$AI_URL/api/assisted-install/v1/clusters/$CLUSTER_ID/downloads/kubeconfig" \ 445 | -H "accept: application/octet-stream" > .kube/config 446 | % Total % Received % Xferd Average Speed Time Time Time Current 447 | Dload Upload Total Spent Left Speed 448 | 100 12104 100 12104 0 0 2955k 0 --:--:-- --:--:-- --:--:-- 2955k 449 | [root@kvm-host ~]oc get nodes 450 | NAME STATUS ROLES AGE VERSION 451 | ocp4-master0.ocpd.lab.local Ready master 119m v1.21.1+9807387 452 | ocp4-master1.ocpd.lab.local Ready master 134m v1.21.1+9807387 453 | ocp4-master2.ocpd.lab.local Ready master 134m v1.21.1+9807387 454 | ocp4-worker0.ocpd.lab.local Ready worker 119m v1.21.1+9807387 455 | ocp4-worker1.ocpd.lab.local Ready worker 119m v1.21.1+9807387 456 | ``` 457 | 458 | ```bash 459 | [root@kvm-host ~] curl -X GET "$AI_URL/api/assisted-install/v1/clusters/$CLUSTER_ID/credentials" \ 460 | -H "accept: application/json" |jq -r . 461 | % Total % Received % Xferd Average Speed Time Time Time Current 462 | Dload Upload Total Spent Left Speed 463 | 100 132 100 132 0 0 44000 0 --:--:-- --:--:-- --:--:-- 44000 464 | { 465 | "console_url": "https://console-openshift-console.apps.ocpd.lab.local", 466 | "password": "8Tepe-uxF7Q-ztHg5-yoKPQ", 467 | "username": "kubeadmin" 468 | } 469 | ``` 470 | 471 | ## Part III : Day 2 Operations 472 | 473 | ## Adding worker nodes 474 | 475 | In order to add extra workers to an existing cluster the following process must be followed: 476 | 477 | - Create a new 'AddHost cluster' 478 | 479 | This creates a new OpenShift cluster definition for adding nodes to our existing OCP cluster. 480 | We have to manually generate the new cluster id and name and make sure the api_vip_dnsname matches the existing cluster. 
481 | 482 | ```bash 483 | ###Generate id for addhost cluster#### 484 | 485 | NCLUSTER_ID=$(curl -s -X GET "$AI_URL/api/assisted-install/v2/clusters?with_hosts=true" -H "accept: application/json" -H "get_unregistered_clusters: false"| jq -r '.[].id'| tr b c) 486 | ##### creating addhost cluster 487 | echo $NCLUSTER_ID 488 | 489 | curl -X POST "http://192.167.124.1:8090/api/assisted-install/v1/add_hosts_clusters" \ 490 | -H "accept: application/json" -H "Content-Type: application/json" \ 491 | -d "{ \"id\": \"$NCLUSTER_ID\", \"name\": \"ocp2\", \"api_vip_dnsname\": \"api.ocpd.lab.local\", \"openshift_version\": \"4.8\"}" 492 | 493 | ``` 494 | 495 | - Create a new discovery boot media the new nodes will boot from in order to be introspected and validated 496 | 497 | ```bash 498 | ####Patch new cluster to add pullsecret#### 499 | 500 | cat << EOF > ./new-params.json 501 | { 502 | "ssh_public_key": "$CLUSTER_SSHKEY", 503 | "pull_secret": $PULL_SECRET 504 | } 505 | EOF 506 | curl -s -X PATCH "$AI_URL/api/assisted-install/v1/clusters/$NCLUSTER_ID" -d @new-params.json --header "Content-Type: application/json" | jq '.' 507 | 508 | ####create and download new ISO #### 509 | curl -s -X POST "$AI_URL/api/assisted-install/v1/clusters/$NCLUSTER_ID/downloads/image" \ 510 | -d @new-params.json --header "Content-Type: application/json" \ 511 | | jq '.' 
512 | 513 | curl -L "$AI_URL/api/assisted-install/v1/clusters/$NCLUSTER_ID/downloads/image" \ 514 | -o /var/lib/libvirt/images/default/discovery_image_ocpd2.iso 515 | ``` 516 | 517 | - Before starting the extra workers, make sure they boot from the new created ISO 518 | 519 | ```bash 520 | virsh change-media --domain ocp4-worker1-ht hda \ 521 | --current --update \ 522 | --source /var/lib/libvirt/images/default/discovery_image_ocpd2.iso 523 | virsh start --domain ocp4-worker1-ht 524 | ``` 525 | 526 | - Let's take a closer look at both clusters 527 | 528 | ```bash 529 | curl -s -X GET -H "Content-Type: application/json" "$AI_URL/api/assisted-install/v1/clusters" | jq -r .[].id 530 | ``` 531 | 532 | Output should look like this: 533 | 534 | ```bash 535 | 58fb589e-2f8b-44ee-b056-08499ba7ddd5 <-- UUID of the existing cluster 536 | 58fc589e-2f8c-44ee-c056-08499ca7ddd5 <-- UUID of the AddHost cluster 537 | ``` 538 | 539 | Use the API to get more details: 540 | 541 | ```bash 542 | [root@kvm-host ~] curl -s -X GET --header "Content-Type: application/json" \ 543 | "$AI_URL/api/assisted-install/v1/clusters/$NCLUSTER_ID" | jq -r .kind 544 | AddHostsCluster 545 | [root@kvm-host ~] curl -s -X GET --header "Content-Type: application/json" \ 546 | "$AI_URL/api/assisted-install/v1/clusters/$CLUSTER_ID" | jq -r .kind 547 | Cluster 548 | [root@kvm-host ~] curl -s -X GET --header "Content-Type: application/json" \ 549 | "$AI_URL/api/assisted-install/v1/clusters/$NCLUSTER_ID" | jq -r .status 550 | adding-hosts 551 | [root@kvm-host ~] curl -s -X GET --header "Content-Type: application/json" \ 552 | "$AI_URL/api/assisted-install/v1/clusters/$CLUSTER_ID" | jq -r .status 553 | installed 554 | ####Check nodes for each clusters#### 555 | [root@kvm-host ~] curl -s -X GET --header "Content-Type: application/json" \ 556 | "$AI_URL/api/assisted-install/v1/clusters/$CLUSTER_ID" | jq -r .hosts[].requested_hostname 557 | ocp4-master1.ocpd.lab.local 558 | ocp4-master2.ocpd.lab.local 559 | 
ocp4-worker0.ocpd.lab.local 560 | ocp4-worker1.ocpd.lab.local 561 | ocp4-master0.ocpd.lab.local 562 | [root@kvm-host ~] curl -s -X GET --header "Content-Type: application/json" \ 563 | "$AI_URL/api/assisted-install/v1/clusters/$NCLUSTER_ID" | jq -r .hosts[].requested_hostname 564 | ocp4-worker1-ht.ocpd.lab.local 565 | [root@kvm-host ~] curl -s -X GET --header "Content-Type: application/json" \ 566 | "$AI_URL/api/assisted-install/v1/clusters/$NCLUSTER_ID" | jq -r .hosts[].role 567 | auto-assign 568 | ``` 569 | 570 | We see above that everything is working as expected: 571 | - The new cluster is in **adding-hosts** state 572 | - The existing cluster is in **installed** state 573 | - The new worker has been discovered and has been given the right role 574 | 575 | - Start the new node installation: 576 | 577 | ```bash 578 | curl -X POST "$AI_URL/api/assisted-install/v1/clusters/$NCLUSTER_ID/actions/install_hosts" \ 579 | -H "accept: application/json" | jq '.' 580 | ``` 581 | 582 | As soon the installation begin,the new node will get the worker roles 583 | 584 | ```bash 585 | [root@kvm-host ~] curl -s -X GET "$AI_URL/api/assisted-install/v2/clusters?with_hosts=true" -H "accept: application/json" -H "get_unregistered_clusters: false"| jq -r '.[].hosts[].role' 586 | master 587 | master 588 | worker 589 | worker 590 | master 591 | worker 592 | ``` 593 | 594 | - Wait for the new worker to reboot and check pending CSRs 595 | 596 | ```bash 597 | [root@kvm-host ~] oc get csr|grep Pending 598 | csr-5jrm7 5m55s kubernetes.io/kube-apiserver-client-kubelet system:serviceaccount:openshift-machine-config-operator:node-bootstrapper Pending 599 | 600 | ###Approve all CSR### 601 | [root@kvm-host ~] for csr in $(oc -n openshift-machine-api get csr | awk '/Pending/ {print $1}'); do oc adm certificate approve $csr;done 602 | certificatesigningrequest.certificates.k8s.io/csr-5jrm7 approved 603 | ``` 604 | 605 | We should now see the new node: 606 | 607 | ```bash 608 | [root@kvm-host ~] oc 
get nodes 609 | NAME STATUS ROLES AGE VERSION 610 | ocp4-master0.ocpd.lab.local Ready master 59m v1.22.0-rc.0+75ee307 611 | ocp4-master1.ocpd.lab.local Ready master 40m v1.22.0-rc.0+75ee307 612 | ocp4-master2.ocpd.lab.local Ready master 59m v1.22.0-rc.0+75ee307 613 | ocp4-worker0.ocpd.lab.local Ready worker 42m v1.22.0-rc.0+75ee307 614 | ocp4-worker1-ht.ocpd.lab.local NotReady worker 48s v1.22.0-rc.0+75ee307 615 | ocp4-worker1.ocpd.lab.local Ready worker 42m v1.22.0-rc.0+75ee307 616 | ``` 617 | 618 | After a few minutes we should see: 619 | 620 | ```bash 621 | [root@kvm-host ~] oc get nodes 622 | NAME STATUS ROLES AGE VERSION 623 | ocp4-master0.ocpd.lab.local Ready master 62m v1.22.0-rc.0+75ee307 624 | ocp4-master1.ocpd.lab.local Ready master 44m v1.22.0-rc.0+75ee307 625 | ocp4-master2.ocpd.lab.local Ready master 62m v1.22.0-rc.0+75ee307 626 | ocp4-worker0.ocpd.lab.local Ready worker 45m v1.22.0-rc.0+75ee307 627 | ocp4-worker1-ht.ocpd.lab.local Ready worker 3m54s v1.22.0-rc.0+75ee307 628 | ocp4-worker1.ocpd.lab.local Ready worker 45m v1.22.0-rc.0+75ee307 629 | ``` 630 | 631 | - Check with AI API 632 | 633 | ```bash 634 | [root@kvm-host ~] curl -s -X GET "$AI_URL/api/assisted-install/v2/clusters?with_hosts=true" \ 635 | -H "accept: application/json" -H "get_unregistered_clusters: false"| jq -r .[].hosts[].status 636 | installed 637 | installed 638 | installed 639 | installed 640 | installed 641 | added-to-existing-cluster 642 | ``` 643 | 644 | We succefully added an extra worker. 645 | 646 | ## Part IV : Network Tweaks 647 | 648 | ## Bond configuration 649 | 650 | In order to provision a cluster with bonded interfaces for Workers, we need to use the **static_network_config** parameter when building the Discovery ISO. 
651 | 652 | ```json 653 | "static_network_config": [ 654 | { 655 | "network_yaml": "Network state in json format for a specific node", 656 | "mac_interface_map": [ 657 | { 658 | "mac_address": "string", 659 | "logical_nic_name": "string" 660 | } 661 | ] 662 | } 663 | ] 664 | ``` 665 | 666 | - Let's take a look at the different values we need to provide: 667 | 668 | - "network_yaml": "Network state in json format" 669 | - Nodes network configuration is handled by [kubernetes-nmstate](https://github.com/nmstate/kubernetes-nmstate) and needs to be provided in JSON. For simplicity we will first write the desired network state in YAML format and JSON encode it for each nodes. Many examples are provided [here](https://nmstate.io/examples.html#interfaces-ethernet) and [here](https://github.com/openshift/assisted-service/blob/master/docs/user-guide/restful-api-guide.md). 670 | 671 | The YAML below describes the bond definition we will write for each workers in our Lab. 672 | 673 | In this example we create a bond with ens3 and ens4 as slaves and we assign a static ip to ens5. Each node needs its own nmstate file. 
674 | 675 | ```yaml 676 | interfaces: 677 | - name: bond0 678 | description: Bond 679 | type: bond 680 | state: up 681 | ipv4: 682 | enabled: true 683 | dhcp: true 684 | auto-dns: true 685 | auto-gateway: true 686 | auto-routes: true 687 | link-aggregation: 688 | mode: balance-rr 689 | options: 690 | miimon: '140' 691 | port: 692 | - ens3 693 | - ens4 694 | - name: ens3 695 | state: up 696 | type: ethernet 697 | - name: ens4 698 | state: up 699 | type: ethernet 700 | - name: ens5 701 | state: up 702 | type: ethernet 703 | ipv4: 704 | address: 705 | - ip: 10.17.3.4 706 | prefix-length: 24 707 | enabled: true 708 | ``` 709 | 710 | - "mac_interface_map": [] 711 | - Because all nodes will boot from the same Discovery ISO, mac addresses and logical nic names need to be mapped as shown in example bellow: 712 | 713 | ```json 714 | { 715 | "mac_interface_map": [ 716 | { 717 | "mac_address": "aa:bb:cc:11:42:21", 718 | "logical_nic_name": "ens3" 719 | }, 720 | { 721 | "mac_address": "aa:bb:cc:11:42:51", 722 | "logical_nic_name": "ens4" 723 | }, 724 | { 725 | "mac_address": "aa:bb:cc:11:42:61", 726 | "logical_nic_name": "ens5" 727 | } 728 | ] 729 | } 730 | ``` 731 | 732 | It is important to understand that with both nmstates and mac mapping, each node can be individually configured when booting from the same Discovery ISO. 733 | Now we have described the logic, let's create the data file we'll present to the API. We'll use jq to json encode and inject nmstate YAML into our final data file. 
734 | 735 | - Prepare the environment: 736 | 737 | In this lab worker nodes have 3 NICs connected to 2 different networks (See Terraform file provided for more details) 738 | - nmstate files are provided in the git repo 739 | 740 | ```bash 741 | cd assisted-installer-deepdive 742 | mkdir ~/bond 743 | cp config/nmstate* ~/bond/ 744 | ``` 745 | 746 | - Create the network data file 747 | 748 | ```bash 749 | export AI_URL='http://192.167.124.1:8090' 750 | export CLUSTER_SSHKEY=$(cat ~/.ssh/id_ed25519.pub) 751 | export PULL_SECRET=$(cat pull-secret.txt | jq -R .) 752 | export NODE_SSH_KEY="$CLUSTER_SSHKEY" 753 | cd /root/ 754 | 755 | jq -n --arg SSH_KEY "$NODE_SSH_KEY" --arg NMSTATE_YAML1 "$(cat ~/bond/nmstate-bond-worker0.yaml)" --arg NMSTATE_YAML2 "$(cat ~/bond/nmstate-bond-worker1.yaml)" '{ 756 | "ssh_public_key": $SSH_KEY, 757 | "image_type": "full-iso", 758 | "static_network_config": [ 759 | { 760 | "network_yaml": $NMSTATE_YAML1, 761 | "mac_interface_map": [{"mac_address": "aa:bb:cc:11:42:20", "logical_nic_name": "ens3"}, {"mac_address": "aa:bb:cc:11:42:50", "logical_nic_name": "ens4"},{"mac_address": "aa:bb:cc:11:42:60", "logical_nic_name": "ens5"}] 762 | }, 763 | { 764 | "network_yaml": $NMSTATE_YAML2, 765 | "mac_interface_map": [{"mac_address": "aa:bb:cc:11:42:21", "logical_nic_name": "ens3"}, {"mac_address": "aa:bb:cc:11:42:51", "logical_nic_name": "ens4"},{"mac_address": "aa:bb:cc:11:42:61", "logical_nic_name": "ens5"}] 766 | } 767 | ] 768 | }' > bond-workers 769 | 770 | ``` 771 | 772 | - Build the image 773 | 774 | ```bash 775 | curl -H "Content-Type: application/json" -X POST \ 776 | -d @bond-workers ${AI_URL}/api/assisted-install/v1/clusters/$CLUSTER_ID/downloads/image | jq . 
777 | ``` 778 | 779 | - Deploy the cluster 780 | 781 | **_For a fully automated deployment use the script full-deploy-ai-multinode-bond.sh provided in the git repo_** 782 | 783 | ```bash 784 | ###Create infra ### 785 | 786 | terraform -chdir=/opt/terraform/ai-bond apply -auto-approve 787 | 788 | ####Assign master role to master VMs#### 789 | 790 | for i in `curl -s -X GET "$AI_URL/api/assisted-install/v2/clusters?with_hosts=true"\ 791 | -H "accept: application/json" -H "get_unregistered_clusters: false"| jq -r '.[].hosts[].id'| awk 'NR>0' |awk '{print $1;}'` 792 | do curl -X PATCH "$AI_URL/api/assisted-install/v1/clusters/$CLUSTER_ID" -H "accept: application/json" -H "Content-Type: application/json" -d "{ \"hosts_roles\": [ { \"id\": \"$i\", \"role\": \"master\" } ]}" 793 | done 794 | 795 | ###set api IP### 796 | 797 | curl -X PATCH "$AI_URL/api/assisted-install/v1/clusters/$CLUSTER_ID" -H "accept: application/json" -H "Content-Type: application/json" -d "{ \"api_vip\": \"192.167.124.7\"}" 798 | 799 | ###Start workers#### 800 | for i in {0..1} 801 | do virsh start ocp4-worker$i 802 | done 803 | 804 | sleep 180 805 | 806 | ###Start installation### 807 | curl -X POST \ 808 | "$AI_URL/api/assisted-install/v1/clusters/$CLUSTER_ID/actions/install" \ 809 | -H "accept: application/json" \ 810 | -H "Content-Type: application/json" 811 | 812 | echo Wait for install to complete 813 | 814 | while [[ $STATUS != 100 ]] 815 | do 816 | sleep 5 817 | STATUS=$(curl -s -X GET "$AI_URL/api/assisted-install/v2/clusters?with_hosts=true" -H "accept: application/json" -H "get_unregistered_clusters: false"| jq -r '.[].progress.total_percentage') 818 | done 819 | 820 | echo Download kubeconfig 821 | mkdir ~/.kube 822 | curl -X GET "$AI_URL/api/assisted-install/v1/clusters/$CLUSTER_ID/downloads/kubeconfig" -H "accept: application/octet-stream" > .kube/config 823 | 824 | ``` 825 | 826 | - Check workers are configured as requested: 827 | - After sshing into worker0 we can see all connections 
were created 828 | 829 | ```bash 830 | [root@ocp4-worker0 ~] ls -1 /etc/NetworkManager/system-connections/ 831 | bond0.nmconnection 832 | br-ex.nmconnection 833 | ens3-slave-ovs-clone.nmconnection 834 | ens3.nmconnection 835 | ens4-slave-ovs-clone.nmconnection 836 | ens4.nmconnection 837 | ens5.nmconnection 838 | ovs-if-br-ex.nmconnection 839 | ovs-if-phys0.nmconnection 840 | ovs-port-br-ex.nmconnection 841 | ovs-port-phys0.nmconnection 842 | ``` 843 | 844 | - Check configuration for each NIC: 845 | 846 | ```bash 847 | cat /etc/NetworkManager/system-connections/ens3.nmconnection 848 | [connection] 849 | id=ens3 850 | uuid=501c47c3-7c1d-4424-b131-f40dd89827a9 851 | type=ethernet 852 | interface-name=ens3 853 | master=bond0 854 | permissions= 855 | slave-type=bond 856 | autoconnect=true 857 | autoconnect-priority=1 858 | 859 | [ethernet] 860 | mac-address-blacklist= 861 | ``` 862 | 863 | ```bash 864 | cat /etc/NetworkManager/system-connections/ens4.nmconnection 865 | [connection] 866 | id=ens4 867 | uuid=6baa8165-0fa0-4eae-83cb-f89462aa6f18 868 | type=ethernet 869 | interface-name=ens4 870 | master=bond0 871 | permissions= 872 | slave-type=bond 873 | autoconnect=true 874 | autoconnect-priority=1 875 | 876 | [ethernet] 877 | mac-address-blacklist= 878 | ``` 879 | 880 | ```bash 881 | cat /etc/NetworkManager/system-connections/ens5.nmconnection 882 | [connection] 883 | id=ens5 884 | uuid=c04c3a19-c8d7-4c6b-a836-c64b573de270 885 | type=ethernet 886 | interface-name=ens5 887 | permissions= 888 | autoconnect=true 889 | autoconnect-priority=1 890 | 891 | [ethernet] 892 | mac-address-blacklist= 893 | 894 | [ipv4] 895 | address1=10.17.3.3/24 896 | dhcp-client-id=mac 897 | dns-search= 898 | method=manual 899 | 900 | [ipv6] 901 | addr-gen-mode=eui64 902 | dhcp-duid=ll 903 | dhcp-iaid=mac 904 | dns-search= 905 | method=disabled 906 | 907 | [proxy] 908 | ``` 909 | 910 | 911 | ### Thank you for reading 912 | 913 | ## References 914 | 915 | - [Deploying Single Node OpenShift via 
Assisted Installer API](https://schmaustech.blogspot.com/2021/08/deploying-single-node-openshift-via.html) 916 | - [Cilium Installation with OpenShift Assisted Installer](https://cloudcult.dev/cilium-installation-openshift-assisted-installer/) 917 | - [https://generator.swagger.io/?url=https://raw.githubusercontent.com/openshift/assisted-service/master/swagger.yaml](https://generator.swagger.io/?url=https://raw.githubusercontent.com/openshift/assisted-service/master/swagger.yaml) 918 | - [https://github.com/sonofspike/assisted-service-onprem](https://github.com/sonofspike/assisted-service-onprem) 919 | - [https://github.com/karmab/assisted-installer-cli](https://github.com/karmab/assisted-installer-cli) 920 | - [https://github.com/rh-telco-tigers/Assisted-Installer-API](https://github.com/rh-telco-tigers/Assisted-Installer-API) 921 | --------------------------------------------------------------------------------