├── src ├── cli │ ├── __init__.py │ ├── commands │ │ ├── __init__.py │ │ ├── help_command.py │ │ ├── command.py │ │ ├── env_command.py │ │ ├── commands_factory.py │ │ └── test_command.py │ ├── __main__.py │ ├── key_binding.py │ ├── cli_utils.py │ ├── application.py │ └── prompt_handler.py ├── manage │ ├── __init__.py │ └── manageable_options.yaml ├── tests │ ├── __init__.py │ ├── global_variables │ │ └── __init__.py │ ├── pytest.ini │ ├── test_day2.py │ ├── templates │ │ ├── day2_baremetal.yaml.j2 │ │ └── day2_machine.yaml.j2 │ └── config │ │ └── __init__.py ├── assisted_test_infra │ ├── __init__.py │ ├── test_infra │ │ ├── helper_classes │ │ │ ├── __init__.py │ │ │ ├── kube_helpers │ │ │ │ ├── idict.py │ │ │ │ ├── base_resource.py │ │ │ │ ├── __init__.py │ │ │ │ └── secret.py │ │ │ ├── config │ │ │ │ ├── base_redfish_config.py │ │ │ │ ├── base_vsphere_config.py │ │ │ │ ├── base_nutanix_config.py │ │ │ │ ├── base_infra_env_config.py │ │ │ │ ├── __init__.py │ │ │ │ ├── base_day2_cluster_config.py │ │ │ │ ├── base_terraform_config.py │ │ │ │ ├── base_oci_config.py │ │ │ │ ├── base_entity_config.py │ │ │ │ ├── base_cluster_config.py │ │ │ │ └── base_nodes_config.py │ │ │ ├── cluster_host.py │ │ │ └── events_handler.py │ │ ├── controllers │ │ │ ├── ipxe_controller │ │ │ │ ├── __init__.py │ │ │ │ └── server │ │ │ │ │ ├── Dockerfile │ │ │ │ │ └── local_ipxe_server.py │ │ │ ├── proxy_controller │ │ │ │ ├── __init__.py │ │ │ │ └── templates │ │ │ │ │ └── squid.conf.j2 │ │ │ ├── tang_controller │ │ │ │ ├── __init__.py │ │ │ │ └── tang_controller.py │ │ │ ├── node_controllers │ │ │ │ ├── disk.py │ │ │ │ └── __init__.py │ │ │ ├── __init__.py │ │ │ └── iptables.py │ │ ├── tools │ │ │ ├── __init__.py │ │ │ └── concurrently.py │ │ ├── utils │ │ │ ├── network_utils.py │ │ │ ├── __init__.py │ │ │ ├── base_name.py │ │ │ ├── entity_name.py │ │ │ ├── manifests.py │ │ │ ├── terraform_util.py │ │ │ ├── release_image_utils.py │ │ │ ├── k8s_utils.py │ │ │ └── env_var.py │ │ ├── __init__.py │ │ └── exceptions.py │ ├── download_logs │ │ ├── __init__.py │ │ ├── events.html.j2 │ │ └── resources │ │ │ └── man_sosreport.sh │ └── resources │ │ └── bootstrap_in_place │ │ ├── sno-worker-install.sh │ │ ├── install-config.yaml │ │ └── sno-worker-live.ign.j2 ├── consts │ ├── durations.py │ ├── kube_api.py │ ├── resources.py │ ├── __init__.py │ └── env_defaults.py ├── virsh_cleanup │ ├── __init__.py │ └── __main__.py ├── triggers │ ├── __init__.py │ └── olm_operators_trigger.py ├── service_client │ ├── __init__.py │ ├── client_factory.py │ └── client_validator.py └── cleanup.py ├── isort.cfg ├── packer_files ├── nutanix_centos_template │ ├── sources.pkr.hcl │ ├── variables.pkr.hcl │ ├── build.pkr.hcl │ ├── vars.json │ └── centos-config │ │ └── ks.cfg └── vsphere_centos_template │ ├── build.pkr.hcl │ ├── vars.json │ ├── centos-config │ └── centos8-ks.cfg │ └── sources.pkr.hcl ├── docs ├── assisted-testing.md ├── assisted-deployment.md ├── warning.md ├── build-image.md ├── overview.md ├── prerequisites.md ├── assisted-service-client.md └── getting-started.md ├── .dockerignore ├── .ansible-lint ├── ansible_files ├── roles │ ├── setup_ssh_key_pair │ │ ├── defaults │ │ │ └── main.yml │ │ ├── templates │ │ │ └── config.j2 │ │ └── tasks │ │ │ └── main.yml │ ├── ofcir_release │ │ ├── defaults │ │ │ └── main.yml │ │ ├── tasks │ │ │ └── main.yml │ │ └── meta │ │ │ └── main.yml │ ├── oci_export_connection_details │ │ ├── defaults │ │ │ └── main.yml │ │ ├── templates │ │ │ ├── ci-machine-config.sh.j2 │ │ │ └── inventory.j2 │ │ ├── files 
│ │ │ ├── packet-conf.sh │ │ │ └── fix-uid.sh │ │ └── tasks │ │ │ └── main.yml │ ├── setup_ipip_tunnel │ │ ├── defaults │ │ │ └── main.yml │ │ └── tasks │ │ │ └── main.yml │ ├── ofcir_acquire │ │ ├── defaults │ │ │ └── main.yml │ │ ├── meta │ │ │ └── argument_specs.yml │ │ └── tasks │ │ │ └── main.yml │ ├── heterogeneous_cluster_export_primary_device_connection_details │ │ ├── defaults │ │ │ └── main.yml │ │ ├── templates │ │ │ ├── ci-machine-config.sh.j2 │ │ │ └── inventory.j2 │ │ ├── files │ │ │ └── packet-conf.sh │ │ ├── meta │ │ │ └── argument_specs.yml │ │ └── tasks │ │ │ └── main.yml │ ├── setup_sftp_share │ │ ├── defaults │ │ │ └── main.yml │ │ ├── templates │ │ │ └── rclone.service.j2 │ │ └── tasks │ │ │ └── main.yml │ ├── setup_libvirtd │ │ ├── handlers │ │ │ └── main.yml │ │ ├── files │ │ │ └── allow-cross-network-traffic.sh │ │ └── tasks │ │ │ └── main.yml │ ├── heterogeneous_cluster_export_day2_configuration │ │ ├── defaults │ │ │ └── main.yml │ │ ├── tasks │ │ │ └── main.yml │ │ └── templates │ │ │ └── assisted-additional-config.j2 │ ├── oci_setup_for_test_infra │ │ ├── defaults │ │ │ └── main.yml │ │ ├── templates │ │ │ └── assisted-additional-config.j2 │ │ └── tasks │ │ │ └── main.yml │ ├── oci_create_infra │ │ ├── templates │ │ │ └── terraform.tfvars.j2 │ │ └── tasks │ │ │ └── main.yml │ ├── heterogeneous_cluster_release_cir │ │ ├── tasks │ │ │ └── main.yml │ │ └── meta │ │ │ └── argument_specs.yml │ ├── oci_destroy_infra │ │ └── tasks │ │ │ └── main.yml │ ├── oci_cleanup_resources │ │ ├── defaults │ │ │ └── main.yml │ │ └── tasks │ │ │ ├── main.yml │ │ │ └── cleanup_resources.yml │ ├── heterogeneous_cluster_extract_cir_name │ │ ├── meta │ │ │ └── argument_specs.yml │ │ └── tasks │ │ │ └── main.yml │ ├── heterogeneous_cluster_prepare_inventory │ │ ├── tasks │ │ │ └── main.yml │ │ └── meta │ │ │ └── argument_specs.yml │ └── common │ │ ├── setup_ssh_key_pair │ │ └── tasks │ │ │ └── main.yml │ │ └── setup_sftp_share │ │ └── tasks │ │ └── main.yml ├── ansible.cfg ├── vars │ ├── ci.yml │ ├── standalone_ofcir_heterogeneous_infra_sample.yml │ ├── ci_ofcir_heterogeneous_infrastructure.yml │ ├── ci_oci_infrastucture.yml │ └── standalone_oci_sample.yml ├── oci_generic_destroy_ci_machine_playbook.yml ├── oci_generic_cleanup_playbook.yml ├── ofcir_heterogeneous_destroy_infra_playbook.yml └── oci_generic_create_ci_machine_playbook.yml ├── .ci-operator.yaml ├── storage_pool └── .gitignore ├── terraform_files ├── vsphere-ci-machine │ ├── output.tf │ └── variables-vsphere.tf ├── nutanix-ci-machine │ ├── output.tf │ ├── main.tf │ └── variables-nutanix.tf ├── oci-ci-machine │ ├── output.tf │ ├── 00_main.tf │ ├── variables.tf │ ├── 02_compute.tf │ └── 01_networking.tf ├── nutanix │ ├── main.tf │ └── virtual_machines.tf ├── baremetal_host │ ├── libvirt_domain_custom.xsl │ ├── variables.tf │ └── main.tf └── limit_ip_dhcp_range.xsl ├── OWNERS ├── pyproject.toml ├── .gitignore ├── scripts ├── kexec │ ├── install-cluster.sh │ ├── README.md │ └── coreos-redeploy.sh ├── pull_dockerfile_images.sh ├── deploy_prometheus_ui.sh ├── install_k8s_clients.sh ├── test_ui.sh ├── ibmcloud_post_install.sh ├── create_full_environment.sh ├── deploy_ui.sh └── assisted_deployment.sh ├── .pre-commit-config.yaml ├── requirements-dev.txt ├── .flake8 ├── .github └── dependabot.yml ├── requirements.txt ├── skipper.yaml └── .yaspeller.json /src/cli/__init__.py: -------------------------------------------------------------------------------- 1 | 
-------------------------------------------------------------------------------- /src/manage/__init__.py: --------------------------------------------------------------------------------
1 | -------------------------------------------------------------------------------- /src/tests/__init__.py: --------------------------------------------------------------------------------
1 | -------------------------------------------------------------------------------- /src/assisted_test_infra/__init__.py: --------------------------------------------------------------------------------
1 | -------------------------------------------------------------------------------- /isort.cfg: --------------------------------------------------------------------------------
1 | [settings] 2 | line_length = 120 3 | -------------------------------------------------------------------------------- /packer_files/nutanix_centos_template/sources.pkr.hcl: --------------------------------------------------------------------------------
1 | -------------------------------------------------------------------------------- /packer_files/nutanix_centos_template/variables.pkr.hcl: --------------------------------------------------------------------------------
1 | -------------------------------------------------------------------------------- /src/assisted_test_infra/test_infra/helper_classes/__init__.py: --------------------------------------------------------------------------------
1 | -------------------------------------------------------------------------------- /src/consts/durations.py: --------------------------------------------------------------------------------
1 | MINUTE = 60 2 | HOUR = 60 * MINUTE 3 | -------------------------------------------------------------------------------- /docs/assisted-testing.md: --------------------------------------------------------------------------------
1 | # Assisted Testing 2 | 3 | To Be Continued -------------------------------------------------------------------------------- /docs/assisted-deployment.md: --------------------------------------------------------------------------------
1 | # Assisted Deployment 2 | 3 | To Be Continued -------------------------------------------------------------------------------- /.dockerignore: --------------------------------------------------------------------------------
1 | storage_pool 2 | **/*.iso 3 | build/ 4 | assisted-service/ 5 | -------------------------------------------------------------------------------- /src/assisted_test_infra/test_infra/controllers/ipxe_controller/__init__.py: --------------------------------------------------------------------------------
1 | -------------------------------------------------------------------------------- /src/assisted_test_infra/test_infra/controllers/proxy_controller/__init__.py: --------------------------------------------------------------------------------
1 | -------------------------------------------------------------------------------- /src/assisted_test_infra/test_infra/controllers/tang_controller/__init__.py: --------------------------------------------------------------------------------
1 | -------------------------------------------------------------------------------- /.ansible-lint: --------------------------------------------------------------------------------
1 | --- 2 | skip_list: 3 | - var-naming[no-role-prefix] 4 | - package-latest 5 | --------------------------------------------------------------------------------
/ansible_files/roles/setup_ssh_key_pair/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | ssh_private_key_name: "id_cluster" 3 | -------------------------------------------------------------------------------- /.ci-operator.yaml: -------------------------------------------------------------------------------- 1 | build_root_image: 2 | namespace: openshift 3 | name: release 4 | tag: golang-1.16 5 | -------------------------------------------------------------------------------- /ansible_files/roles/ofcir_release/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | request_retries: 60 3 | request_delay_seconds: 30 4 | -------------------------------------------------------------------------------- /storage_pool/.gitignore: -------------------------------------------------------------------------------- 1 | # Ignore everything in this directory 2 | * 3 | # Except this file 4 | !.gitignore 5 | -------------------------------------------------------------------------------- /ansible_files/roles/oci_export_connection_details/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | primary_device_group_name: "primary" 3 | -------------------------------------------------------------------------------- /src/cli/commands/__init__.py: -------------------------------------------------------------------------------- 1 | from .commands_factory import CommandFactory 2 | 3 | __all__ = ["CommandFactory"] 4 | -------------------------------------------------------------------------------- /src/tests/global_variables/__init__.py: -------------------------------------------------------------------------------- 1 | from .default_variables import DefaultVariables 2 | 3 | __all__ = ["DefaultVariables"] 4 | -------------------------------------------------------------------------------- /ansible_files/roles/setup_ipip_tunnel/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | ipip_device_name: tun0 3 | ipip_connection_name: "{{ ipip_device_name }}" 4 | -------------------------------------------------------------------------------- /ansible_files/roles/ofcir_acquire/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | acquire_timeout_seconds: 70 3 | request_retries: 60 4 | request_delay_seconds: 30 5 | -------------------------------------------------------------------------------- /src/manage/manageable_options.yaml: -------------------------------------------------------------------------------- 1 | deregister_clusters: 2 | method: delete_cluster 3 | measure_field: updated_at 4 | days_back: 20 5 | 6 | -------------------------------------------------------------------------------- /ansible_files/roles/heterogeneous_cluster_export_primary_device_connection_details/defaults/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | primary_device_group_name: "primary" 3 | -------------------------------------------------------------------------------- /terraform_files/vsphere-ci-machine/output.tf: -------------------------------------------------------------------------------- 1 | output "ip_address" { 2 | description = "IP" 3 | value = vsphere_virtual_machine.vm.default_ip_address 4 | } -------------------------------------------------------------------------------- 
/src/virsh_cleanup/__init__.py: --------------------------------------------------------------------------------
1 | from .virsh_cleanup import DEFAULT_SKIP_LIST, clean_virsh_resources 2 | 3 | __all__ = ["clean_virsh_resources", "DEFAULT_SKIP_LIST"] 4 | -------------------------------------------------------------------------------- /ansible_files/roles/setup_sftp_share/defaults/main.yml: --------------------------------------------------------------------------------
1 | --- 2 | ssh_user: root 3 | rclone_config_file: /etc/rclone.conf 4 | rclone_remote_name: share_dir 5 | rclone_type_sftp: sftp 6 | -------------------------------------------------------------------------------- /ansible_files/ansible.cfg: --------------------------------------------------------------------------------
1 | [defaults] 2 | host_key_checking = False 3 | remote_tmp = /tmp/ansible 4 | verbosity = 2 5 | stdout_callback = yaml 6 | 7 | [ssh_connection] 8 | retries = 10 9 | -------------------------------------------------------------------------------- /ansible_files/roles/setup_libvirtd/handlers/main.yml: --------------------------------------------------------------------------------
1 | --- 2 | - name: Restart libvirtd service 3 | ansible.builtin.systemd: 4 | name: libvirtd.service 5 | state: restarted 6 | -------------------------------------------------------------------------------- /docs/warning.md: --------------------------------------------------------------------------------
1 | # ⚠️ Warning ⚠️ 2 | 3 | This framework modifies system configurations and requires elevated permissions. It is not recommended to run it on a personal or production machine. -------------------------------------------------------------------------------- /src/cli/__main__.py: --------------------------------------------------------------------------------
1 | from cli.application import CliApplication 2 | 3 | if __name__ == "__main__" or __name__ == "src.cli.__main__": 4 | cli = CliApplication() 5 | cli.run() 6 | -------------------------------------------------------------------------------- /terraform_files/nutanix-ci-machine/output.tf: --------------------------------------------------------------------------------
1 | output "ip_address" { 2 | description = "IP" 3 | value = lookup(nutanix_virtual_machine.vm.nic_list.0.ip_endpoint_list[0], "ip") 4 | } 5 | -------------------------------------------------------------------------------- /ansible_files/roles/heterogeneous_cluster_export_day2_configuration/defaults/main.yml: --------------------------------------------------------------------------------
1 | --- 2 | subnet_length_ipv4: 24 3 | subnet_length_ipv6: 64 4 | network_if: tt0 5 | secondary_network_if: stt1 6 | -------------------------------------------------------------------------------- /src/triggers/__init__.py: --------------------------------------------------------------------------------
1 | from .default_triggers import get_default_triggers 2 | from .env_trigger import Trigger 3 | from .olm_operators_trigger import OlmOperatorsTrigger 4 | 5 | __all__ = ["Trigger", "get_default_triggers", "OlmOperatorsTrigger"] 6 | -------------------------------------------------------------------------------- /ansible_files/roles/setup_libvirtd/files/allow-cross-network-traffic.sh: --------------------------------------------------------------------------------
1 | #!/usr/bin/env sh 2 | ( 3 | flock 3 4 | iptables -S | grep -E "LIBVIRT_FW[IO] .* REJECT" | sed -e 's/^-A/iptables -D/g' -e 's/$/ || true/g' | sh 5 | ) 
3>/tmp/iptables.lock 6 | -------------------------------------------------------------------------------- /OWNERS: -------------------------------------------------------------------------------- 1 | # See the OWNERS docs: https://git.k8s.io/community/contributors/guide/owners.md 2 | 3 | approvers: 4 | - tsorya 5 | - eliorerz 6 | - eranco74 7 | - omertuc 8 | - adriengentil 9 | - danmanor 10 | - rccrdpccl 11 | - avishayt 12 | - yoavsc0302 13 | -------------------------------------------------------------------------------- /src/assisted_test_infra/download_logs/__init__.py: -------------------------------------------------------------------------------- 1 | from .download_logs import collect_debug_info_from_cluster, download_must_gather, gather_sosreport_data 2 | 3 | __all__ = ["download_must_gather", "gather_sosreport_data", "collect_debug_info_from_cluster"] 4 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [tool.pyright] 2 | extraPaths = [ "src" ] 3 | 4 | [tool.black] 5 | line-length = 120 6 | target-version = ['py36'] 7 | include = '\.pyi?$' 8 | 9 | [tool.isort] 10 | multi_line_output = 3 11 | line_length = 120 12 | include_trailing_comma = true 13 | -------------------------------------------------------------------------------- /src/assisted_test_infra/test_infra/tools/__init__.py: -------------------------------------------------------------------------------- 1 | from .assets import LibvirtNetworkAssets 2 | from .concurrently import run_concurrently 3 | from .terraform_utils import TerraformUtils 4 | 5 | __all__ = ["TerraformUtils", "run_concurrently", "LibvirtNetworkAssets"] 6 | -------------------------------------------------------------------------------- /src/assisted_test_infra/test_infra/helper_classes/kube_helpers/idict.py: -------------------------------------------------------------------------------- 1 | from abc import ABC, abstractmethod 2 | 3 | 4 | class IDict(ABC): 5 | def __repr__(self): 6 | return str(self.as_dict()) 7 | 8 | @abstractmethod 9 | def as_dict(self) -> dict: 10 | pass 11 | -------------------------------------------------------------------------------- /ansible_files/roles/heterogeneous_cluster_export_primary_device_connection_details/templates/ci-machine-config.sh.j2: -------------------------------------------------------------------------------- 1 | export IP="{{ hostvars[groups[primary_device_group_name][0]].access_public_ipv4 }}" 2 | export SSH_KEY_FILE="{{ hostvars[groups[primary_device_group_name][0]].ansible_ssh_private_key_file }}" 3 | -------------------------------------------------------------------------------- /ansible_files/roles/oci_export_connection_details/templates/ci-machine-config.sh.j2: -------------------------------------------------------------------------------- 1 | source "${SHARED_DIR}/fix-uid.sh" 2 | 3 | export IP="{{ hostvars[groups[primary_device_group_name][0]].ansible_host }}" 4 | export SSH_KEY_FILE="{{ hostvars[groups[primary_device_group_name][0]].ansible_ssh_private_key_file }}" 5 | -------------------------------------------------------------------------------- /packer_files/vsphere_centos_template/build.pkr.hcl: -------------------------------------------------------------------------------- 1 | packer { 2 | required_plugins { 3 | vsphere = { 4 | source = "github.com/hashicorp/vsphere" 5 | version = "= 1.3.0" 6 | } 7 | } 8 | } 9 | 10 | 11 | build { 12 | sources = 
["sources.vsphere-iso.test-infra-template"] 13 | } 14 | -------------------------------------------------------------------------------- /src/service_client/__init__.py: -------------------------------------------------------------------------------- 1 | from .assisted_service_api import InventoryClient, ServiceAccount 2 | from .client_factory import ClientFactory 3 | from .logger import SuppressAndLog, add_log_record, log 4 | 5 | __all__ = ["InventoryClient", "ClientFactory", "log", "add_log_record", "SuppressAndLog", "ServiceAccount"] 6 | -------------------------------------------------------------------------------- /terraform_files/oci-ci-machine/output.tf: -------------------------------------------------------------------------------- 1 | output "ci_machine_inventory" { 2 | value = { 3 | "public_ip" : oci_core_instance.ci_instance.public_ip, 4 | "display_name" : oci_core_instance.ci_instance.display_name, 5 | "ssh_private_key_path" : var.private_ssh_key_path, 6 | "user" : "root", 7 | } 8 | } 9 | -------------------------------------------------------------------------------- /src/assisted_test_infra/test_infra/controllers/ipxe_controller/server/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM quay.io/assisted-installer-ops/base-python:3.12 2 | 3 | ARG SERVER_IP 4 | ARG SERVER_PORT 5 | ENV SERVER_IP=${SERVER_IP} 6 | ENV SERVER_PORT=${SERVER_PORT} 7 | 8 | COPY . . 9 | 10 | CMD [ "python", "./local_ipxe_server.py" ] 11 | -------------------------------------------------------------------------------- /ansible_files/roles/heterogeneous_cluster_export_primary_device_connection_details/files/packet-conf.sh: -------------------------------------------------------------------------------- 1 | source "${SHARED_DIR}/ci-machine-config.sh" 2 | 3 | export SSHOPTS=(-o 'ConnectTimeout=5' -o 'StrictHostKeyChecking=no' -o 'UserKnownHostsFile=/dev/null' -o 'ServerAliveInterval=90' -o LogLevel=ERROR -i "${SSH_KEY_FILE}") 4 | -------------------------------------------------------------------------------- /src/assisted_test_infra/resources/bootstrap_in_place/sno-worker-install.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -euxo pipefail 4 | 5 | if coreos-installer install --ignition=/root/config.ign ${INSTALL_DEVICE}; then 6 | echo "Worker OS installation succeeded!" 7 | else 8 | echo "Worker OS installation failed!" 
9 | exit 1 10 | fi 11 | 12 | reboot 13 | -------------------------------------------------------------------------------- /ansible_files/roles/oci_export_connection_details/files/packet-conf.sh: -------------------------------------------------------------------------------- 1 | source "${SHARED_DIR}/fix-uid.sh" 2 | source "${SHARED_DIR}/ci-machine-config.sh" 3 | 4 | export SSHOPTS=(-o 'ConnectTimeout=5' -o 'StrictHostKeyChecking=no' -o 'UserKnownHostsFile=/dev/null' -o 'ServerAliveInterval=90' -o LogLevel=ERROR -o 'ConnectionAttempts=10' -i "${SSH_KEY_FILE}") 5 | -------------------------------------------------------------------------------- /ansible_files/roles/heterogeneous_cluster_export_day2_configuration/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Export day2 configuration in assisted-additional-config to shared_dir 3 | ansible.builtin.template: 4 | src: "assisted-additional-config.j2" 5 | dest: "{{ shared_dir }}/assisted-additional-config" 6 | mode: "0644" 7 | when: shared_dir is defined 8 | -------------------------------------------------------------------------------- /src/tests/pytest.ini: -------------------------------------------------------------------------------- 1 | [pytest] 2 | markers = 3 | kube_api: mark test as part of kube api tests. 4 | override_controller_configuration: mark for overriding the controller configuration fixture 5 | override_cluster_configuration: mark for overriding the cluster configuration fixture 6 | override_infra_env_configuration: mark for overriding the cluster infra_env fixture 7 | -------------------------------------------------------------------------------- /ansible_files/vars/ci.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # CI related vars 3 | build_id: "{{ lookup('env', 'BUILD_ID') }}" 4 | cluster_profile_dir: "{{ lookup('env', 'CLUSTER_PROFILE_DIR') }}" 5 | job_name_hash: "{{ lookup('env', 'JOB_NAME_HASH') }}" 6 | ci_namespace: "{{ lookup('env', 'NAMESPACE') }}" 7 | openshift_ci: "{{ lookup('env', 'OPENSHIFT_CI') | bool }}" 8 | shared_dir: "{{ lookup('env', 'SHARED_DIR') }}" 9 | -------------------------------------------------------------------------------- /ansible_files/vars/standalone_ofcir_heterogeneous_infra_sample.yml: -------------------------------------------------------------------------------- 1 | --- 2 | ofcir_url: "https://127.0.0.1:8443/v1/ofcir" 3 | ofcir_token: "token" 4 | primary_cir_identifier: "primary" 5 | secondary_cir_identifier: "secondary" 6 | primary_cir_type: "assisted_medium_el9" 7 | secondary_cir_type: "assisted_arm64_el9" 8 | shared_dir: /tmp/test 9 | private_key_path: ~/Documents/auth/packet.pem 10 | -------------------------------------------------------------------------------- /packer_files/vsphere_centos_template/vars.json: -------------------------------------------------------------------------------- 1 | { 2 | "vsphere_username": "placeholder", 3 | "vsphere_password": "placeholder", 4 | "vsphere_cluster": "placeholder", 5 | "vsphere_datacenter": "placeholder", 6 | "vsphere_datastore": "placeholder", 7 | "vsphere_network": "placeholder", 8 | "vsphere_server": "placeholder", 9 | "vm_name": "packer-test" 10 | } 11 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | build 2 | assisted-service 3 | .idea 4 | .vscode 5 | reports/ 6 | *__pycache__* 7 | 
minikube 8 | *.log 9 | **/RCS/** 10 | *~ 11 | .env 12 | capi 13 | sno-bootstrap-manifests 14 | .pip/ 15 | **/UNKNOWN.egg-info 16 | # terraform 17 | .terraform.lock.hcl 18 | terraform.tfstate 19 | terraform.tfstate.backup 20 | .terraform 21 | **/cloud-config.yaml 22 | custom_manifests/ 23 | venv 24 | 25 | -------------------------------------------------------------------------------- /ansible_files/roles/oci_setup_for_test_infra/defaults/main.yml: --------------------------------------------------------------------------------
1 | --- 2 | oci_compute_shape: "VM.Standard.E5.Flex" 3 | oci_control_plane_shape: "VM.Standard.E5.Flex" 4 | 5 | oci_infrastructure_zip_url: "https://github.com/oracle-quickstart/oci-openshift/releases/download/v1.0.0/create-cluster-v1.0.0.zip" 6 | oci_infrastructure_zip_file: "{{ hostvars[ci_machine].ansible_user_dir }}/oci/infrastructure.zip" 7 | -------------------------------------------------------------------------------- /packer_files/nutanix_centos_template/build.pkr.hcl: --------------------------------------------------------------------------------
1 | packer { 2 | required_plugins { 3 | nutanix = { 4 | version = "0.9.0" 5 | source = "github.com/nutanix-cloud-native/nutanix" 6 | } 7 | } 8 | } 9 | 10 | # Dummy source that needs to be removed 11 | # a CI PR is needed before removing it 12 | source "null" "test" { 13 | communicator = "none" 14 | } 15 | 16 | build { 17 | sources = ["null.test"] 18 | } -------------------------------------------------------------------------------- /packer_files/nutanix_centos_template/vars.json: --------------------------------------------------------------------------------
1 | { 2 | "nutanix_username": "placeholder", 3 | "nutanix_password": "placeholder", 4 | "nutanix_insecure": "true", 5 | "nutanix_endpoint": "placeholder", 6 | "nutanix_port": "placeholder", 7 | "nutanix_cluster": "placeholder", 8 | "nutanix_subnet": "placeholder", 9 | "centos_iso_image_name": "placeholder", 10 | "image_name": "assisted-test-infra-machine" 11 | } 12 | -------------------------------------------------------------------------------- /src/assisted_test_infra/test_infra/helper_classes/config/base_redfish_config.py: --------------------------------------------------------------------------------
1 | from abc import ABC 2 | from dataclasses import dataclass 3 | from typing import List 4 | 5 | from assisted_test_infra.test_infra.helper_classes.config.base_nodes_config import BaseNodesConfig 6 | 7 | 8 | @dataclass 9 | class BaseRedfishConfig(BaseNodesConfig, ABC): 10 | redfish_user: str = None 11 | redfish_password: str = None 12 | redfish_machines: List[str] = None 13 | redfish_enabled: bool = False 14 | -------------------------------------------------------------------------------- /src/cleanup.py: --------------------------------------------------------------------------------
1 | import pathlib 2 | import shutil 3 | 4 | REMOVE_FILES = ["*.py[co]", "test_infra.log"] 5 | REMOVE_FOLDERS = ["__pycache__", "UNKNOWN.egg-info", "build", "reports", ".mypy_cache"] 6 | 7 | 8 | for file in REMOVE_FILES: 9 | for p in pathlib.Path(".").rglob(file): 10 | print(f"Removing file {p}") 11 | p.unlink() 12 | 13 | for folder in REMOVE_FOLDERS: 14 | for p in pathlib.Path(".").rglob(folder): 15 | print(f"Removing dir {p}") 16 | shutil.rmtree(p) 17 | -------------------------------------------------------------------------------- /src/tests/test_day2.py: --------------------------------------------------------------------------------
1 | from junit_report import JunitTestSuite
2 | 3 | from tests.base_test import BaseTest 4 | 5 | 6 | class TestDay2(BaseTest): 7 | # Install day1 cluster and deploy day2 nodes (cloud flow). 8 | # Or, deploy day2 nodes on an installed cluster if CLUSTER_ID env var is specified. 9 | @JunitTestSuite() 10 | def test_deploy_day2_nodes_cloud(self, day2_cluster): 11 | day2_cluster.prepare_for_installation() 12 | day2_cluster.start_install_and_wait_for_installed() 13 | -------------------------------------------------------------------------------- /ansible_files/roles/oci_create_infra/templates/terraform.tfvars.j2: -------------------------------------------------------------------------------- 1 | unique_id = "{{ unique_id }}" 2 | private_ssh_key_path = "{{ private_ssh_key_path }}" 3 | public_ssh_key_path = "{{ public_ssh_key_path }}" 4 | oci_tenancy_id = "{{ oci_tenancy_id }}" 5 | oci_user_id = "{{ oci_user_id }}" 6 | oci_fingerprint = "{{ oci_fingerprint }}" 7 | oci_region = "{{ oci_region }}" 8 | oci_private_key_path = "{{ oci_private_key_path }}" 9 | oci_compartment_id = "{{ oci_compartment_id }}" 10 | -------------------------------------------------------------------------------- /src/tests/templates/day2_baremetal.yaml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: metal3.io/v1alpha1 3 | kind: BareMetalHost 4 | metadata: 5 | name: {{ master_host_name }} 6 | namespace: openshift-machine-api 7 | annotations: 8 | spec: 9 | automatedCleaningMode: metadata 10 | bootMACAddress: 00:00:00:00:00:02 11 | bootMode: UEFI 12 | customDeploy: 13 | method: install_coreos 14 | externallyProvisioned: true 15 | online: true 16 | userData: 17 | name: master-user-data-managed 18 | namespace: openshift-machine-api 19 | -------------------------------------------------------------------------------- /ansible_files/roles/heterogeneous_cluster_release_cir/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Define CI resource file path 3 | ansible.builtin.set_fact: 4 | cir_file_path: "{{ shared_dir }}/cir_{{ cir_type }}_{{ cir_identifier }}.json" 5 | 6 | - name: Parse CI resource file 7 | ansible.builtin.set_fact: 8 | ci_resource: "{{ lookup('file', cir_file_path) | from_json }}" 9 | 10 | - name: Release CI resource 11 | ansible.builtin.include_role: 12 | name: ofcir_release 13 | vars: 14 | cir_name: "{{ ci_resource.name }}" 15 | -------------------------------------------------------------------------------- /scripts/kexec/install-cluster.sh: -------------------------------------------------------------------------------- 1 | #! 
/bin/bash -e 2 | 3 | if [[ -z $ISO_URL ]]; then 4 | echo "usage: ISO_URL=https://assisted/cluster/discovery/image $(basename $0)" 5 | exit 1 6 | fi 7 | 8 | SCRIPT='coreos-redeploy.sh' 9 | SSH_OPTS='-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o ConnectTimeout=10' 10 | HOSTS=$* 11 | 12 | for host in $HOSTS; do 13 | echo starting update of $host 14 | scp $SSH_OPTS $SCRIPT core@$host:/tmp && 15 | ssh -fn $SSH_OPTS core@$host sudo /tmp/$SCRIPT $ISO_URL & 16 | done 17 | -------------------------------------------------------------------------------- /src/tests/config/__init__.py: --------------------------------------------------------------------------------
1 | from .global_configs import ( 2 | ClusterConfig, 3 | Day2ClusterConfig, 4 | InfraEnvConfig, 5 | OciConfig, 6 | TerraformConfig, 7 | VSphereConfig, 8 | global_variables, 9 | reset_global_variables, 10 | ) 11 | 12 | __all__ = [ 13 | "ClusterConfig", 14 | "InfraEnvConfig", 15 | "TerraformConfig", 16 | "Day2ClusterConfig", 17 | "VSphereConfig", 18 | "OciConfig", 19 | "global_variables", 20 | "reset_global_variables", 21 | ] 22 | -------------------------------------------------------------------------------- /terraform_files/oci-ci-machine/00_main.tf: --------------------------------------------------------------------------------
1 | terraform { 2 | required_providers { 3 | oci = { 4 | source = "oracle/oci" 5 | version = "6.23.0" 6 | } 7 | cloudinit = { 8 | source = "hashicorp/cloudinit" 9 | version = "2.3.2" 10 | } 11 | } 12 | } 13 | 14 | provider "oci" { 15 | tenancy_ocid = var.oci_tenancy_id 16 | user_ocid = var.oci_user_id 17 | fingerprint = var.oci_fingerprint 18 | private_key_path = var.oci_private_key_path 19 | region = var.oci_region 20 | } 21 | -------------------------------------------------------------------------------- /src/assisted_test_infra/test_infra/utils/network_utils.py: --------------------------------------------------------------------------------
1 | import ipaddress 2 | from typing import List 3 | 4 | 5 | def get_cidr_by_interface(interface: str) -> str: 6 | return str(ipaddress.ip_interface(interface).network) 7 | 8 | 9 | def any_interface_in_cidr(interfaces: List[str], cidr: str) -> bool: 10 | network = ipaddress.ip_network(cidr) 11 | return any(ipaddress.ip_interface(ifc).ip in network for ifc in interfaces) 12 | 13 | 14 | def get_ip_from_interface(interface: str) -> str: 15 | return str(ipaddress.ip_interface(interface).ip) 16 | -------------------------------------------------------------------------------- /ansible_files/roles/oci_export_connection_details/templates/inventory.j2: --------------------------------------------------------------------------------
1 | {# 2 | The CI expects the node that runs test-infra to be in the "primary" group 3 | Other nodes are ungrouped, and will be targeted when the playbook runs on "all" nodes 4 | -#} 5 | 6 | {% for hostname in groups["all"] %} 7 | {{- hostname }} ansible_host={{ hostvars[hostname]["ansible_host"] }} ansible_user={{ hostvars[hostname]["ansible_user"] }} ansible_ssh_private_key_file={{ hostvars[hostname]["ansible_ssh_private_key_file"] }} 8 | {% endfor %} 9 | 10 | [primary] 11 | {{ groups["primary"] | first }} 12 | -------------------------------------------------------------------------------- /ansible_files/roles/setup_ssh_key_pair/templates/config.j2: --------------------------------------------------------------------------------
1 | {% for host in ansible_play_hosts %} 2 | host {{ 
hostvars[host].inventory_hostname }} 3 | {# take first private IP available, default on public IP if no private IP -#} 4 | {% set private_ips = hostvars[host].ansible_all_ipv4_addresses | sort | ansible.utils.ipaddr('private') -%} 5 | Hostname {{ private_ips | first | default(hostvars[host].ansible_all_ipv4_addresses[0]) }} 6 | User {{ hostvars[host].ansible_user_id }} 7 | IdentityFile {{ ssh_private_key_path }} 8 | StrictHostKeyChecking no 9 | 10 | {% endfor %} 11 | -------------------------------------------------------------------------------- /src/assisted_test_infra/test_infra/controllers/proxy_controller/templates/squid.conf.j2: -------------------------------------------------------------------------------- 1 | {% if authenticated %} 2 | auth_param basic program /usr/lib/squid/basic_ncsa_auth /etc/squid/squid-users 3 | acl all_auth proxy_auth REQUIRED 4 | http_access allow all_auth 5 | {% else %} 6 | acl all src 0.0.0.0/0 7 | acl all src ::/0 8 | {% if denied_port %} 9 | acl denied_ports port {{ denied_port }} 10 | http_access deny denied_ports 11 | {% endif %} 12 | http_access allow all 13 | {% endif %} 14 | http_port {{ port }} 15 | cache deny all 16 | debug_options ALL,1 33,2 28,9F 17 | coredump_dir /var/spool/squid -------------------------------------------------------------------------------- /src/assisted_test_infra/test_infra/controllers/node_controllers/disk.py: -------------------------------------------------------------------------------- 1 | from dataclasses import dataclass 2 | from enum import Enum 3 | 4 | 5 | class DiskSourceType(Enum): 6 | FILE = 1 7 | NETWORK = 2 8 | BLOCK = 3 9 | DIR = 4 10 | VOLUME = 5 11 | NVME = 6 12 | OTHER = 7 13 | 14 | 15 | @dataclass 16 | class Disk: 17 | type: str 18 | alias: str 19 | wwn: str 20 | bus: str 21 | target: str 22 | source_type: str 23 | source_path: str 24 | source_pool: str 25 | source_volume: str 26 | 27 | def __str__(self): 28 | return self.target 29 | -------------------------------------------------------------------------------- /src/assisted_test_infra/test_infra/controllers/ipxe_controller/server/local_ipxe_server.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import os 4 | from http.server import CGIHTTPRequestHandler, HTTPServer 5 | 6 | ip = os.getenv("SERVER_IP", "192.168.122.1") 7 | port = int(os.getenv("SERVER_PORT", 8500)) 8 | 9 | # Make sure the server is hosting the iPXE scripts directory 10 | dir = f"{os.getcwd()}/ipxe_scripts" 11 | os.chdir(dir) 12 | 13 | # Create server object 14 | server_object = HTTPServer(server_address=(ip, port), RequestHandlerClass=CGIHTTPRequestHandler) 15 | # Start the web server 16 | server_object.serve_forever() 17 | -------------------------------------------------------------------------------- /ansible_files/roles/heterogeneous_cluster_export_primary_device_connection_details/templates/inventory.j2: -------------------------------------------------------------------------------- 1 | {# 2 | The CI expects the node that runs test-infra to be in the "primary" group 3 | Other nodes are ungrouped, and will be targeted when the playbook runs on "all" nodes 4 | -#} 5 | 6 | {% for hostname in groups["all"] %} 7 | {{- hostname }} ansible_host={{ hostvars[hostname]["ansible_host"] }} ansible_user={{ hostvars[hostname]["ansible_user"] }} ansible_ssh_private_key_file={{ hostvars[hostname]["ansible_ssh_private_key_file"] }} 8 | {% endfor %} 9 | 10 | [primary] 11 | {{ groups["primary"] | first }} 12 | 
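13 | {#
14 | A hypothetical rendering of this template (the host names, IPs and key paths
15 | below are illustrative only), showing the inventory shape the CI consumes:
16 | 
17 | primary-0 ansible_host=198.51.100.10 ansible_user=root ansible_ssh_private_key_file=/tmp/ssh-key
18 | secondary-0 ansible_host=198.51.100.11 ansible_user=root ansible_ssh_private_key_file=/tmp/ssh-key
19 | 
20 | [primary]
21 | primary-0
22 | -#}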
-------------------------------------------------------------------------------- /ansible_files/vars/ci_ofcir_heterogeneous_infrastructure.yml: --------------------------------------------------------------------------------
1 | --- 2 | ofcir_url: "{{ lookup('env', 'OFCIR_URL') | default('https://ofcir-service.ofcir-system.svc.cluster.local/v1/ofcir', true) }}" 3 | primary_cir_type: "{{ lookup('env', 'PRIMARY_CIR_TYPE') | default('assisted_medium_el9', true) }}" 4 | secondary_cir_type: "{{ lookup('env', 'SECONDARY_CIR_TYPE') | default('assisted_arm64_el9', true) }}" 5 | ofcir_token: "{{ lookup('file', cluster_profile_dir + '/ofcir-auth-token') | trim }}" 6 | private_key_path: "{{ cluster_profile_dir }}/packet-ssh-key" 7 | primary_cir_identifier: "primary" 8 | secondary_cir_identifier: "secondary" 9 | -------------------------------------------------------------------------------- /ansible_files/roles/oci_destroy_infra/tasks/main.yml: --------------------------------------------------------------------------------
1 | --- 2 | - name: "Restore Terraform state file from {{ oci_tf_state_file }}" 3 | ansible.builtin.copy: 4 | src: "{{ oci_tf_state_file }}" 5 | dest: "{{ oci_terraform_workdir }}/terraform.tfstate" 6 | force: true 7 | mode: "0644" 8 | 9 | - name: "Destroy terraform infrastructure" 10 | community.general.terraform: 11 | project_path: "{{ oci_terraform_workdir }}" 12 | state: absent 13 | force_init: false 14 | state_file: "{{ oci_tf_state_file }}" 15 | variables_files: 16 | - "{{ oci_tf_vars_file }}" 17 | register: destroyed_tf 18 | -------------------------------------------------------------------------------- /ansible_files/roles/oci_cleanup_resources/defaults/main.yml: --------------------------------------------------------------------------------
1 | --- 2 | # these types are cleaned up by removing their parent resource 3 | # we exclude them from the removal to avoid conflict-type errors 4 | excluded_types: 5 | - oci_core_network_security_group_security_rule 6 | - oci_core_private_ip 7 | - oci_core_public_ip 8 | - oci_load_balancer_backend 9 | - oci_load_balancer_backend_set 10 | - oci_load_balancer_listener 11 | - oci_network_load_balancer_backend 12 | - oci_network_load_balancer_backend_set 13 | - oci_network_load_balancer_listener 14 | - oci_objectstorage_preauthrequest 15 | expired_after_hours: 7 16 | -------------------------------------------------------------------------------- /src/assisted_test_infra/test_infra/helper_classes/config/base_vsphere_config.py: --------------------------------------------------------------------------------
1 | from abc import ABC 2 | from dataclasses import dataclass 3 | 4 | from assisted_test_infra.test_infra.helper_classes.config.base_nodes_config import BaseNodesConfig 5 | 6 | 7 | @dataclass 8 | class BaseVSphereConfig(BaseNodesConfig, ABC): 9 | vsphere_server: str = None 10 | vsphere_username: str = None 11 | vsphere_password: str = None 12 | vsphere_cluster: str = None 13 | vsphere_datacenter: str = None 14 | vsphere_datastore: str = None 15 | vsphere_network: str = None 16 | vsphere_parent_folder: str = None 17 | vsphere_folder: str = None 18 | -------------------------------------------------------------------------------- /src/consts/kube_api.py: --------------------------------------------------------------------------------
1 | from .durations import HOUR, MINUTE 2 | 3 | CRD_API_GROUP = "agent-install.openshift.io" 4 | CRD_API_VERSION = "v1beta1" 5 | 6 | CRD_AGENT_INSTALL_GROUP = "extensions.hive.openshift.io" 7 | 
CRD_AGENT_INSTALL_VERSION = "v1beta1" 8 | 9 | HIVE_API_GROUP = "hive.openshift.io" 10 | HIVE_API_VERSION = "v1" 11 | 12 | 13 | DEFAULT_WAIT_FOR_CRD_STATUS_TIMEOUT = 5 * MINUTE 14 | DEFAULT_WAIT_FOR_CRD_STATE_TIMEOUT = 5 * MINUTE 15 | DEFAULT_WAIT_FOR_AGENTS_TIMEOUT = 5 * MINUTE 16 | DEFAULT_WAIT_FOR_INSTALLATION_COMPLETE_TIMEOUT = 2 * HOUR 17 | DEFAULT_WAIT_FOR_ISO_URL_TIMEOUT = 5 * MINUTE 18 | DEFAULT_WAIT_FOR_KUBECONFIG_TIMEOUT = 5 * MINUTE 19 | -------------------------------------------------------------------------------- /docs/build-image.md: --------------------------------------------------------------------------------
1 | # Build Image 2 | 3 | This project uses `skipper` for executing some of its operations inside containers. It is configured to work with an already built image `assisted-test-infra:latest`. To build the image, run: 4 | 5 | ```bash 6 | make image_build 7 | ``` 8 | 9 | The image build is also executed automatically during the [setup](./setup.md). 10 | 11 | ## Adjusting the python client 12 | 13 | For building this image, a valid [python client package](./assisted-service-client.md) is required. For building the image with a specific client, see [these](./assisted-service-client.md#rebuilding-the-client-with-the-build-image) instructions. -------------------------------------------------------------------------------- /src/assisted_test_infra/test_infra/helper_classes/config/base_nutanix_config.py: --------------------------------------------------------------------------------
1 | from abc import ABC 2 | from dataclasses import dataclass 3 | 4 | from assisted_test_infra.test_infra.helper_classes.config.base_cluster_config import BaseClusterConfig 5 | from assisted_test_infra.test_infra.helper_classes.config.base_nodes_config import BaseNodesConfig 6 | 7 | 8 | @dataclass 9 | class BaseNutanixConfig(BaseNodesConfig, BaseClusterConfig, ABC): 10 | nutanix_username: str = None 11 | nutanix_password: str = None 12 | nutanix_endpoint: str = None 13 | nutanix_port: int = None 14 | nutanix_cluster: str = None 15 | nutanix_subnet: str = None 16 | -------------------------------------------------------------------------------- /docs/overview.md: --------------------------------------------------------------------------------
1 | # Test-Infra 2 | 3 | The `assisted-test-infra` project provides a comprehensive framework for testing the OpenShift Assisted Installer in a simulated bare-metal environment. It uses libvirt-based virtual machines to emulate physical hosts, enabling realistic end-to-end testing workflows for OpenShift cluster installation. 4 | 5 | This project is primarily used for development, CI, and QE purposes. It includes Makefile targets and utility scripts to automate deployment, testing, and cleanup tasks. 6 | 7 | The framework is built around `pytest`: execution flows are structured as tests, and the various operations needed to reach those test goals are implemented as fixtures. 
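8 | 
9 | As a minimal sketch of that shape (the fixture body below is only a stub standing in for the project's real fixture wiring), a test mirrors `src/tests/test_day2.py`:
10 | 
11 | ```python
12 | import pytest
13 | 
14 | 
15 | @pytest.fixture
16 | def day2_cluster():
17 |     # In the real suite this fixture prepares the simulated environment
18 |     # (libvirt VMs, networks) and yields a cluster handle; this stub only
19 |     # illustrates the pytest structure described above.
20 |     class StubCluster:
21 |         def prepare_for_installation(self):
22 |             pass
23 | 
24 |         def start_install_and_wait_for_installed(self):
25 |             pass
26 | 
27 |     yield StubCluster()
28 | 
29 | 
30 | def test_deploy_day2_nodes_cloud(day2_cluster):
31 |     day2_cluster.prepare_for_installation()
32 |     day2_cluster.start_install_and_wait_for_installed()
33 | ```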
-------------------------------------------------------------------------------- /ansible_files/roles/setup_ipip_tunnel/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Enable IP forwarding 3 | ansible.posix.sysctl: 4 | name: net.ipv4.ip_forward 5 | value: "1" 6 | sysctl_set: true 7 | 8 | - name: Configure IPIP tunnel 9 | community.general.nmcli: 10 | type: ipip 11 | conn_name: "{{ ipip_connection_name }}" 12 | ifname: "{{ ipip_device_name }}" 13 | ip_tunnel_local: "{{ ipip_local_ipv4 }}" 14 | ip_tunnel_remote: "{{ ipip_remote_ipv4 }}" 15 | ip4: "{{ ipip_tunnel_ipv4 }}" 16 | routes4: "{{ ipip_route_to_network_ipv4 }} {{ ipip_tunnel_ipv4 | ansible.utils.ipaddr('peer') }}" 17 | autoconnect: true 18 | zone: "trusted" 19 | state: present 20 | -------------------------------------------------------------------------------- /ansible_files/roles/setup_sftp_share/templates/rclone.service.j2: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=rclone: Remote FUSE filesystem for cloud storage config {{ rclone_config_file }} 3 | Documentation=man:rclone(1) 4 | After=network-online.target 5 | Wants=network-online.target 6 | 7 | [Service] 8 | Type=notify 9 | ExecStart=rclone mount \ 10 | --config {{ rclone_config_file }} \ 11 | --allow-other \ 12 | --log-level INFO \ 13 | --stats 1m \ 14 | --stats-one-line \ 15 | {{ rclone_remote_name }}:{{ shared_directory }} {{ shared_directory }} 16 | ExecStop=fusermount -u {{ shared_directory }} 17 | 18 | [Install] 19 | WantedBy=default.target 20 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | fail_fast: true 2 | repos: 3 | - repo: meta 4 | hooks: 5 | - id: check-useless-excludes 6 | - repo: https://github.com/pycqa/isort 7 | rev: 5.8.0 8 | hooks: 9 | - id: isort 10 | name: isort (python) 11 | args: [--profile=black, --line-length=120] 12 | - repo: https://github.com/python/black 13 | rev: 21.9b0 14 | hooks: 15 | - id: black 16 | args: [--line-length=120] 17 | - repo: https://gitlab.com/pycqa/flake8 18 | rev: 4.0.1 19 | hooks: 20 | - id: flake8 21 | additional_dependencies: [ flake8-bugbear, flake8-eradicate, pep8-naming, flake8-use-fstring, flake8-colors ] 22 | args: [--statistics] 23 | -------------------------------------------------------------------------------- /src/consts/resources.py: -------------------------------------------------------------------------------- 1 | from consts import GB, MiB_UNITS 2 | 3 | DEFAULT_WORKER_MEMORY: int = 8 * MiB_UNITS 4 | DEFAULT_ARBITER_MEMORY: int = 8 * MiB_UNITS 5 | DEFAULT_MASTER_MEMORY: int = 16 * MiB_UNITS 6 | DEFAULT_MTU: int = 1500 7 | DEFAULT_WORKER_DISK_GB: int = 20 8 | DEFAULT_ARBITER_DISK_GB: int = 20 9 | DEFAULT_MASTER_DISK_GB: int = 100 10 | DEFAULT_WORKER_DISK: int = DEFAULT_WORKER_DISK_GB * GB 11 | DEFAULT_ARBITER_DISK: int = DEFAULT_ARBITER_DISK_GB * GB 12 | DEFAULT_MASTER_DISK: int = DEFAULT_MASTER_DISK_GB * GB 13 | DEFAULT_DISK_COUNT: int = 1 14 | DEFAULT_WORKER_CPU: int = 2 15 | DEFAULT_ARBITER_CPU: int = 2 16 | DEFAULT_MASTER_CPU: int = 4 17 | DEFAULT_MASTER_SNO_MEMORY: int = 16 * MiB_UNITS 18 | DEFAULT_MASTER_SNO_CPU: int = 8 19 | -------------------------------------------------------------------------------- /src/assisted_test_infra/resources/bootstrap_in_place/install-config.yaml: 
-------------------------------------------------------------------------------- 1 | apiVersion: v1 2 | baseDomain: redhat.com 3 | compute: 4 | - architecture: amd64 5 | hyperthreading: Enabled 6 | name: worker 7 | platform: {} 8 | replicas: 0 9 | controlPlane: 10 | architecture: amd64 11 | hyperthreading: Enabled 12 | name: master 13 | platform: {} 14 | replicas: 1 15 | metadata: 16 | creationTimestamp: null 17 | name: test-infra-cluster 18 | networking: 19 | clusterNetwork: 20 | - cidr: 10.128.0.0/14 21 | hostPrefix: 23 22 | machineNetwork: 23 | - cidr: 192.168.126.0/24 24 | networkType: OVNKubernetes 25 | serviceNetwork: 26 | - 172.30.0.0/16 27 | platform: 28 | none: {} 29 | publish: External 30 | pullSecret: '{}' 31 | sshKey: "" 32 | -------------------------------------------------------------------------------- /ansible_files/roles/setup_libvirtd/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install libvirtd 3 | ansible.builtin.package: 4 | name: 5 | - libvirt-client 6 | - libvirt-daemon-kvm 7 | state: present 8 | notify: 9 | - Restart libvirtd service 10 | 11 | - name: Create libvirt network hook directory 12 | ansible.builtin.file: 13 | path: /etc/libvirt/hooks/network.d 14 | state: directory 15 | mode: "0755" 16 | 17 | - name: Install libvirt network hook to allow cross network traffic 18 | ansible.builtin.copy: 19 | src: allow-cross-network-traffic.sh 20 | dest: /etc/libvirt/hooks/network.d/allow-cross-network-traffic.sh 21 | owner: root 22 | group: root 23 | mode: "0755" 24 | notify: 25 | - Restart libvirtd service 26 | -------------------------------------------------------------------------------- /ansible_files/roles/ofcir_release/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "Release resource {{ cir_name }}" 3 | ansible.builtin.uri: 4 | url: "{{ ofcir_url }}/{{ cir_name }}" 5 | method: DELETE 6 | headers: 7 | "X-OFCIRTOKEN": "{{ ofcir_token }}" 8 | validate_certs: false 9 | timeout: "{{ release_timeout_seconds | default(30) }}" 10 | register: release_response 11 | until: release_response.status in [200, 204] 12 | retries: "{{ request_retries }}" 13 | delay: "{{ request_delay_seconds }}" 14 | failed_when: release_response.status == -1 or release_response.status >= 400 15 | 16 | - name: "Confirm resource release" 17 | ansible.builtin.debug: 18 | msg: "Successfully released CI resource {{ cir_name }}." 
19 | when: release_response.status in [200, 204] 20 | -------------------------------------------------------------------------------- /ansible_files/roles/heterogeneous_cluster_export_primary_device_connection_details/meta/argument_specs.yml: -------------------------------------------------------------------------------- 1 | --- 2 | argument_specs: 3 | main: 4 | short_description: Export connection details for the primary device 5 | description: 6 | - This role exports connection configuration files for the primary device 7 | - It generates configuration scripts and inventory files for CI machine setup 8 | author: Assisted Test Infrastructure Team 9 | options: 10 | shared_dir: 11 | description: Directory path where configuration files will be exported 12 | type: path 13 | required: true 14 | primary_device_group_name: 15 | description: Name of the Ansible group containing the primary device 16 | type: str 17 | default: "primary" 18 | -------------------------------------------------------------------------------- /terraform_files/nutanix/main.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | nutanix = { 4 | source = "nutanix/nutanix" 5 | version = "1.9.1" 6 | } 7 | } 8 | } 9 | 10 | provider "nutanix" { 11 | username = var.nutanix_username 12 | password = var.nutanix_password 13 | endpoint = var.nutanix_endpoint 14 | port = var.nutanix_port 15 | insecure = true 16 | wait_timeout = 60 17 | session_auth = false 18 | } 19 | 20 | data "nutanix_cluster" "cluster" { 21 | name = var.nutanix_cluster 22 | } 23 | 24 | data "nutanix_subnet" "subnet" { 25 | subnet_name = var.nutanix_subnet 26 | } 27 | 28 | resource "nutanix_image" "image" { 29 | name = "${var.cluster_name}.iso" 30 | description = "Downloaded ISO" 31 | source_path = var.iso_download_path 32 | } 33 | -------------------------------------------------------------------------------- /src/assisted_test_infra/test_infra/__init__.py: -------------------------------------------------------------------------------- 1 | from assisted_test_infra.test_infra import utils 2 | from assisted_test_infra.test_infra.helper_classes.config import ( 3 | BaseClusterConfig, 4 | BaseDay2ClusterConfig, 5 | BaseEntityConfig, 6 | BaseInfraEnvConfig, 7 | BaseNutanixConfig, 8 | BaseRedfishConfig, 9 | BaseTerraformConfig, 10 | BaseVSphereConfig, 11 | ) 12 | from assisted_test_infra.test_infra.utils.entity_name import ClusterName, InfraEnvName 13 | 14 | __all__ = [ 15 | "InfraEnvName", 16 | "ClusterName", 17 | "BaseInfraEnvConfig", 18 | "BaseTerraformConfig", 19 | "BaseClusterConfig", 20 | "BaseDay2ClusterConfig", 21 | "utils", 22 | "BaseEntityConfig", 23 | "BaseNutanixConfig", 24 | "BaseVSphereConfig", 25 | "BaseRedfishConfig", 26 | ] 27 | -------------------------------------------------------------------------------- /src/assisted_test_infra/test_infra/exceptions.py: -------------------------------------------------------------------------------- 1 | class InstallationError(Exception): 2 | pass 3 | 4 | 5 | class InstallationFailedError(InstallationError): 6 | DEFAULT_MESSAGE = "All the nodes must be in valid status, but got some in error" 7 | 8 | def __init__(self, message=DEFAULT_MESSAGE, *args: object) -> None: 9 | super().__init__(message, *args) 10 | 11 | 12 | class InstallationPendingActionError(InstallationFailedError): 13 | DEFAULT_MESSAGE = "All the nodes must be in valid status, but got some pending" 14 | 15 | 16 | class 
ReturnedToReadyAfterInstallationStartsError(InstallationError): 17 | DEFAULT_MESSAGE = "Some nodes returned to ready state after installation was started" 18 | 19 | def __init__(self, message=DEFAULT_MESSAGE, *args: object) -> None: 20 | super().__init__(message, *args) 21 | -------------------------------------------------------------------------------- /scripts/kexec/README.md: --------------------------------------------------------------------------------
1 | # KEXEC 2 | 3 | This folder contains a script that can use SSH to connect to existing machines (e.g. where a cluster is already deployed) and boot them into discovery mode. 4 | I found it useful on bare metal, where virtual media is slow (or doesn't exist) and customizing PXE configuration is a pain. 5 | In theory, this can work on any machine with an existing OS on public clouds (e.g. Packet, AWS bare metal, etc.) 6 | 7 | ## Usage 8 | 9 | 1. Create a new cluster using the assisted installer 10 | 2. Configure your discovery ISO and copy its download link 11 | 3. On a machine that has SSH access to the hosts, execute: 12 | 13 | ```bash 14 | ISO_URL=<discovery-iso-url> ./install-cluster.sh hostA hostB hostC ... 15 | ``` 16 | 17 | 4. The machines should appear in the assisted installer UI and you can continue with the regular installation. 18 | -------------------------------------------------------------------------------- /src/assisted_test_infra/test_infra/controllers/node_controllers/__init__.py: --------------------------------------------------------------------------------
1 | from .adapter_controller import AdapterController 2 | from .disk import Disk 3 | from .kvm_s390x_controller import KVMs390xController 4 | from .libvirt_controller import LibvirtController 5 | from .node import Node 6 | from .node_controller import NodeController 7 | from .oci_api_controller import OciApiController 8 | from .redfish_controller import RedfishController 9 | from .terraform_controller import TerraformController 10 | from .vsphere_controller import VSphereController 11 | 12 | __all__ = [ 13 | "TerraformController", 14 | "NodeController", 15 | "VSphereController", 16 | "Disk", 17 | "Node", 18 | "LibvirtController", 19 | "OciApiController", 20 | "KVMs390xController", 21 | "RedfishController", 22 | "AdapterController", 23 | ] 24 | -------------------------------------------------------------------------------- /src/assisted_test_infra/test_infra/utils/__init__.py: --------------------------------------------------------------------------------
1 | from .env_var import EnvVar 2 | from .k8s_utils import wait_for_pod_ready 3 | from .logs_utils import verify_logs_uploaded 4 | from .terraform_util import TerraformControllerUtil 5 | from .utils import * # TODO - temporary import all old utils 6 | from .utils import ( 7 | are_host_progress_in_stage, 8 | config_etc_hosts, 9 | fetch_url, 10 | get_env, 11 | get_openshift_release_image, 12 | recreate_folder, 13 | run_command, 14 | ) 15 | 16 | __all__ = [ 17 | "verify_logs_uploaded", 18 | "get_env", 19 | "EnvVar", 20 | "are_host_progress_in_stage", 21 | "TerraformControllerUtil", 22 | "get_openshift_release_image", 23 | "recreate_folder", 24 | "fetch_url", 25 | "config_etc_hosts", 26 | "run_command", 27 | "wait_for_pod_ready", 28 | ] 29 | -------------------------------------------------------------------------------- /requirements-dev.txt: --------------------------------------------------------------------------------
1 | black==25.9.0 2 | isort==7.0.0 3 | flake8==7.1.2 4 | flake8-bugbear==23.12.2 # Flake8 plugin that helps identify 
likely bugs and design problems 5 | flake8_formatter_junit_xml==0.0.6 # JUnit XML Formatter for flake8. 6 | flake8-colors==0.1.9 # ANSI colors highlight for Flake8 7 | flake8-eradicate==1.5.0 # Flake8 plugin to find commented out (or so called "dead") code 8 | pep8-naming==0.15.1 # Check code against PEP 8 naming conventions 9 | flake8-use-fstring==1.4 # Jump-start into modern Python by forcing yourself to use f-strings 10 | flake8-black==0.4.0 # Check black formatting using flake8 11 | flake8-isort==7.0.0 # Check isort formatting using flake8 12 | zipp>=3.19.1 # not directly required, pinned by Snyk to avoid a vulnerability 13 | ansible-lint==25.9.2 14 | -------------------------------------------------------------------------------- /ansible_files/roles/heterogeneous_cluster_extract_cir_name/meta/argument_specs.yml: -------------------------------------------------------------------------------- 1 | --- 2 | argument_specs: 3 | main: 4 | short_description: Extract CI resource name from OFCIR response file 5 | description: 6 | - This role extracts the resource name from a previously saved OFCIR response file 7 | - It reads the JSON file and extracts the 'name' field for use in subsequent operations 8 | author: Assisted Test Infrastructure Team 9 | options: 10 | cir_type: 11 | description: Type of CI resource that was acquired 12 | type: str 13 | required: true 14 | cir_identifier: 15 | description: Unique identifier for the CI resource 16 | type: str 17 | required: true 18 | shared_dir: 19 | description: Directory path where CI resource information is stored 20 | type: path 21 | required: true 22 | -------------------------------------------------------------------------------- /scripts/kexec/coreos-redeploy.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -e 2 | 3 | if [[ $EUID -ne 0 ]]; then 4 | echo "This script must be run as root" 5 | exit 1 6 | fi 7 | 8 | ISO_URL=$1 9 | MOUNT='mount -o loop,ro image' 10 | KERNEL='images/vmlinuz' 11 | INITRD='images/initramfs.img' 12 | KERNEL_ARG='mitigations=auto,nosmt systemd.unified_cgroup_hierarchy=0 coreos.liveiso=fedora-coreos-31.20200319.dev.1 rd.neednet=1 ip=dhcp ignition.firstboot ignition.platform.id=metal' 13 | KEXEC_PATH='/usr/local/bin' 14 | KEXEC_IMG='quay.io/ohadlevy/kexec' 15 | 16 | podman run --privileged --rm -v $KEXEC_PATH:/hostbin $KEXEC_IMG cp /kexec /hostbin 17 | 18 | TMP=$(mktemp -d) 19 | 20 | cd $TMP 21 | mkdir mnt 22 | curl -O $ISO_URL 23 | $MOUNT mnt && cd mnt 24 | 25 | printf '%s %s\n' "$(date)" "$line" 26 | echo kexecing $(hostname)... rebooting. 27 | 28 | $KEXEC_PATH/kexec --force --initrd=$INITRD --append="$KERNEL_ARG" $KERNEL 29 | -------------------------------------------------------------------------------- /ansible_files/roles/oci_export_connection_details/files/fix-uid.sh: -------------------------------------------------------------------------------- 1 | # Ensure our UID, which is randomly generated, is in /etc/passwd. This is required 2 | # to be able to SSH. 3 | if ! 
whoami &> /dev/null; then 4 | if [ -x "$(command -v nss_wrapper.pl)" ]; then 5 | grep -v -e ^default -e ^$(id -u) /etc/passwd > "/tmp/passwd" 6 | echo "${USER_NAME:-default}:x:$(id -u):0:${USER_NAME:-default} user:${HOME}:/sbin/nologin" >> "/tmp/passwd" 7 | export LD_PRELOAD=libnss_wrapper.so 8 | export NSS_WRAPPER_PASSWD=/tmp/passwd 9 | export NSS_WRAPPER_GROUP=/etc/group 10 | elif [[ -w /etc/passwd ]]; then 11 | echo "${USER_NAME:-default}:x:$(id -u):0:${USER_NAME:-default} user:${HOME}:/sbin/nologin" >> "/etc/passwd" 12 | else 13 | echo "No nss wrapper, /etc/passwd is not writeable, and user matching this uid is not found." 14 | exit 1 15 | fi 16 | fi 17 | -------------------------------------------------------------------------------- /src/assisted_test_infra/test_infra/helper_classes/config/base_infra_env_config.py: -------------------------------------------------------------------------------- 1 | from abc import ABC 2 | from dataclasses import dataclass 3 | from typing import List 4 | 5 | from .base_entity_config import BaseEntityConfig 6 | 7 | 8 | @dataclass 9 | class BaseInfraEnvConfig(BaseEntityConfig, ABC): 10 | """ 11 | Define all configuration variables that are needed for the cluster during its execution 12 | All arguments must default to None and have a type hint 13 | """ 14 | 15 | infra_env_id: str = None 16 | static_network_config: List[dict] = None 17 | ignition_config_override: str = None 18 | verify_download_iso_ssl: bool = None 19 | is_static_ip: bool = None 20 | kernel_arguments: List[dict[str, str]] = None 21 | host_installer_args: List[dict[str, str]] = None 22 | set_infraenv_version: bool = None 23 | static_ips_vlan: bool = None 24 | vlan_id: int = None 25 | -------------------------------------------------------------------------------- /scripts/pull_dockerfile_images.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | export CONTAINER_COMMAND=${CONTAINER_COMMAND:-podman} 4 | 5 | dockerfiles=$(find . -name "Dockerfile.*" -not -name "*.ocp") 6 | images=$(cat ${dockerfiles} | grep -i FROM | awk '{print $2}') 7 | images=${images//--from=} 8 | 9 | echo "### Attempting to pull assisted dockerfile images (best effort) ###" 10 | echo "Images found: ${images}" 11 | 12 | for image in ${images}; do 13 | if [[ ${image} =~ (.*\/.*:.*) ]]; then 14 | for i in {1..5}; do 15 | echo "Image ${image} - pull attempt ${i}" 16 | ${CONTAINER_COMMAND} pull "${image}" ; rc=$? 17 | if [[ "${rc}" -eq 0 ]]; then 18 | echo "Image pulled successfully" 19 | break 20 | fi 21 | 22 | echo "Failed to pull image ${image}" 23 | if [[ "${i}" -ne 5 ]]; then 24 | echo "Retrying ..." 
25 | sleep 5 26 | fi 27 | 28 | done 29 | fi 30 | done 31 | -------------------------------------------------------------------------------- /src/assisted_test_infra/test_infra/utils/base_name.py: -------------------------------------------------------------------------------- 1 | import uuid 2 | 3 | import consts 4 | from assisted_test_infra.test_infra import utils 5 | 6 | 7 | def get_name_suffix(length: int = consts.SUFFIX_LENGTH): 8 | return str(uuid.uuid4())[:length] 9 | 10 | 11 | class BaseName: 12 | def __init__(self, env_var: str, default_prefix: str, prefix: str = None, suffix: str = None): 13 | self._default_prefix = default_prefix 14 | self.prefix = prefix if prefix is not None else utils.get_env(env_var, default_prefix) 15 | self.suffix = suffix if suffix is not None else get_name_suffix() 16 | 17 | def __str__(self): 18 | return self.get() 19 | 20 | def __repr__(self): 21 | return self.get() 22 | 23 | def get(self): 24 | name = self.prefix 25 | if self.prefix == self._default_prefix and self.suffix: 26 | name = self.prefix + "-" + self.suffix 27 | return name 28 | -------------------------------------------------------------------------------- /ansible_files/vars/ci_oci_infrastucture.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # OCI related vars, built from CI vars 3 | unique_id: "{{ build_id }}" 4 | oci_tf_state_file: "{{ shared_dir }}/{{ unique_id }}.tfstate" 5 | oci_tf_vars_file: "{{ shared_dir }}/{{ unique_id }}.tfvars" 6 | oci_tenancy_id: "{{ lookup('file', cluster_profile_dir + '/tenancy_ocid') | trim }}" 7 | oci_user_id: "{{ lookup('file', cluster_profile_dir + '/user_ocid') | trim }}" 8 | oci_fingerprint: "{{ lookup('file', cluster_profile_dir + '/fingerprint') | trim }}" 9 | oci_region: "{{ lookup('file', cluster_profile_dir + '/region') | trim }}" 10 | oci_private_key_path: "{{ cluster_profile_dir }}/private_key" 11 | oci_compartment_id: "{{ lookup('file', cluster_profile_dir + '/parent_compartment_ocid') | trim }}" 12 | oci_private_ssh_key_path: "{{ cluster_profile_dir }}/oci-ssh-key" 13 | oci_public_ssh_key_path: "{{ cluster_profile_dir }}/oci-public-ssh-key" 14 | oci_dns_zone: "assisted-ci.oci-rhelcert.edge-sro.rhecoeng.com" 15 | -------------------------------------------------------------------------------- /docs/prerequisites.md: -------------------------------------------------------------------------------- 1 | # Prerequisites 2 | 3 | - CentOS 9 / RHEL 9 / Rocky 9 / AlmaLinux 9 host 4 | - File system that supports d_type 5 | - Ideally on a bare metal host with at least 64G of RAM. 6 | - Run as a user with password-less `sudo` access or be ready to enter the `sudo` password during the prepare phase. 7 | - Make sure to unset the KUBECONFIG variable in the same shell where you run `make`. 8 | - Install git & make binaries: 9 | ```bash 10 | dnf install -y make git 11 | ``` 12 | - Generate SSH keys if missing: 13 | ```bash 14 | ssh-keygen -t rsa -f ~/.ssh/id_rsa -P '' 15 | ``` 16 | - Get a valid pull secret (JSON string) from [redhat.com](https://console.redhat.com/openshift/install/pull-secret) if you want to test the installation (not needed for testing only the discovery flow). 
Export it as: 17 | ```bash 18 | export PULL_SECRET='<pull secret JSON>' 19 | # or alternatively, define PULL_SECRET_FILE="/path/to/pull/secret/file" 20 | ``` -------------------------------------------------------------------------------- /src/cli/commands/help_command.py: -------------------------------------------------------------------------------- 1 | from prompt_toolkit.completion import NestedCompleter 2 | from prompt_toolkit.shortcuts import message_dialog 3 | from tabulate import tabulate 4 | 5 | from .command import Command 6 | 7 | 8 | class HelpCommand(Command): 9 | HELP_COMMAND = "help" 10 | 11 | @classmethod 12 | def get_completer(cls): 13 | return NestedCompleter.from_nested_dict({cls.HELP_COMMAND: None}) 14 | 15 | def handle(self): 16 | headers = ("", "Key", "Description") 17 | keys = [ 18 | ("1", "Control + C", "Clear the text if it exists, else exit the CLI"), 19 | ("2", "Control + Q", "Exit the CLI"), 20 | ("3", "Tab", "Enter and navigate the autocomplete menu"), 21 | ("4", "Right", "Step right or autocomplete from history"), 22 | ] 23 | table = tabulate(keys, headers=headers, tablefmt="fancy_grid") 24 | 25 | message_dialog(title="Help", text=str(table)).run() 26 | -------------------------------------------------------------------------------- /ansible_files/roles/heterogeneous_cluster_extract_cir_name/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "Construct CIR file path for type" 3 | ansible.builtin.set_fact: 4 | cir_file_path: "{{ shared_dir }}/cir_{{ cir_type }}_{{ cir_identifier }}.json" 5 | 6 | - name: "Check if CIR file exists" 7 | ansible.builtin.stat: 8 | path: "{{ cir_file_path }}" 9 | register: cir_file_stat 10 | 11 | - name: "Read CIR file and extract resource name" 12 | ansible.builtin.set_fact: 13 | resource_info: "{{ lookup('file', cir_file_path) | from_json }}" 14 | 15 | - name: "Set resource_name fact from file" 16 | ansible.builtin.set_fact: 17 | cir_name: "{{ resource_info.name }}" 18 | when: resource_info.name is defined and resource_info.name != "" 19 | 20 | - name: "Fail if resource name could not be extracted" 21 | ansible.builtin.fail: 22 | msg: "Could not extract resource 'name' from {{ cir_file_path }}. 
Content: {{ resource_info }}" 23 | when: cir_name is not defined or cir_name == "" 24 | -------------------------------------------------------------------------------- /src/assisted_test_infra/test_infra/helper_classes/config/__init__.py: -------------------------------------------------------------------------------- 1 | from .base_cluster_config import BaseClusterConfig 2 | from .base_config import BaseConfig 3 | from .base_day2_cluster_config import BaseDay2ClusterConfig 4 | from .base_entity_config import BaseEntityConfig 5 | from .base_infra_env_config import BaseInfraEnvConfig 6 | from .base_nodes_config import BaseNodesConfig 7 | from .base_nutanix_config import BaseNutanixConfig 8 | from .base_oci_config import BaseOciConfig 9 | from .base_redfish_config import BaseRedfishConfig 10 | from .base_terraform_config import BaseTerraformConfig 11 | from .base_vsphere_config import BaseVSphereConfig 12 | 13 | __all__ = [ 14 | "BaseClusterConfig", 15 | "BaseDay2ClusterConfig", 16 | "BaseVSphereConfig", 17 | "BaseNutanixConfig", 18 | "BaseOciConfig", 19 | "BaseTerraformConfig", 20 | "BaseInfraEnvConfig", 21 | "BaseEntityConfig", 22 | "BaseNodesConfig", 23 | "BaseConfig", 24 | "BaseRedfishConfig", 25 | ] 26 | -------------------------------------------------------------------------------- /ansible_files/vars/standalone_oci_sample.yml: -------------------------------------------------------------------------------- 1 | --- 2 | # Adapt and use this var file if you want to easily provision OCI infra outside CI environment, e.g.: 3 | # ansible-playbook -e "@vars/standalone_oci_sample.yml" oci_generic_create_ci_machine_playbook.yml 4 | shared_dir: /tmp 5 | unique_id: "{{ lookup('env', 'USER') }}test" 6 | oci_tf_state_file: "{{ shared_dir }}/{{ unique_id }}.tfstate" 7 | oci_tf_vars_file: "{{ shared_dir }}/{{ unique_id }}.tfvars" 8 | oci_tenancy_id: "{{ lookup('env', 'OCI_TENANCY_ID') }}" 9 | oci_user_id: "{{ lookup('env', 'OCI_USER_ID') }}" 10 | oci_fingerprint: "{{ lookup('env', 'OCI_FINGERPRINT') }}" 11 | oci_region: "{{ lookup('env', 'OCI_REGION') }}" 12 | oci_private_key_path: "{{ lookup('env', 'OCI_PRIVATE_KEY_PATH') }}" 13 | oci_compartment_id: "{{ lookup('env', 'OCI_COMPARTMENT_ID') }}" 14 | oci_dns_zone: "{{ lookup('env', 'OCI_DNS_ZONE') }}" 15 | oci_private_ssh_key_path: "{{ lookup('env', 'HOME') }}/.ssh/id_rsa" 16 | oci_public_ssh_key_path: "{{ lookup('env', 'HOME') }}/.ssh/id_rsa.pub" 17 | -------------------------------------------------------------------------------- /.flake8: -------------------------------------------------------------------------------- 1 | [flake8] 2 | ignore = 3 | # E203 and W503 are rules that conflict with Black 4 | E203, 5 | W503, 6 | 7 | # B024 abstract base class with no abstract methods 8 | B024, 9 | 10 | # FS003 weirdly thinks things are supposed to be f-strings? 
Seems completely broken 11 | FS003, 12 | 13 | # ignoring cases of methods in abstract class that are left empty (with 'pass') 14 | # linter prefers having it reimplemented in inheriting classes 15 | B027, 16 | 17 | # ignoring suggestion of {str!r} instead of '{str}' 18 | B028, 19 | 20 | max-line-length = 120 21 | max-complexity = 18 22 | 23 | ; Exclude consts and utils __init__ files due to violation of F401 - imported but unused 24 | ; TODO - After removing import * from those init files delete these excluded files 25 | exclude = 26 | venv/ 27 | .venv/ 28 | build/ 29 | .github/ 30 | .idea/ 31 | .code/ 32 | src/consts/__init__.py 33 | src/assisted_test_infra/test_infra/utils/__init__.py 34 | .git 35 | assisted-service/ 36 | -------------------------------------------------------------------------------- /src/tests/templates/day2_machine.yaml.j2: -------------------------------------------------------------------------------- 1 | --- 2 | apiVersion: machine.openshift.io/v1beta1 3 | kind: Machine 4 | metadata: 5 | annotations: 6 | machine.openshift.io/instance-state: externally provisioned 7 | metal3.io/BareMetalHost: openshift-machine-api/{{ master_host_name }} 8 | finalizers: 9 | - machine.machine.openshift.io 10 | generation: 3 11 | labels: 12 | machine.openshift.io/cluster-api-cluster: {{ cluster_name }} 13 | machine.openshift.io/cluster-api-machine-role: master 14 | machine.openshift.io/cluster-api-machine-type: master 15 | name: {{ master_host_name }} 16 | namespace: openshift-machine-api 17 | spec: 18 | metadata: {} 19 | providerSpec: 20 | value: 21 | apiVersion: baremetal.cluster.k8s.io/v1alpha1 22 | customDeploy: 23 | method: install_coreos 24 | hostSelector: {} 25 | image: 26 | checksum: "" 27 | url: "" 28 | kind: BareMetalMachineProviderSpec 29 | metadata: 30 | creationTimestamp: null 31 | userData: 32 | name: master-user-data-managed 33 | -------------------------------------------------------------------------------- /ansible_files/roles/heterogeneous_cluster_export_primary_device_connection_details/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Check if primary group exists and contains exactly 1 host 3 | ansible.builtin.fail: 4 | msg: Group primary must exist and contain exactly 1 host 5 | when: groups[primary_device_group_name] is not defined or (groups[primary_device_group_name] | length != 1) 6 | 7 | - name: Export connection details of primary device 8 | when: shared_dir is defined 9 | block: 10 | - name: "Export connection details of primary device" 11 | ansible.builtin.template: 12 | src: "ci-machine-config.sh.j2" 13 | dest: "{{ shared_dir }}/ci-machine-config.sh" 14 | mode: "0644" 15 | 16 | - name: Write Packet common configuration file 17 | ansible.builtin.copy: 18 | src: packet-conf.sh 19 | dest: "{{ shared_dir }}/packet-conf.sh" 20 | mode: "0644" 21 | 22 | - name: Write Ansible inventory 23 | ansible.builtin.template: 24 | src: inventory.j2 25 | dest: "{{ shared_dir }}/inventory" 26 | mode: "0644" 27 | -------------------------------------------------------------------------------- /src/assisted_test_infra/test_infra/utils/entity_name.py: -------------------------------------------------------------------------------- 1 | import consts 2 | from assisted_test_infra.test_infra.utils.base_name import BaseName 3 | 4 | 5 | class ClusterName(BaseName): 6 | def __init__(self, prefix: str = None, suffix: str = None): 7 | super().__init__( 8 | env_var="CLUSTER_NAME", 9 | default_prefix=consts.CLUSTER_PREFIX, 
10 | prefix=prefix, 11 | suffix=suffix, 12 | ) 13 | 14 | 15 | class InfraEnvName(BaseName): 16 | def __init__(self, prefix: str = None, suffix: str = None): 17 | super().__init__( 18 | env_var="INFRA_ENV_NAME", 19 | default_prefix=consts.INFRA_ENV_PREFIX, 20 | prefix=prefix, 21 | suffix=suffix, 22 | ) 23 | 24 | 25 | class SpokeClusterNamespace(BaseName): 26 | def __init__(self, prefix: str = None, suffix: str = None): 27 | super().__init__( 28 | env_var="SPOKE_NAMESPACE", 29 | default_prefix=consts.DEFAULT_SPOKE_NAMESPACE, 30 | prefix=prefix, 31 | suffix=suffix, 32 | ) 33 | -------------------------------------------------------------------------------- /ansible_files/roles/heterogeneous_cluster_release_cir/meta/argument_specs.yml: -------------------------------------------------------------------------------- 1 | --- 2 | argument_specs: 3 | main: 4 | short_description: Release CI resource by reading from stored file 5 | description: 6 | - This role reads a previously saved OFCIR resource file and releases the resource 7 | - It delegates to the ofcir/release role with the extracted resource name 8 | author: Assisted Test Infrastructure Team 9 | options: 10 | cir_type: 11 | description: Type of CI resource to release 12 | type: str 13 | required: true 14 | cir_identifier: 15 | description: Unique identifier for the CI resource 16 | type: str 17 | required: true 18 | shared_dir: 19 | description: Directory path where CI resource information is stored 20 | type: path 21 | required: true 22 | ofcir_token: 23 | description: Authentication token for OFCIR API 24 | type: str 25 | required: true 26 | no_log: true 27 | ofcir_url: 28 | description: Base URL for the OFCIR API 29 | type: str 30 | required: true 31 | -------------------------------------------------------------------------------- /ansible_files/roles/heterogeneous_cluster_prepare_inventory/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Add provided instances to inventory 3 | ansible.builtin.add_host: 4 | name: "{{ item.name }}" 5 | groups: "{{ item.groups }}" 6 | ansible_host: "{{ item.ip }}" 7 | ansible_user: "root" 8 | ansible_ssh_private_key_file: "{{ private_key_path }}" 9 | access_public_ipv4: "{{ item.ip }}" 10 | loop: "{{ instances }}" 11 | loop_control: 12 | label: "{{ item.name }}" 13 | 14 | - name: Gather default IP from each instance 15 | ansible.builtin.shell: | 16 | set -o pipefail 17 | ip route get 1 | sed 's/^.*src \([^ ]*\).*$/\1/;q' 18 | delegate_to: "{{ item.name }}" 19 | register: default_ip_results 20 | loop: "{{ instances }}" 21 | loop_control: 22 | label: "{{ item.name }}" 23 | changed_when: false 24 | 25 | - name: Re-add each host with its default IP 26 | ansible.builtin.add_host: 27 | name: "{{ item.item.name }}" 28 | groups: "{{ item.item.groups }}" 29 | access_default_ipv4: "{{ item.stdout }}" 30 | loop: "{{ default_ip_results.results }}" 31 | loop_control: 32 | label: "{{ item.item.name }}" 33 | -------------------------------------------------------------------------------- /ansible_files/roles/oci_setup_for_test_infra/templates/assisted-additional-config.j2: -------------------------------------------------------------------------------- 1 | export CLUSTER_NAME=test-infra-oci-{{ lookup('community.general.random_string', special=false, upper=false) }} 2 | 3 | export PLATFORM=external 4 | export TF_PLATFORM=oci 5 | export EXTERNAL_PLATFORM_NAME=oci 6 | export EXTERNAL_CLOUD_CONTROLLER_MANAGER=External 7 | 8 | export OCI_COMPARTMENT="{{ 
oci_compartment_id }}" 9 | export OCI_USER="{{ oci_user_id }}" 10 | export OCI_PRIVATE_KEY_PATH="{{ oci_private_key_path_ci_machine }}" 11 | export OCI_PUBLIC_KEY_FINGERPRINT="{{ oci_fingerprint }}" 12 | export OCI_TENANCY="{{ oci_tenancy_id }}" 13 | export OCI_REGION="{{ oci_region }}" 14 | 15 | export OCI_INFRASTRUCTURE_ZIP_FILE="{{ oci_infrastructure_zip_file }}" 16 | export OCI_COMPUTE_SHAPE="{{ oci_compute_shape }}" 17 | export OCI_CONTROL_PLANE_SHAPE="{{ oci_control_plane_shape }}" 18 | 19 | export BASE_DOMAIN="{{ unique_id }}.{{ oci_dns_zone }}" 20 | export SERVICE_URL="{{ oci_ci_machine_public_ip }}" 21 | 22 | export HOST_INSTALLER_ARGS='{"args": ["--append-karg", "console=ttyS0"]}' 23 | export KERNEL_ARGUMENTS='[{"operation": "append", "value": "console=ttyS0"}]' 24 | -------------------------------------------------------------------------------- /src/cli/commands/command.py: -------------------------------------------------------------------------------- 1 | from abc import ABC, abstractmethod 2 | 3 | from prompt_toolkit.completion import DummyCompleter 4 | 5 | from service_client import log 6 | 7 | 8 | class Command(ABC): 9 | """Define a command handler""" 10 | 11 | _log_default_level = log.level 12 | 13 | def __init__(self, text: str): 14 | self._text = text 15 | self._args = None 16 | 17 | @property 18 | def text(self): 19 | return self._text 20 | 21 | @property 22 | def args(self): 23 | return self._args 24 | 25 | @args.setter 26 | def args(self, args: str): 27 | self._args = [arg for arg in args.split(" ") if arg] if args else [] 28 | 29 | @classmethod 30 | @abstractmethod 31 | def get_completer(cls): 32 | pass 33 | 34 | @abstractmethod 35 | def handle(self): 36 | pass 37 | 38 | 39 | class DummyCommand(Command): 40 | """Dummy command handler - prevents getting a None command in cases where the command text is empty""" 41 | 42 | @classmethod 43 | def get_completer(cls): 44 | return DummyCompleter() 45 | 46 | def handle(self): 47 | pass 48 | -------------------------------------------------------------------------------- /src/cli/key_binding.py: -------------------------------------------------------------------------------- 1 | from prompt_toolkit.key_binding import KeyBindings, KeyPressEvent 2 | from prompt_toolkit.keys import Keys 3 | 4 | from cli import cli_utils 5 | 6 | bindings = KeyBindings() 7 | 8 | 9 | @bindings.add(Keys.ControlZ) 10 | def _(_): 11 | pass 12 | 13 | 14 | @bindings.add(Keys.ControlQ) 15 | def _(event: KeyPressEvent): 16 | event.app.exit() 17 | 18 | 19 | @bindings.add(Keys.ControlC) 20 | def _(event: KeyPressEvent): 21 | text = event.app.current_buffer.text 22 | if not text: 23 | event.app.exit() 24 | else: 25 | event.app.current_buffer.text = "" 26 | 27 | 28 | @bindings.add(Keys.Enter) 29 | def _(event: KeyPressEvent): 30 | text = event.app.current_buffer.text 31 | keys = text.split(" ") if text else [] 32 | 33 | if len(keys) > 0 and keys[-1] in cli_utils.get_env_args_keys(): 34 | event.app.current_buffer.insert_text("=") 35 | elif text and text[-1] != " ": 36 | event.app.current_buffer.insert_text(" ") 37 | else: 38 | if text: 39 | event.app.current_buffer.history.store_string(text) 40 | event.app.current_buffer.validate_and_handle() 41 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | # To get started with Dependabot version updates, you'll need to specify which 2 | # package ecosystems to update and where the 
package manifests are located. 3 | # Please see the documentation for all configuration options: 4 | # https://help.github.com/github/administering-a-repository/configuration-options-for-dependency-updates 5 | 6 | version: 2 7 | updates: 8 | - package-ecosystem: "pip" 9 | directory: "/" 10 | schedule: 11 | interval: "weekly" 12 | assignees: 13 | - "eliorerz" 14 | - "adriengentil" 15 | labels: 16 | - "approved" 17 | - "lgtm" 18 | - "ok-to-test" 19 | - "dependencies" 20 | - "python" 21 | groups: # dependabot will only open one PR to update all dependencies 22 | python-dependencies: 23 | patterns: 24 | - "*" 25 | exclude-patterns: 26 | - "flake8-bugbear" 27 | - "netaddr" 28 | 29 | - package-ecosystem: "docker" 30 | directory: "/" 31 | schedule: 32 | interval: "weekly" 33 | 34 | - package-ecosystem: "terraform" 35 | directory: "/" 36 | schedule: 37 | interval: "weekly" 38 | -------------------------------------------------------------------------------- /src/triggers/olm_operators_trigger.py: -------------------------------------------------------------------------------- 1 | from contextlib import suppress 2 | from typing import Callable, List 3 | 4 | from assisted_test_infra.test_infra.utils.operators_utils import resource_param 5 | from consts import OperatorResource 6 | from triggers.env_trigger import Trigger, Triggerable 7 | 8 | 9 | class OlmOperatorsTrigger(Trigger): 10 | def __init__(self, conditions: List[Callable[[Triggerable], bool]], operator: str, is_sno: bool = False): 11 | super().__init__(conditions=conditions, operator=operator) 12 | self._operator = operator 13 | self._is_sno = is_sno 14 | 15 | def handle(self, config: Triggerable): 16 | variables_to_set = self.get_olm_variables(config) 17 | config.handle_trigger(self._conditions_strings, variables_to_set) 18 | 19 | def get_olm_variables(self, config: Triggerable) -> dict: 20 | operator_variables = {} 21 | 22 | for key in OperatorResource.get_resource_dict().keys(): 23 | with suppress(AttributeError): 24 | operator_variables[key] = resource_param(getattr(config, key), key, self._operator, self._is_sno) 25 | 26 | return operator_variables 27 | -------------------------------------------------------------------------------- /scripts/deploy_prometheus_ui.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -euo pipefail 3 | 4 | source scripts/utils.sh 5 | 6 | export NODE_IP=$(get_main_ip) 7 | export PROMETHEUS_SERVICE_NAME=prometheus-k8s 8 | export NAMESPACE=${NAMESPACE:-assisted-installer} 9 | export EXTERNAL_PORT=${EXTERNAL_PORT:-true} 10 | export PROMETHEUS_UI_PORT=$(( 9091 + $NAMESPACE_INDEX )) 11 | export OCP_PROMETHEUS_UI_PORT=$(( 9091 + $NAMESPACE_INDEX )) 12 | 13 | if [[ ("${DEPLOY_TARGET}" != "minikube") ]]; then 14 | exit 0 15 | fi 16 | 17 | mkdir -p build 18 | 19 | print_log "Wait until the Prometheus UI port is ready" 20 | wait_for_url_and_run "$(minikube service ${PROMETHEUS_SERVICE_NAME} -n ${NAMESPACE} --url)" "echo \"waiting for ${PROMETHEUS_SERVICE_NAME}\"" 21 | 22 | add_firewalld_port $PROMETHEUS_UI_PORT 23 | 24 | print_log "Starting port forwarding for deployment/${PROMETHEUS_SERVICE_NAME} on port $PROMETHEUS_UI_PORT" 25 | wait_for_url_and_run "http://${NODE_IP}:${PROMETHEUS_UI_PORT}" "spawn_port_forwarding_command $PROMETHEUS_SERVICE_NAME $PROMETHEUS_UI_PORT $NAMESPACE $NAMESPACE_INDEX $KUBECONFIG minikube" 26 | print_log "Prometheus UI can be reached at http://${NODE_IP}:${PROMETHEUS_UI_PORT}" 27 | 28 | print_log "Done" 29 | 
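The trigger wiring in `olm_operators_trigger.py` above is easiest to see in a short example. The following is a minimal sketch: the operator name `"lvm"` and the condition lambda are hypothetical, and the real condition callables and `Triggerable` config classes come from `triggers.env_trigger`:

```python
# Minimal sketch of wiring an OlmOperatorsTrigger, assuming a Triggerable
# config object whose attributes include the OperatorResource keys
# (hypothetical operator name and condition).
from triggers.olm_operators_trigger import OlmOperatorsTrigger

trigger = OlmOperatorsTrigger(
    conditions=[lambda cfg: "lvm" in (getattr(cfg, "olm_operators", None) or [])],
    operator="lvm",
    is_sno=True,
)

# When every condition holds for a given config, handle() resolves the
# operator's resource requirements via resource_param() and writes them
# onto the config:
# trigger.handle(config)
```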
-------------------------------------------------------------------------------- /ansible_files/roles/ofcir_release/meta/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | argument_specs: 3 | main: 4 | short_description: Release a CI resource back to OFCIR 5 | description: 6 | - This role releases a previously acquired CI resource back to OFCIR 7 | - It sends a DELETE request to the OFCIR API to free up the resource 8 | author: Assisted Test Infrastructure Team 9 | options: 10 | cir_name: 11 | description: Name of the CI resource to release 12 | type: str 13 | required: true 14 | ofcir_token: 15 | description: Authentication token for OFCIR API 16 | type: str 17 | required: true 18 | no_log: true 19 | ofcir_url: 20 | description: Base URL for the OFCIR API 21 | type: str 22 | required: true 23 | release_timeout_seconds: 24 | description: Timeout in seconds for the release request 25 | type: int 26 | default: 30 27 | request_retries: 28 | description: Number of retries for API requests 29 | type: int 30 | default: 60 31 | request_delay_seconds: 32 | description: Delay in seconds between retry attempts 33 | type: int 34 | default: 30 35 | -------------------------------------------------------------------------------- /ansible_files/roles/oci_export_connection_details/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Check if primary group exists and contains exactly 1 host 3 | ansible.builtin.fail: 4 | msg: Group primary must exist and contain exactly 1 host 5 | when: groups[primary_device_group_name] is not defined or (groups[primary_device_group_name] | length != 1) 6 | 7 | - name: Export connection details of primary device 8 | when: shared_dir is defined 9 | block: 10 | - name: "Export connection details of primary device" 11 | ansible.builtin.template: 12 | src: "ci-machine-config.sh.j2" 13 | dest: "{{ shared_dir }}/ci-machine-config.sh" 14 | mode: "0644" 15 | 16 | - name: Write fix uid file 17 | ansible.builtin.copy: 18 | src: fix-uid.sh 19 | dest: "{{ shared_dir }}/fix-uid.sh" 20 | mode: "0644" 21 | 22 | - name: Write Packet common configuration file 23 | ansible.builtin.copy: 24 | src: packet-conf.sh 25 | dest: "{{ shared_dir }}/packet-conf.sh" 26 | mode: "0644" 27 | 28 | - name: Write Ansible inventory 29 | ansible.builtin.template: 30 | src: inventory.j2 31 | dest: "{{ shared_dir }}/inventory" 32 | mode: "0644" 33 | -------------------------------------------------------------------------------- /src/cli/cli_utils.py: -------------------------------------------------------------------------------- 1 | import functools 2 | import json 3 | import subprocess 4 | from typing import List 5 | 6 | from tests.global_variables.default_variables import DefaultVariables 7 | 8 | __global_variables = DefaultVariables() 9 | 10 | 11 | def get_namespace() -> List[str]: 12 | res = subprocess.check_output(["kubectl", "get", "ns", "--output=json"]) 13 | try: 14 | namespaces = json.loads(res) 15 | except json.JSONDecodeError: 16 | return [] 17 | 18 | return [ns["metadata"]["name"] for ns in namespaces["items"]] 19 | 20 | 21 | @functools.cache 22 | def get_boolean_keys(): 23 | bool_env_vars = [ 24 | __global_variables.get_env(k).var_keys 25 | for k in __global_variables.__dataclass_fields__ 26 | if isinstance(getattr(__global_variables, k), bool) 27 | ] 28 | return [item for sublist in bool_env_vars for item in sublist] 29 | 30 | 31 | @functools.cache 32 | def get_env_args_keys(): 33 
| env_vars = [__global_variables.get_env(k).var_keys for k in __global_variables.__dataclass_fields__] 34 | return [item for sublist in env_vars for item in sublist] 35 | 36 | 37 | @functools.cache 38 | def inventory_client(): 39 | return __global_variables.get_api_client() 40 | -------------------------------------------------------------------------------- /ansible_files/oci_generic_destroy_ci_machine_playbook.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Destroy OCI infrastructure provisioned by test-infra 3 | hosts: primary 4 | tasks: 5 | - name: Destroy OCI infrastructure provisioned by test-infra 6 | ansible.builtin.shell: | 7 | source /root/config.sh 8 | export AI_URL="http://${SERVICE_URL}:8090" 9 | export CLUSTER_ID=$(aicli info cluster "${CLUSTER_NAME}" -f id -v) 10 | make destroy_nodes_oci 11 | args: 12 | chdir: /home/assisted 13 | retries: 5 14 | delay: 30 15 | register: result 16 | until: result is succeeded 17 | ignore_errors: true 18 | changed_when: false 19 | 20 | - name: Destroy OCI infrastructure provisioned for CI machine 21 | hosts: localhost 22 | vars_prompt: 23 | - name: oci_tf_state_file 24 | prompt: Terraform state file that was used to create the infrastructure 25 | private: false 26 | - name: oci_tf_vars_file 27 | prompt: Place where the Terraform variable file will be stored 28 | private: false 29 | vars: 30 | oci_terraform_workdir: "{{ [playbook_dir, '..', 'terraform_files', 'oci-ci-machine'] | path_join | realpath }}" 31 | roles: 32 | - role: oci_destroy_infra 33 | -------------------------------------------------------------------------------- /src/assisted_test_infra/test_infra/utils/manifests.py: -------------------------------------------------------------------------------- 1 | from copy import deepcopy 2 | from dataclasses import dataclass 3 | from pathlib import Path 4 | from typing import ClassVar, List 5 | 6 | 7 | @dataclass 8 | class Manifest: 9 | __ALLOWED_FOLDERS: ClassVar[List[str]] = ["manifests", "openshift"] 10 | 11 | folder: str 12 | file_name: str 13 | content: str 14 | 15 | def is_folder_allowed(self) -> bool: 16 | return self.folder in self.__ALLOWED_FOLDERS 17 | 18 | def get_allowed_folders(self) -> List[str]: 19 | return deepcopy(self.__ALLOWED_FOLDERS) 20 | 21 | @classmethod 22 | def get_manifests(cls, path: Path) -> List["Manifest"]: 23 | manifest_files = [] 24 | if path.is_dir(): 25 | for file_type in ("yaml", "yml", "json"): 26 | manifest_files.extend(list(path.rglob(f"*.{file_type}"))) 27 | else: 28 | manifest_files.append(path) 29 | 30 | manifests = [] 31 | for manifest in manifest_files: 32 | with open(manifest, "rb") as f: 33 | content = f.read() 34 | manifests.append(Manifest(folder=manifest.parent.name, file_name=manifest.name, content=content)) 35 | 36 | return manifests 37 | -------------------------------------------------------------------------------- /src/assisted_test_infra/test_infra/tools/concurrently.py: -------------------------------------------------------------------------------- 1 | from concurrent.futures import ThreadPoolExecutor 2 | from typing import Any, Callable, Dict, List, Tuple, Union 3 | 4 | from service_client import log 5 | 6 | 7 | def _safe_run(job, job_id: int, done_handler: Callable[[int], None]): 8 | call = None 9 | try: 10 | call, call_args = job[0], job[1:] 11 | return call(*call_args) 12 | except BaseException: 13 | log.debug("When concurrently running '%(call)s'", dict(call=str(call))) 14 | raise 15 | finally: 16 | if done_handler: 
17 | done_handler(job_id) 18 | 19 | 20 | def run_concurrently( 21 | jobs: Union[List, Dict, Tuple], 22 | done_handler: Callable[[int], None] = None, 23 | max_workers: int = 5, 24 | timeout: float = 2**31, 25 | ) -> Dict[int, Any]: 26 | result = {} 27 | if isinstance(jobs, (list, tuple)): 28 | jobs = dict(enumerate(jobs)) 29 | with ThreadPoolExecutor(max_workers=max_workers) as executor: 30 | futures = [(job_id, executor.submit(_safe_run, *(job, job_id, done_handler))) for job_id, job in jobs.items()] 31 | for job_id, future in futures: 32 | result[job_id] = future.result(timeout=timeout) 33 | 34 | return result 35 | -------------------------------------------------------------------------------- /terraform_files/baremetal_host/libvirt_domain_custom.xsl: -------------------------------------------------------------------------------- -------------------------------------------------------------------------------- /scripts/install_k8s_clients.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -euxo pipefail 3 | export SUDO=$(if [ -x "$(command -v sudo)" ]; then echo "sudo"; else echo ""; fi) 4 | 5 | function install_kubectl() { 6 | kubectl_version=v1.23.0 7 | curl --retry 3 --connect-timeout 30 -LO https://dl.k8s.io/release/${kubectl_version}/bin/linux/amd64/kubectl 8 | chmod +x kubectl 9 | 10 | curl --retry 3 --connect-timeout 30 -LO https://dl.k8s.io/${kubectl_version}/bin/linux/amd64/kubectl.sha256 11 | echo "$(cat kubectl.sha256)  kubectl" | sha256sum --check 12 | 13 | $SUDO mv kubectl /usr/local/bin/kubectl 14 | 15 | which kubectl > /dev/null 16 | } 17 | 18 | function install_oc() { 19 | if [ -x "$(command -v oc)" ]; then 20 | echo "oc is already installed" 21 | return 22 | fi 23 | 24 | echo "Installing oc..." 25 | for i in {1..4}; do 26 | curl --retry 3 --connect-timeout 30 -SL https://mirror.openshift.com/pub/openshift-v4/x86_64/clients/ocp/stable-4.12/openshift-client-linux.tar.gz | sudo tar -xz -C /usr/local/bin && break 27 | echo "oc installation failed. Retrying again in 5 seconds..." 
28 | sleep 5 29 | done 30 | 31 | which oc > /dev/null 32 | } 33 | 34 | install_kubectl 35 | install_oc 36 | -------------------------------------------------------------------------------- /ansible_files/roles/heterogeneous_cluster_export_day2_configuration/templates/assisted-additional-config.j2: -------------------------------------------------------------------------------- 1 | export SERVICE_URL="{{ assisted_service_url }}" 2 | 3 | export DAY2_CPU_ARCHITECTURE="{{ ansible_architecture }}" 4 | 5 | export DAY2_LIBVIRT_URI="qemu+ssh://{{ ssh_user }}@{{ access_libvirt_ip }}/system?no_verify=1&keyfile={{ ssh_private_key_path }}" 6 | 7 | {# split prefix (e.g: /20) into /24 subnets -#} 8 | {% set subnet_size_ipv4 = internal_network_prefix_ipv4 | ansible.utils.ipsubnet(subnet_length_ipv4) -%} 9 | export DAY2_MACHINE_CIDR="{{ internal_network_prefix_ipv4 | ansible.utils.ipsubnet(subnet_length_ipv4, 0) }}" 10 | export DAY2_PROVISIONING_CIDR="{{ internal_network_prefix_ipv4 | ansible.utils.ipsubnet(subnet_length_ipv4, subnet_size_ipv4|int // 2) }}" 11 | 12 | {# split prefix (e.g: /48) into /64 subnets -#} 13 | {% set subnet_size_ipv6 = internal_network_prefix_ipv6 | ansible.utils.ipsubnet(subnet_length_ipv6) -%} 14 | export DAY2_MACHINE_CIDR6="{{ internal_network_prefix_ipv6 | ansible.utils.ipsubnet(subnet_length_ipv6, 0) }}" 15 | export DAY2_PROVISIONING_CIDR6="{{ internal_network_prefix_ipv6 | ansible.utils.ipsubnet(subnet_length_ipv6, subnet_size_ipv6|int // 2) }}" 16 | 17 | export DAY2_NETWORK_IF="{{ network_if }}" 18 | export DAY2_SECONDARY_NETWORK_IF="{{ secondary_network_if }}" 19 | -------------------------------------------------------------------------------- /ansible_files/oci_generic_cleanup_playbook.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Cleanup expired resources in OCI 3 | hosts: localhost 4 | vars_prompt: 5 | - name: oci_compartment_id 6 | prompt: parent compartment OCID where the resources will be created 7 | private: false 8 | - name: oci_tenancy_id 9 | prompt: tenancy OCID authentication value 10 | private: false 11 | - name: oci_user_id 12 | prompt: user OCID authentication value 13 | private: false 14 | - name: oci_fingerprint 15 | prompt: key fingerprint authentication value 16 | private: false 17 | - name: oci_region 18 | prompt: OCI region 19 | private: false 20 | - name: oci_private_key_path 21 | prompt: private key path authentication value 22 | private: false 23 | vars: 24 | oci_terraform_workdir: "{{ [playbook_dir, '..', 'terraform_files', 'oci-ci-machine'] | path_join | realpath }}" 25 | environment: 26 | TF_VAR_compartment_ocid: "{{ oci_compartment_id }}" 27 | TF_VAR_user_ocid: "{{ oci_user_id }}" 28 | TF_VAR_private_key_path: "{{ oci_private_key_path }}" 29 | TF_VAR_fingerprint: "{{ oci_fingerprint }}" 30 | TF_VAR_tenancy_ocid: "{{ oci_tenancy_id }}" 31 | TF_VAR_region: "{{ oci_region }}" 32 | roles: 33 | - role: "oci_cleanup_resources" 34 | -------------------------------------------------------------------------------- /docs/assisted-service-client.md: -------------------------------------------------------------------------------- 1 | # Assisted Service Client 2 | 3 | The Assisted Service project generates a Python client package using `swagger-codegen-cli` whenever a new commit is merged into the `master` branch. This package is built and published to PyPI. 4 | 5 | This project uses that Python client to interact with the Assisted Service once it is deployed. 
In some use cases, the client must be compatible with the specific Assisted Service image in use (testing API changes, etc.). To ensure compatibility, the client is installed as part of the [build image](./build-image.md), which uses the client during test execution. 6 | 7 | Since the client code must match the deployed Assisted Service code, simply deploying a different service image via `SERVICE=...` does not provide a matching client. 8 | 9 | ### Rebuilding the Client with the build image 10 | 11 | To ensure you are using a matching client version, rebuild the [build image](./build-image.md) **before** deploying Assisted Service by running: 12 | 13 | ```bash 14 | make image_build SERVICE_REPO=<assisted-service repo URL> SERVICE_BASE_REF=<branch> 15 | ``` 16 | 17 | This target: 18 | 19 | 1. Fetches the assisted-service repo/branch you specified. 2. Builds the Python client from it. 3. Builds the build image with this client. -------------------------------------------------------------------------------- /src/assisted_test_infra/test_infra/helper_classes/config/base_day2_cluster_config.py: -------------------------------------------------------------------------------- 1 | from abc import ABC 2 | from dataclasses import dataclass 3 | from typing import Optional 4 | 5 | from assisted_service_client import models 6 | 7 | from assisted_test_infra.test_infra import helper_classes 8 | from assisted_test_infra.test_infra.helper_classes.config.base_cluster_config import BaseClusterConfig 9 | 10 | 11 | @dataclass 12 | class BaseDay2ClusterConfig(BaseClusterConfig, ABC): 13 | day1_cluster: Optional["helper_classes.cluster.Cluster"] = None 14 | day1_cluster_details: Optional[models.cluster.Cluster] = None 15 | day1_base_cluster_domain: Optional[str] = None 16 | day1_api_vip_dnsname: Optional[str] = None 17 | day2_workers_count: Optional[int] = None 18 | day2_masters_count: Optional[int] = None 19 | day2_cpu_architecture: Optional[str] = None 20 | infra_env_id: Optional[str] = None 21 | tf_folder: Optional[str] = None 22 | 23 | # day2 libvirt target and network configuration 24 | day2_libvirt_uri: Optional[str] = None 25 | day2_machine_cidr: Optional[str] = None 26 | day2_provisioning_cidr: Optional[str] = None 27 | day2_machine_cidr6: Optional[str] = None 28 | day2_provisioning_cidr6: Optional[str] = None 29 | day2_network_if: Optional[str] = None 30 | day2_secondary_network_if: Optional[str] = None 31 | -------------------------------------------------------------------------------- /src/cli/application.py: -------------------------------------------------------------------------------- 1 | from prompt_toolkit.shortcuts import clear, message_dialog, set_title 2 | 3 | from tests.global_variables import DefaultVariables 4 | 5 | from .commands.command import DummyCommand 6 | from .commands.commands_factory import InvalidCommandError 7 | from .prompt_handler import PromptHandler 8 | 9 | 10 | class CliApplication: 11 | def __init__(self): 12 | self._global_variables = DefaultVariables() 13 | self._prompt_handler = PromptHandler(self._global_variables) 14 | 15 | def _init_window(self): 16 | clear() 17 | set_title("Test Infra CLI") 18 | 19 | if not self._global_variables.pull_secret: 20 | message_dialog( 21 | title="Pull Secret", text="Can't find PULL_SECRET, some functionality might not work as expected" 22 | ).run() 23 | 24 | def run(self): 25 | self._init_window() 26 | while True: 27 | try: 28 | command = self._prompt_handler.get_prompt_results() 29 | except InvalidCommandError: 30 | print("\033[1;31mError, invalid 
command!\033[0m") 31 | continue 32 | if command is None: 33 | break 34 | if isinstance(command, DummyCommand): 35 | continue 36 | 37 | command.handle() 38 | 39 | print("Exiting ....") 40 | -------------------------------------------------------------------------------- /src/assisted_test_infra/test_infra/helper_classes/config/base_terraform_config.py: -------------------------------------------------------------------------------- 1 | from abc import ABC 2 | from dataclasses import dataclass, field 3 | from typing import Dict, List 4 | 5 | from munch import Munch 6 | 7 | import consts 8 | 9 | from .base_nodes_config import BaseNodesConfig 10 | 11 | 12 | @dataclass 13 | class BaseTerraformConfig(BaseNodesConfig, ABC): 14 | """ 15 | Define all configuration variables that are needed for the nodes during their execution 16 | All arguments must default to None and have a type hint 17 | """ 18 | 19 | single_node_ip: str = None 20 | dns_records: Dict[str, str] = field(default_factory=dict) 21 | 22 | libvirt_uri: str = consts.DEFAULT_LIBVIRT_URI 23 | libvirt_master_ips: List[str] = None 24 | libvirt_secondary_master_ips: List[str] = None 25 | libvirt_worker_ips: List[str] = None 26 | libvirt_secondary_worker_ips: List[str] = None 27 | libvirt_arbiter_ips: List[str] = None 28 | libvirt_secondary_arbiter_ips: List[str] = None 29 | ingress_dns: bool = False 30 | net_asset: Munch = None 31 | tf_folder: str = None 32 | network_name: str = None 33 | storage_pool_path: str = None 34 | running: bool = True 35 | static_ips_vlan: bool = None 36 | vlan_id: int = None 37 | 38 | uefi_boot_firmware: str = None 39 | uefi_boot_template: str = None 40 | uefi_boot: bool = False 41 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | debugpy==1.8.17 2 | dnspython==2.8.0 3 | filelock==3.20.0 4 | frozendict==2.4.6 5 | ipdb==0.13.13 6 | ipython==9.6.0 7 | jedi==0.19.2 8 | Jinja2==3.1.6 9 | junit-report==0.2.7 10 | kubernetes==34.1.0 11 | libvirt-python==11.9.0 12 | munch==4.0.0 13 | natsort==8.4.0 # natsort used in kni-assisted-installer-auto 14 | netaddr==1.3.0 ## from https://github.com/openshift/assisted-test-infra/pull/2398 15 | netifaces==0.11.0 16 | openshift-client==2.0.5 17 | paramiko==4.0.0 18 | pre-commit==4.3.0 19 | pycharm-remote-debugger==0.1.18 20 | pytest-xdist==3.8.0 21 | pytest==8.4.2 22 | python-dateutil==2.9.0.post0 23 | python-hcl2==7.3.1 24 | python-terraform==0.10.1 25 | PyYAML==6.0.3 26 | requests==2.32.5 27 | retry==0.9.2 28 | scp==0.15.0 29 | semver==3.0.4 30 | tabulate==0.9.0 31 | tqdm==4.67.1 32 | urllib3<3.0 33 | pyvmomi==9.0.0.0 34 | waiting>=1.4.1 35 | prompt-toolkit==3.0.52 36 | nutanix-api==0.0.20 37 | pytest-error-for-skips==2.0.2 38 | ansible==12.1.0 # Fix for issues with DNF and ansible core 2.17 on RHEL8 / RHEL9 systems 39 | jmespath==1.0.1 40 | oci==2.162.0 41 | setuptools==80.9.0 42 | pydantic==2.12.3 43 | certifi>=2023.7.22 # not directly required, pinned by Snyk to avoid a vulnerability 44 | cryptography>=42.0.8 # not directly required, pinned by Snyk to avoid a vulnerability 45 | zipp>=3.19.1 # not directly required, pinned by Snyk to avoid a vulnerability 46 | kfish==99.0.202502031542 47 | -------------------------------------------------------------------------------- /src/virsh_cleanup/__main__.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | 3 | from .virsh_cleanup import 
DEFAULT_SKIP_LIST, clean_virsh_resources, log 4 | 5 | 6 | def _get_parsed_args() -> argparse.Namespace: 7 | parser = argparse.ArgumentParser(description="Clean libvirt resources") 8 | group = parser.add_mutually_exclusive_group() 9 | group.add_argument("-a", "--all", help="Clean all virsh resources", action="store_true") 10 | group.add_argument("-m", "--minikube", help="Clean only minikube resources", action="store_true") 11 | group.add_argument("--skip-minikube", help="Clean all but skip minikube resources", action="store_true") 12 | group.add_argument( 13 | "-f", 14 | "--filter", 15 | help="List of filter of resources to delete", 16 | nargs="*", 17 | type=str, 18 | default=None, 19 | ) 20 | return parser.parse_args() 21 | 22 | 23 | def main(): 24 | log.info("===== CLEANING VIRSH RESOURCES =====") 25 | p_args = _get_parsed_args() 26 | skip_list = DEFAULT_SKIP_LIST 27 | resource_filter = [] 28 | if p_args.minikube: 29 | resource_filter.append("minikube") 30 | elif p_args.filter: 31 | resource_filter = p_args.filter 32 | else: 33 | skip_list.extend(["minikube", "minikube-net"]) 34 | 35 | clean_virsh_resources(skip_list, resource_filter) 36 | 37 | 38 | if __name__ == "__main__": 39 | main() 40 | -------------------------------------------------------------------------------- /src/assisted_test_infra/test_infra/helper_classes/config/base_oci_config.py: -------------------------------------------------------------------------------- 1 | from abc import ABC 2 | from dataclasses import dataclass 3 | from typing import Dict, List 4 | 5 | from assisted_test_infra.test_infra.helper_classes.config.base_nodes_config import BaseNodesConfig 6 | 7 | 8 | @dataclass 9 | class BaseOciConfig(BaseNodesConfig, ABC): 10 | oci_compartment_oicd: str = None 11 | oci_compute_shape: str = None 12 | oci_compute_cpu: str = None 13 | oci_controller_plane_shape: str = None 14 | oci_controller_plane_cpu: str = None 15 | oci_infrastructure_zip_file: str = None 16 | oci_dns_zone: str = None 17 | oci_user_oicd: str = None 18 | oci_private_key_path: str = None 19 | oci_key_fingerprint: str = None 20 | oci_tenancy_oicd: str = None 21 | oci_region: str = None 22 | oci_vcn_oicd: str = None 23 | oci_public_subnet_oicd: str = None 24 | oci_private_subnet_oicd: str = None 25 | oci_extra_node_nsg_oicds: List[str] = None 26 | oci_extra_lb_nsg_oicds: List[str] = None 27 | oci_boot_volume_type: bool = None 28 | 29 | def get_provider_config(self) -> Dict[str, str]: 30 | return { 31 | "user": self.oci_user_oicd, 32 | "key_file": self.oci_private_key_path, 33 | "fingerprint": self.oci_key_fingerprint, 34 | "tenancy": self.oci_tenancy_oicd, 35 | "region": self.oci_region, 36 | } 37 | -------------------------------------------------------------------------------- /src/cli/commands/env_command.py: -------------------------------------------------------------------------------- 1 | from prompt_toolkit.completion import NestedCompleter 2 | from prompt_toolkit.shortcuts import clear 3 | from tabulate import tabulate 4 | 5 | from .. import cli_utils 6 | from .command import Command 7 | 8 | 9 | class EnvCommand(Command): 10 | """Get external environment information. 
Currently supports only commands for listing clusters and clearing 11 | the screen.""" 12 | 13 | ENV_COMMAND_CLUSTERS = "clusters" 14 | ENV_COMMAND_LIST = "list" 15 | ENV_COMMAND_CLEAR = "clear" 16 | 17 | @classmethod 18 | def get_completer(cls): 19 | return NestedCompleter.from_nested_dict({cls.ENV_COMMAND_CLEAR: None, cls.ENV_COMMAND_LIST: {"clusters": None}}) 20 | 21 | def command_list(self): 22 | if self.args and self.args[0] == "clusters": 23 | clusters = cli_utils.inventory_client().clusters_list() 24 | clusters_data = [(f"{i + 1})", clusters[i]["id"], clusters[i]["name"]) for i in range(len(clusters))] 25 | 26 | table = tabulate(clusters_data, headers=["", "Cluster ID", "Name"], tablefmt="fancy_grid") 27 | print(table, "\n") 28 | 29 | def handle(self): 30 | command, *args = self.text.strip().split(" ") 31 | self._args = args 32 | 33 | if command == self.ENV_COMMAND_CLEAR: 34 | clear() 35 | 36 | elif command == self.ENV_COMMAND_LIST: 37 | self.command_list() 38 | -------------------------------------------------------------------------------- /ansible_files/ofcir_heterogeneous_destroy_infra_playbook.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Destroy infrastructure for heterogeneous cluster tests 3 | hosts: localhost 4 | vars_prompt: 5 | - name: "ofcir_url" 6 | prompt: "Enter the full URL for the OFCIR server API" 7 | private: false 8 | 9 | - name: "ofcir_token" 10 | prompt: "Enter your OFCIR API Token" 11 | private: true 12 | 13 | - name: "primary_cir_type" 14 | prompt: "Enter the primary CI resource type you want to request" 15 | private: false 16 | 17 | - name: "secondary_cir_type" 18 | prompt: "Enter the secondary CI resource type you want to request" 19 | private: false 20 | 21 | - name: "shared_dir" 22 | prompt: "Enter the local path to store the final resource files" 23 | private: false 24 | default: "/tmp" 25 | tasks: 26 | - name: Set CI resources to delete 27 | ansible.builtin.set_fact: 28 | ci_resources: >- 29 | {{ 30 | {} | 31 | combine({primary_cir_identifier: primary_cir_type}) | 32 | combine({secondary_cir_identifier: secondary_cir_type}) 33 | }} 34 | 35 | - name: Release CI resources 36 | ansible.builtin.include_role: 37 | name: "heterogeneous_cluster_release_cir" 38 | vars: 39 | cir_type: "{{ item.value }}" 40 | cir_identifier: "{{ item.key }}" 41 | loop: "{{ ci_resources | dict2items }}" 42 | -------------------------------------------------------------------------------- /src/assisted_test_infra/test_infra/helper_classes/config/base_entity_config.py: -------------------------------------------------------------------------------- 1 | from abc import ABC 2 | from dataclasses import dataclass 3 | from typing import Optional 4 | 5 | from assisted_service_client import models 6 | 7 | from ...utils.base_name import BaseName 8 | from .base_config import BaseConfig 9 | 10 | 11 | @dataclass 12 | class BaseEntityConfig(BaseConfig, ABC): 13 | pull_secret: str = None 14 | ssh_public_key: str = None 15 | openshift_version: str = None 16 | additional_ntp_source: str = None 17 | user_managed_networking: bool = None 18 | control_plane_count: str = None 19 | high_availability_mode: str = None 20 | hyperthreading: str = None 21 | iso_download_path: str = None # TODO Needed only on infra env. 
Remove from here and move to BaseInfraEnvConfig 22 | worker_iso_download_path: str = None 23 | iso_image_type: str = None 24 | download_image: bool = None 25 | platform: str = None 26 | external_platform_name: str = None 27 | external_cloud_controller_manager: str = None 28 | is_ipv4: bool = None 29 | is_ipv6: bool = None 30 | base_dns_domain: str = None 31 | entity_name: BaseName = None 32 | proxy: models.Proxy = None 33 | ipxe_boot: bool = None 34 | cpu_architecture: Optional[str] = None 35 | is_bonded: bool = None 36 | num_bonded_slaves: int = None 37 | bonding_mode: str = None 38 | cluster_id: str = None 39 | load_balancer_type: str = None 40 | -------------------------------------------------------------------------------- /src/assisted_test_infra/test_infra/utils/terraform_util.py: -------------------------------------------------------------------------------- 1 | import json 2 | import os 3 | from distutils.dir_util import copy_tree 4 | from pathlib import Path 5 | 6 | from consts import consts 7 | from service_client import log 8 | 9 | 10 | class TerraformControllerUtil: 11 | @classmethod 12 | def get_folder(cls, cluster_name: str, namespace=None): 13 | folder_name = f"{cluster_name}__{namespace}" if namespace else f"{cluster_name}" 14 | return os.path.join(consts.TF_FOLDER, folder_name) 15 | 16 | @classmethod 17 | def create_folder(cls, cluster_name: str, platform: str): 18 | tf_folder = cls.get_folder(cluster_name) 19 | log.info("Creating %s as terraform folder", tf_folder) 20 | if os.path.exists(tf_folder): 21 | return os.path.join(tf_folder, platform) 22 | 23 | cls._copy_template_tree(tf_folder) 24 | tf_folder = os.path.join(tf_folder, platform) 25 | cls.create_tfvars_file(tf_folder) 26 | return tf_folder 27 | 28 | @classmethod 29 | def create_tfvars_file(cls, tf_folder: str) -> str: 30 | tfvars_file = Path(tf_folder).joinpath(consts.TFVARS_JSON_NAME) 31 | 32 | # Create an empty tfvars file 33 | with open(tfvars_file, "w") as f: 34 | json.dump({}, f) 35 | 36 | return str(tfvars_file) 37 | 38 | @classmethod 39 | def _copy_template_tree(cls, dst: str): 40 | copy_tree(src=consts.TF_TEMPLATES_ROOT, dst=dst) 41 | -------------------------------------------------------------------------------- /ansible_files/roles/ofcir_acquire/meta/argument_specs.yml: -------------------------------------------------------------------------------- 1 | --- 2 | argument_specs: 3 | main: 4 | short_description: Acquire a CI resource from OFCIR 5 | description: 6 | - This role acquires a CI resource from OFCIR (OpenShift Farm for CI Resources) 7 | - It polls the OFCIR API until a resource is provisioned and available 8 | options: 9 | cir_type: 10 | description: Type of CI resource to acquire 11 | type: str 12 | required: true 13 | cir_identifier: 14 | description: Unique identifier for the CI resource 15 | type: str 16 | required: true 17 | shared_dir: 18 | description: Directory path where CI resource information will be stored 19 | type: path 20 | required: true 21 | ofcir_token: 22 | description: Authentication token for OFCIR API 23 | type: str 24 | required: true 25 | no_log: true 26 | ofcir_url: 27 | description: Base URL for the OFCIR API 28 | type: str 29 | required: true 30 | acquire_timeout_seconds: 31 | description: Timeout in seconds for the initial acquisition request 32 | type: int 33 | default: 70 34 | request_retries: 35 | description: Number of retries for API requests 36 | type: int 37 | default: 60 38 | request_delay_seconds: 39 | description: Delay in seconds between retry attempts 
40 | type: int 41 | default: 30 42 | -------------------------------------------------------------------------------- /src/assisted_test_infra/test_infra/controllers/__init__.py: -------------------------------------------------------------------------------- 1 | from .assisted_installer_infra_controller import AssistedInstallerInfraController 2 | from .iptables import IpTableCommandOption, IptableRule 3 | from .ipxe_controller.ipxe_controller import IPXEController 4 | from .iscsi_target_controller import IscsiTargetController 5 | from .nat_controller import NatController 6 | from .node_controllers import NodeController 7 | from .node_controllers.libvirt_controller import LibvirtController 8 | from .node_controllers.node import Node 9 | from .node_controllers.nutanix_controller import NutanixController 10 | from .node_controllers.oci_api_controller import OciApiController 11 | from .node_controllers.redfish_controller import RedfishController 12 | from .node_controllers.terraform_controller import TerraformController 13 | from .node_controllers.vsphere_controller import VSphereController 14 | from .proxy_controller.proxy_controller import ProxyController 15 | from .tang_controller.tang_controller import TangController 16 | 17 | __all__ = [ 18 | "AssistedInstallerInfraController", 19 | "NodeController", 20 | "NatController", 21 | "IptableRule", 22 | "IpTableCommandOption", 23 | "IPXEController", 24 | "IscsiTargetController", 25 | "Node", 26 | "ProxyController", 27 | "TangController", 28 | "TerraformController", 29 | "LibvirtController", 30 | "OciApiController", 31 | "VSphereController", 32 | "NutanixController", 33 | "RedfishController", 34 | ] 35 | -------------------------------------------------------------------------------- /src/consts/__init__.py: -------------------------------------------------------------------------------- 1 | from .consts import * # TODO - temporary import all old consts 2 | from .consts import IP_VERSIONS, NUMBER_OF_MASTERS, ClusterStatus, HostsProgressStages, NetworkType, OpenshiftVersion 3 | from .env_defaults import DEFAULT_SSH_PRIVATE_KEY_PATH, DEFAULT_SSH_PUBLIC_KEY_PATH 4 | from .kube_api import ( 5 | CRD_API_GROUP, 6 | CRD_API_VERSION, 7 | DEFAULT_WAIT_FOR_AGENTS_TIMEOUT, 8 | DEFAULT_WAIT_FOR_CRD_STATE_TIMEOUT, 9 | DEFAULT_WAIT_FOR_CRD_STATUS_TIMEOUT, 10 | DEFAULT_WAIT_FOR_INSTALLATION_COMPLETE_TIMEOUT, 11 | DEFAULT_WAIT_FOR_ISO_URL_TIMEOUT, 12 | DEFAULT_WAIT_FOR_KUBECONFIG_TIMEOUT, 13 | HIVE_API_GROUP, 14 | HIVE_API_VERSION, 15 | ) 16 | from .olm_operators import OperatorResource, OperatorStatus, OperatorType 17 | 18 | __all__ = [ 19 | "OperatorType", 20 | "HostsProgressStages", 21 | "ClusterStatus", 22 | "OperatorResource", 23 | "OperatorStatus", 24 | "OpenshiftVersion", 25 | "NetworkType", 26 | "CRD_API_GROUP", 27 | "CRD_API_VERSION", 28 | "DEFAULT_WAIT_FOR_CRD_STATUS_TIMEOUT", 29 | "DEFAULT_SSH_PRIVATE_KEY_PATH", 30 | "DEFAULT_SSH_PUBLIC_KEY_PATH", 31 | "DEFAULT_WAIT_FOR_CRD_STATE_TIMEOUT", 32 | "DEFAULT_WAIT_FOR_INSTALLATION_COMPLETE_TIMEOUT", 33 | "DEFAULT_WAIT_FOR_KUBECONFIG_TIMEOUT", 34 | "DEFAULT_WAIT_FOR_AGENTS_TIMEOUT", 35 | "HIVE_API_GROUP", 36 | "HIVE_API_VERSION", 37 | "DEFAULT_WAIT_FOR_ISO_URL_TIMEOUT", 38 | "NUMBER_OF_MASTERS", 39 | "IP_VERSIONS", 40 | ] 41 | -------------------------------------------------------------------------------- /terraform_files/limit_ip_dhcp_range.xsl: -------------------------------------------------------------------------------- [XSLT markup stripped during extraction — only bare line numbers survived; judging by the file name, the stylesheet rewrites a libvirt network definition to narrow its DHCP IP range]
-------------------------------------------------------------------------------- /ansible_files/roles/setup_ssh_key_pair/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Generate an OpenSSH keypair 3 | community.crypto.openssh_keypair: 4 | path: "{{ shared_dir }}/{{ ssh_private_key_name }}" 5 | mode: "0600" 6 | delegate_to: localhost 7 | register: ssh_key_pair 8 | run_once: true 9 | 10 | - name: Enable SHA1 algorithm 11 | ansible.builtin.command: "update-crypto-policies --set DEFAULT:SHA1" 12 | changed_when: false 13 | 14 | - name: Ensure .ssh directory exists in user's home 15 | ansible.builtin.file: 16 | path: "{{ ansible_user_dir }}/.ssh" 17 | state: directory 18 | mode: "0700" 19 | 20 | - name: Set ssh settings as fact 21 | ansible.builtin.set_fact: 22 | ssh_private_key_path: "{{ ansible_user_dir }}/.ssh/{{ ssh_private_key_name }}" 23 | ssh_user: "{{ ansible_user_id }}" 24 | 25 | - name: Copy private key to remote host 26 | ansible.builtin.copy: 27 | src: "{{ ssh_key_pair.filename }}" 28 | dest: "{{ ssh_private_key_path }}" 29 | owner: "{{ ansible_user_id }}" 30 | group: "{{ ansible_user_gid }}" 31 | mode: "0600" 32 | 33 | - name: Authorize key 34 | ansible.posix.authorized_key: 35 | user: "{{ ansible_user_id }}" 36 | state: present 37 | key: "{{ ssh_key_pair.public_key }}" 38 | 39 | - name: Create or update SSH client configuration 40 | ansible.builtin.blockinfile: 41 | block: "{{ lookup('template', 'config.j2') }}" 42 | path: "{{ ansible_user_dir }}/.ssh/config" 43 | create: true 44 | owner: "{{ ansible_user_id }}" 45 | group: "{{ ansible_user_gid }}" 46 | mode: "0600" 47 | -------------------------------------------------------------------------------- /scripts/test_ui.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -euo pipefail 3 | 4 | source scripts/utils.sh 5 | 6 | export NO_UI=${NO_UI:-n} 7 | if [ "${NO_UI}" != "n" ]; then 8 | exit 0 9 | fi 10 | 11 | export NODE_IP=$(get_main_ip) 12 | export UI_PORT=${UI_PORT:-6008} 13 | export DEPLOY_TAG=${DEPLOY_TAG:-latest} 14 | export CYPRESS_BASE_URL=${CYPRESS_BASE_URL:-http://${NODE_IP}:${UI_PORT}} # URL of running Metal3 Installer UI 15 | export TESTS_IMAGE=${TESTS_IMAGE:-"quay.io/edge-infrastructure/assisted-installer-ui:${DEPLOY_TAG}"} 16 | export CONTAINER_COMMAND=${CONTAINER_COMMAND:-$(get_container_runtime_command)} 17 | export BASE_DIR=${BASE_DIR:-"$(pwd)"/$(date +%D_%T | sed 's/\//_/g' | sed 's/:/-/g')} # where screenshots will be stored 18 | 19 | if [[ "${CONTAINER_COMMAND}" = *"podman"* ]]; then 20 | export PODMAN_FLAGS="--pull=always" 21 | else 22 | export PODMAN_FLAGS="" 23 | fi 24 | 25 | echo Connecting to UI at: ${CYPRESS_BASE_URL} 26 | echo Test image: ${TESTS_IMAGE} 27 | 28 | export VIDEO_DIR=${BASE_DIR}/videos 29 | export SCREENSHOT_DIR=${BASE_DIR}/screenshots 30 | 31 | mkdir -p ${VIDEO_DIR} 32 | mkdir -p ${SCREENSHOT_DIR} 33 | 34 | ${CONTAINER_COMMAND} run -it \ 35 | -w /e2e \ 36 | -e CYPRESS_BASE_URL="${CYPRESS_BASE_URL}" \ 37 | -e CYPRESS_PULL_SECRET="${PULL_SECRET}" \ 38 | --security-opt label=disable \ 39 | --mount type=bind,source=${VIDEO_DIR},target=/e2e/cypress/videos \ 40 | --mount type=bind,source=${SCREENSHOT_DIR},target=/e2e/cypress/screenshots \ 41 | "${TESTS_IMAGE}" 42 | 43 | echo Screenshots and videos can be found in ${BASE_DIR} 44 |
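A minimal usage sketch (editorial addition, not a repo file) for TerraformControllerUtil from src/assisted_test_infra/test_infra/utils/terraform_util.py above; it assumes src/ is on the import path and that consts.TF_FOLDER and consts.TF_TEMPLATES_ROOT point at real directories, and the cluster/platform names are illustrative:

from assisted_test_infra.test_infra.utils.terraform_util import TerraformControllerUtil

# The first call copies the template tree and seeds an empty tfvars JSON file
# (consts.TFVARS_JSON_NAME) inside the platform sub-folder; repeated calls for
# the same cluster just return the existing platform sub-folder.
tf_folder = TerraformControllerUtil.create_folder(cluster_name="test-cluster", platform="baremetal")
print(tf_folder)  # -> <consts.TF_FOLDER>/test-cluster/baremetal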
-------------------------------------------------------------------------------- /src/assisted_test_infra/test_infra/helper_classes/cluster_host.py: -------------------------------------------------------------------------------- 1 | import json 2 | from typing import List 3 | 4 | from assisted_service_client import Host, Interface, Inventory 5 | 6 | DEFAULT_HOSTNAME = "localhost" 7 | 8 | 9 | class ClusterHost: 10 | def __init__(self, host_model: Host): 11 | self.__host_model = host_model 12 | self.__inventory = Inventory(**json.loads(self.__host_model.inventory)) 13 | 14 | def get_id(self): 15 | return self.__host_model.id 16 | 17 | def get_inventory(self) -> Inventory: 18 | return self.__inventory 19 | 20 | def get_hostname(self) -> str: 21 | return ( 22 | self.__host_model.requested_hostname if self.__host_model.requested_hostname else self.__inventory.hostname 23 | ) 24 | 25 | def interfaces(self) -> List[Interface]: 26 | return [Interface(**interface) for interface in self.__inventory.interfaces] 27 | 28 | def macs(self) -> List[str]: 29 | return [ifc.mac_address.lower() for ifc in self.interfaces()] 30 | 31 | def ips(self) -> List[str]: 32 | return self.ipv4_addresses() + self.ipv6_addresses() 33 | 34 | def ipv4_addresses(self) -> List[str]: 35 | results = list() 36 | 37 | for ifc in self.interfaces(): 38 | results.extend(ifc.ipv4_addresses) 39 | 40 | return results 41 | 42 | def ipv6_addresses(self) -> List[str]: 43 | results = list() 44 | 45 | for ifc in self.interfaces(): 46 | results.extend(ifc.ipv6_addresses) 47 | 48 | return results 49 | -------------------------------------------------------------------------------- /ansible_files/roles/common/setup_ssh_key_pair/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Generate an OpenSSH keypair 3 | community.crypto.openssh_keypair: 4 | path: "{{ shared_dir }}/{{ ssh_private_key_name }}" 5 | mode: "0600" 6 | delegate_to: localhost 7 | register: ssh_key_pair 8 | run_once: true 9 | 10 | - name: Enable SHA1 algorithm 11 | ansible.builtin.command: "update-crypto-policies --set DEFAULT:SHA1" 12 | changed_when: false 13 | 14 | - name: Ensure .ssh directory exists in user's home 15 | ansible.builtin.file: 16 | path: "{{ ansible_user_dir }}/.ssh" 17 | state: directory 18 | mode: "0700" 19 | 20 | - name: Set ssh settings as fact 21 | ansible.builtin.set_fact: 22 | ssh_private_key_path: "{{ ansible_user_dir }}/.ssh/{{ ssh_private_key_name }}" 23 | ssh_user: "{{ ansible_user_id }}" 24 | 25 | - name: Copy private key to remote host 26 | ansible.builtin.copy: 27 | src: "{{ ssh_key_pair.filename }}" 28 | dest: "{{ ssh_private_key_path }}" 29 | owner: "{{ ansible_user_id }}" 30 | group: "{{ ansible_user_gid }}" 31 | mode: "0600" 32 | 33 | - name: Authorize key 34 | ansible.posix.authorized_key: 35 | user: "{{ ansible_user_id }}" 36 | state: present 37 | key: "{{ ssh_key_pair.public_key }}" 38 | 39 | - name: Create or update SSH client configuration 40 | ansible.builtin.blockinfile: 41 | block: "{{ lookup('template', 'config.j2') }}" 42 | path: "{{ ansible_user_dir }}/.ssh/config" 43 | create: true 44 | owner: "{{ ansible_user_id }}" 45 | group: "{{ ansible_user_gid }}" 46 | mode: "0600" 47 | -------------------------------------------------------------------------------- /src/assisted_test_infra/resources/bootstrap_in_place/sno-worker-live.ign.j2: -------------------------------------------------------------------------------- 1 | { 2 | "ignition": { 3 | "config": {}, 4 | "version": 
"3.1.0" 5 | }, 6 | "passwd": { 7 | "users": [ 8 | { 9 | "name": "core", 10 | "sshAuthorizedKeys": ["{{ ssh_public_key }}"] 11 | } 12 | ] 13 | }, 14 | "storage": { 15 | "files": [ 16 | { 17 | "contents": { 18 | "source": "data:text/plain;charset=utf-8;base64,{{ worker_ign_contents | b64encode_utf8 }}", 19 | "verification": {} 20 | }, 21 | "filesystem": "root", 22 | "mode": 420, 23 | "overwrite": true, 24 | "path": "/root/config.ign" 25 | }, 26 | { 27 | "contents": { 28 | "source": "data:text/plain;charset=utf-8;base64,{{ install_sh_contents | b64encode_utf8 }}", 29 | "verification": {} 30 | }, 31 | "filesystem": "root", 32 | "mode": 448, 33 | "path": "/usr/local/bin/install.sh" 34 | } 35 | ] 36 | }, 37 | "systemd": { 38 | "units": [ 39 | { 40 | "contents": "[Service]\nType=oneshot\nExecStart=/usr/local/bin/install.sh\nEnvironment=INSTALL_DEVICE={{ install_device }}\n[Install]\nWantedBy=multi-user.target\n", 41 | "enabled": true, 42 | "name": "coreos-install.service" 43 | } 44 | ] 45 | } 46 | } 47 | -------------------------------------------------------------------------------- /src/assisted_test_infra/test_infra/helper_classes/kube_helpers/base_resource.py: -------------------------------------------------------------------------------- 1 | import abc 2 | import contextlib 3 | 4 | from kubernetes.client.rest import ApiException 5 | 6 | from .common import KubeAPIContext, ObjectReference 7 | 8 | 9 | class BaseResource(abc.ABC): 10 | """ 11 | A base object of both custom resources and kubernetes resources that holds 12 | a shared KubeAPIContext. Any sub instance of this class will be added to the 13 | shared context. 14 | """ 15 | 16 | context = KubeAPIContext() 17 | 18 | def __init__(self, name: str, namespace: str): 19 | self.context.resources.add(self) 20 | self._reference = ObjectReference(name=name, namespace=namespace) 21 | 22 | @property 23 | def ref(self) -> ObjectReference: 24 | return self._reference 25 | 26 | @abc.abstractmethod 27 | def create(self, **kwargs) -> None: 28 | pass 29 | 30 | @abc.abstractmethod 31 | def delete(self) -> None: 32 | pass 33 | 34 | @abc.abstractmethod 35 | def get(self) -> dict: 36 | pass 37 | 38 | def apply(self, **kwargs): 39 | with contextlib.suppress(ApiException): 40 | self.delete() 41 | 42 | self.create(**kwargs) 43 | 44 | 45 | class BaseCustomResource(BaseResource): 46 | """ 47 | Base class for all CRDs, enforces basic methods that every resource must 48 | have e.g create, path, get, delete and status. 49 | """ 50 | 51 | @abc.abstractmethod 52 | def patch(self, **kwargs) -> None: 53 | pass 54 | 55 | @abc.abstractmethod 56 | def status(self, **kwargs) -> dict: 57 | pass 58 | -------------------------------------------------------------------------------- /src/cli/commands/commands_factory.py: -------------------------------------------------------------------------------- 1 | import functools 2 | from typing import Union 3 | 4 | from prompt_toolkit.completion import merge_completers 5 | 6 | from tests.global_variables import DefaultVariables 7 | 8 | from .. 
import cli_utils 9 | from ..completers import DynamicNestedCompleter 10 | from .command import Command, DummyCommand 11 | from .env_command import EnvCommand 12 | from .help_command import HelpCommand 13 | from .test_command import TestCommand 14 | 15 | 16 | class InvalidCommandError(Exception): 17 | pass 18 | 19 | 20 | class CommandFactory: 21 | _supported_commands = { 22 | "": DummyCommand, 23 | "test": TestCommand, 24 | "list": EnvCommand, 25 | "clear": EnvCommand, 26 | "help": HelpCommand, 27 | } 28 | 29 | @classmethod 30 | def get_command(cls, text: str) -> Union[Command, None]: 31 | text = text if text else "" 32 | factory = cls._supported_commands.get(text.split(" ")[0]) 33 | try: 34 | return factory(text) 35 | except TypeError as e: 36 | raise InvalidCommandError(f"Error, invalid command {text}") from e 37 | 38 | @classmethod 39 | @functools.cache 40 | def get_completers(cls): 41 | commands = [c.get_completer() for c in {cmd for k, cmd in cls._supported_commands.items() if k}] 42 | return merge_completers(commands) 43 | 44 | @classmethod 45 | def env_vars_completers(cls, global_variables: DefaultVariables): 46 | keys = cli_utils.get_env_args_keys() 47 | return DynamicNestedCompleter.from_nested_dict({k: None for k in keys}, global_variables=global_variables) 48 | -------------------------------------------------------------------------------- /ansible_files/roles/oci_setup_for_test_infra/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Directory where OCI private key will be stored 3 | ansible.builtin.set_fact: 4 | oci_private_key_path_ci_machine: "{{ hostvars[ci_machine].ansible_user_dir }}/.oci/{{ oci_private_key_path | basename }}" 5 | oci_ci_machine_public_ip: "{{ hostvars[ci_machine].ansible_host }}" 6 | 7 | - name: Create directory where OCI private key will be stored on CI machine 8 | ansible.builtin.file: 9 | path: "{{ oci_private_key_path_ci_machine | dirname }}" 10 | state: directory 11 | mode: "0700" 12 | delegate_to: "{{ ci_machine }}" 13 | 14 | - name: Copy OCI private key to CI machine 15 | ansible.builtin.copy: 16 | src: "{{ oci_private_key_path }}" 17 | dest: "{{ oci_private_key_path_ci_machine }}" 18 | mode: "0600" 19 | delegate_to: "{{ ci_machine }}" 20 | 21 | - name: Create directory where OCI infrastructure archive will be stored on CI machine 22 | ansible.builtin.file: 23 | path: "{{ oci_infrastructure_zip_file | dirname }}" 24 | state: directory 25 | mode: "0700" 26 | delegate_to: "{{ ci_machine }}" 27 | 28 | - name: Download terraform template to create the infrastructure on OCI 29 | ansible.builtin.get_url: 30 | url: "{{ oci_infrastructure_zip_url }}" 31 | dest: "{{ oci_infrastructure_zip_file }}" 32 | mode: "0600" 33 | delegate_to: "{{ ci_machine }}" 34 | 35 | - name: Export OCI configuration in assisted-additional-config to shared_dir 36 | ansible.builtin.template: 37 | src: "assisted-additional-config.j2" 38 | dest: "{{ shared_dir }}/assisted-additional-config" 39 | mode: "0644" 40 | when: shared_dir is defined 41 | -------------------------------------------------------------------------------- /src/service_client/client_factory.py: -------------------------------------------------------------------------------- 1 | import functools 2 | from typing import Optional 3 | 4 | from assisted_service_client import ApiClient 5 | from kubernetes.client import ApiClient as KubeApiClient 6 | from kubernetes.client import Configuration as KubeConfiguration 7 | from kubernetes.config import 
load_kube_config 8 | 9 | import consts 10 | from service_client import InventoryClient, ServiceAccount 11 | from service_client.logger import log 12 | 13 | 14 | class ClientFactory: 15 | @staticmethod 16 | @functools.cache 17 | def create_client( 18 | url: str, 19 | offline_token: str, 20 | service_account: ServiceAccount, 21 | refresh_token: str, 22 | pull_secret: Optional[str] = "", 23 | wait_for_api: Optional[bool] = True, 24 | timeout: Optional[int] = consts.WAIT_FOR_BM_API, 25 | ) -> InventoryClient: 26 | log.info("Creating assisted-service client for url: %s", url) 27 | c = InventoryClient( 28 | inventory_url=url, 29 | offline_token=offline_token, 30 | service_account=service_account, 31 | refresh_token=refresh_token, 32 | pull_secret=pull_secret, 33 | ) 34 | if wait_for_api: 35 | c.wait_for_api_readiness(timeout) 36 | return c 37 | 38 | @staticmethod 39 | def create_kube_api_client(kubeconfig_path: Optional[str] = None) -> ApiClient: 40 | log.info("creating kube client with config file: %s", kubeconfig_path) 41 | 42 | conf = KubeConfiguration() 43 | load_kube_config(config_file=kubeconfig_path, client_configuration=conf) 44 | return KubeApiClient(configuration=conf) 45 | -------------------------------------------------------------------------------- /src/assisted_test_infra/test_infra/controllers/tang_controller/tang_controller.py: -------------------------------------------------------------------------------- 1 | import socket 2 | 3 | import consts 4 | from assisted_test_infra.test_infra import utils 5 | from assisted_test_infra.test_infra.controllers.containerized_controller import ContainerizedController 6 | 7 | 8 | class TangController(ContainerizedController): 9 | """ 10 | TangController deploys a Tang server inside a container running on the hypervisor, on port 7500. 11 | It allows deploying an AI OCP cluster encrypted with tang mode 12 | """ 13 | 14 | IMAGE = "registry.redhat.io/rhel8/tang" 15 | 16 | def __init__(self, name: str = None, port: int = consts.DEFAULT_TANG_SERVER_PORT, pull_secret: str = None): 17 | extra_flags = [f"-e PORT={port}", f"--authfile={self._create_auth_file(name, pull_secret)}"] 18 | super().__init__(name, port, self.IMAGE, extra_flags) 19 | self.ip = None 20 | self.address = None 21 | self.thumbprint = None 22 | self._set_server_address() 23 | 24 | def _set_server_address(self): 25 | host_name = socket.gethostname() 26 | self.ip = socket.gethostbyname(host_name) 27 | self.address = f"http://{self.ip}:{self._port}" 28 | 29 | @classmethod 30 | def _create_auth_file(cls, name: str, pull_secret: str): 31 | filename = f"{consts.WORKING_DIR}/{name}_authfile" 32 | with open(filename, "w") as opened_file: 33 | opened_file.write(pull_secret) 34 | return filename 35 | 36 | def set_thumbprint(self): 37 | exec_command = f"podman-remote exec -it {self._name} tang-show-keys {self._port}" 38 | self.thumbprint, _, _ = utils.run_command(exec_command, shell=True) 39 | -------------------------------------------------------------------------------- /src/assisted_test_infra/test_infra/utils/release_image_utils.py: -------------------------------------------------------------------------------- 1 | import json 2 | import logging 3 | 4 | from assisted_test_infra.test_infra import utils 5 | 6 | 7 | def extract_installer(release_image: str, dest: str): 8 | """ 9 | Extracts the installer binary from the release image. 10 | 11 | Args: 12 | release_image: The release image to extract the installer from. 13 | dest: The destination to extract the installer to.
14 | """ 15 | logging.info("Extracting installer from %s to %s", release_image, dest) 16 | with utils.pull_secret_file() as pull_secret: 17 | utils.run_command( 18 | f"oc adm release extract --registry-config '{pull_secret}'" 19 | f" --command=openshift-install --to={dest} {release_image}" 20 | ) 21 | 22 | 23 | def extract_rhcos_url_from_ocp_installer(installer_binary_path: str): 24 | """ 25 | Extracts the RHCOS download URL from the installer binary. 26 | 27 | Args: 28 | installer_binary_path: The path to the installer binary. 29 | """ 30 | logging.info(f"Extracting RHCOS URL from {installer_binary_path}") 31 | stdout, _, _ = utils.run_command(f"'{installer_binary_path}' coreos print-stream-json") 32 | 33 | jsonpath = "architectures.x86_64.artifacts.metal.formats.iso.disk.location" 34 | current_node = json.loads(stdout) 35 | for element in jsonpath.split("."): 36 | current_node = current_node.get(element, {}) 37 | 38 | if current_node == {}: 39 | raise ValueError(f"Could not extract RHCOS URL from {installer_binary_path}, malformed JSON") 40 | 41 | logging.info(f"Extracted RHCOS URL: {current_node}") 42 | 43 | return current_node 44 | -------------------------------------------------------------------------------- /skipper.yaml: -------------------------------------------------------------------------------- 1 | registry: quay.io 2 | build-container-image: assisted-test-infra 3 | build-container-tag: latest 4 | 5 | volumes: 6 | # programs 7 | - $(which oc --skip-alias || echo /usr/local/bin/oc):/usr/local/bin/oc 8 | - $(which kubectl --skip-alias || echo /usr/local/bin/kubectl):/usr/bin/kubectl 9 | - $(which minikube --skip-alias || echo /usr/local/bin/minikube):/usr/bin/minikube 10 | - $MINIKUBE_HOME:$MINIKUBE_HOME 11 | # config 12 | - $HOME/.kube/:$HOME/.kube/ 13 | - $HOME/.minikube/:$HOME/.minikube/ 14 | - $HOME/.ssh/:$HOME/.ssh/ 15 | - $HOME/.oci/:$HOME/.oci/ 16 | - $HOME/oci/:$HOME/oci/ 17 | 18 | # logs 19 | - /var/log:/var/log 20 | - /run/log/journal:/run/log/journal 21 | - /var/ai-logs:/var/ai-logs # using this when downloading triage logs 22 | 23 | # sockets 24 | - $HOME/.test-infra/etc/nginx/conf.d:/etc/nginx/conf.d 25 | 26 | # cache 27 | - $HOME/.cache/go-build/:/go/pkg/mod/ 28 | - $HOME/.cache/libvirt/:$HOME/.cache/libvirt/ 29 | 30 | # etc 31 | - /var/lib/libvirt/:/var/lib/libvirt/ 32 | - /var/run/libvirt/:/var/run/libvirt/ 33 | - /var/lib/libvirt/dnsmasq/:/var/lib/libvirt/dnsmasq/ 34 | - /tmp:/tmp/ 35 | - /dev/:/dev 36 | - /run/udev:/run/udev 37 | - /run/dbus/system_bus_socket:/run/dbus/system_bus_socket 38 | 39 | # podman - sharing the podman.socket between the host and the skipper container 40 | - $XDG_RUNTIME_DIR/podman/podman.sock:/run/podman/podman.sock 41 | 42 | # Network manager dnsmasq. 
Mounted to allow the container to write dnsmasq config files to HOST Network Manager 43 | - /etc/NetworkManager/dnsmasq.d:/etc/NetworkManager/dnsmasq.d 44 | env_file: 45 | - skipper.env 46 | 47 | env: 48 | CONTAINER_HOST: unix://run/podman/podman.sock 49 | -------------------------------------------------------------------------------- /packer_files/nutanix_centos_template/centos-config/ks.cfg: -------------------------------------------------------------------------------- 1 | # Use text install 2 | text 3 | 4 | %packages 5 | @^server-product-environment 6 | kexec-tools 7 | openssh-server 8 | perl 9 | cloud-init 10 | %end 11 | 12 | services --enabled=sshd 13 | 14 | url --url="https://mirror.stream.centos.org/9-stream/BaseOS/x86_64/os/" 15 | repo --name="AppStream" --baseurl="https://mirror.stream.centos.org/9-stream/AppStream/x86_64/os/" 16 | 17 | # Keyboard layouts 18 | keyboard --xlayouts='us' 19 | # System language 20 | lang en_US.UTF-8 21 | 22 | # Setting up network interface to DHCP 23 | network --bootproto=dhcp --ipv6=auto --hostname=centos-ks.local --activate 24 | 25 | # Run the Setup Agent on first boot 26 | firstboot --enable 27 | 28 | ignoredisk --only-use=sda 29 | # System bootloader configuration 30 | bootloader --append="crashkernel=auto" --location=mbr --boot-drive=sda 31 | 32 | part /boot --fstype=ext4 --size=512 33 | part pv.1 --size=10000 --grow 34 | volgroup vg00 pv.1 35 | logvol swap --vgname=vg00 --recommended --name=swap --fstype=swap 36 | logvol / --vgname=vg00 --percent=100 --grow --name=root --fstype=ext4 37 | 38 | # Partition clearing information 39 | clearpart --none --initlabel 40 | 41 | # System timezone 42 | timezone America/New_York --isUtc 43 | 44 | # Root password 45 | sshkey --username=root "SSH_PUBLIC_KEY_PLACEHOLDER" 46 | rootpw packer 47 | 48 | %addon com_redhat_kdump --enable --reserve-mb='auto' 49 | selinux --permissive 50 | %end 51 | 52 | %anaconda 53 | pwpolicy root --minlen=6 --minquality=1 --notstrict --nochanges --notempty 54 | pwpolicy user --minlen=6 --minquality=1 --notstrict --nochanges --emptyok 55 | pwpolicy luks --minlen=6 --minquality=1 --notstrict --nochanges --notempty 56 | %end 57 | 58 | reboot --eject 59 | -------------------------------------------------------------------------------- /packer_files/vsphere_centos_template/centos-config/centos8-ks.cfg: -------------------------------------------------------------------------------- 1 | # Use graphical install 2 | graphical 3 | 4 | %packages 5 | @^server-product-environment 6 | kexec-tools 7 | open-vm-tools 8 | openssh-server 9 | perl 10 | cloud-init 11 | %end 12 | 13 | services --enabled=sshd 14 | 15 | url --url="https://mirror.stream.centos.org/9-stream/BaseOS/x86_64/os/" 16 | repo --name="AppStream" --baseurl="https://mirror.stream.centos.org/9-stream/AppStream/x86_64/os/" 17 | 18 | # Keyboard layouts 19 | keyboard --xlayouts='us' 20 | # System language 21 | lang en_US.UTF-8 22 | 23 | # Setting up network interface to DHCP 24 | network --bootproto=dhcp --ipv6=auto --hostname=centos-ks.local --activate 25 | 26 | # Run the Setup Agent on first boot 27 | firstboot --enable 28 | 29 | ignoredisk --only-use=sda 30 | # System bootloader configuration 31 | bootloader --append="crashkernel=auto" --location=mbr --boot-drive=sda 32 | 33 | part /boot --fstype=ext4 --size=512 34 | part pv.1 --size=10000 --grow 35 | volgroup vg00 pv.1 36 | logvol swap --vgname=vg00 --recommended --name=swap --fstype=swap 37 | logvol / --vgname=vg00 --size=5000 --grow --name=root --fstype=ext4 38 | 39 | #
Partition clearing information 40 | clearpart --none --initlabel 41 | 42 | # System timezone 43 | timezone America/New_York --isUtc 44 | 45 | # Root password 46 | sshkey --username=root "${key}" 47 | rootpw ${password} 48 | 49 | %addon com_redhat_kdump --enable --reserve-mb='auto' 50 | selinux --permissive 51 | %end 52 | 53 | %anaconda 54 | pwpolicy root --minlen=6 --minquality=1 --notstrict --nochanges --notempty 55 | pwpolicy user --minlen=6 --minquality=1 --notstrict --nochanges --emptyok 56 | pwpolicy luks --minlen=6 --minquality=1 --notstrict --nochanges --notempty 57 | %end 58 | 59 | reboot -------------------------------------------------------------------------------- /ansible_files/roles/oci_cleanup_resources/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Look for OCI provider binaries 3 | ansible.builtin.find: 4 | paths: "{{ oci_terraform_workdir }}/.terraform/providers" 5 | file_type: file 6 | patterns: "terraform-provider-oci_*" 7 | recurse: true 8 | register: found_providers 9 | 10 | - name: Select OCI provider binary 11 | ansible.builtin.set_fact: 12 | oci_provider_bin: "{{ (found_providers.files | first).path }}" 13 | 14 | - name: Create temporary directory where resources will be imported as terraform files 15 | ansible.builtin.tempfile: 16 | state: directory 17 | suffix: terraform 18 | register: terraform_working_tmp_dir 19 | when: terraform_working_dir is not defined 20 | 21 | - name: Set terraform_working_dir 22 | ansible.builtin.set_fact: 23 | terraform_working_dir: "{{ terraform_working_tmp_dir.path }}" 24 | when: terraform_working_dir is not defined 25 | 26 | - name: Import OCI resources 27 | ansible.builtin.command: 28 | cmd: >- 29 | {{ oci_provider_bin }} 30 | -command=export 31 | -compartment_id={{ oci_compartment_id }} 32 | -output_path=. 
33 | -services=core,load_balancer,network_load_balancer,identity,object_storage,tagging 34 | -generate_state 35 | creates: "terraform.tfstate" 36 | chdir: "{{ terraform_working_dir }}" 37 | check_mode: false 38 | 39 | - name: Check terraform state file 40 | ansible.builtin.stat: 41 | path: "{{ [terraform_working_dir, 'terraform.tfstate'] | path_join }}" 42 | register: terraform_state_file_result 43 | 44 | - name: Cleanup resources when a terraform state file exists 45 | ansible.builtin.include_tasks: 46 | file: cleanup_resources.yml 47 | when: terraform_state_file_result.stat.exists 48 | -------------------------------------------------------------------------------- /packer_files/vsphere_centos_template/sources.pkr.hcl: -------------------------------------------------------------------------------- 1 | source "vsphere-iso" "test-infra-template" { 2 | vcenter_server = var.vsphere_server 3 | username = var.vsphere_username 4 | password = var.vsphere_password 5 | datacenter = var.vsphere_datacenter 6 | insecure_connection = true 7 | convert_to_template = true 8 | 9 | # SSH 10 | ssh_username = "root" 11 | ssh_private_key_file = var.ssh_private_key_file 12 | ssh_bastion_host = var.ssh_bastion_host 13 | ssh_bastion_username = var.ssh_bastion_username 14 | ssh_bastion_private_key_file = var.ssh_bastion_private_key_file 15 | 16 | # Hardware Configuration 17 | CPUs = var.vcpus 18 | RAM = var.memory_size 19 | 20 | # Location Configuration 21 | vm_name = var.vm_name 22 | folder = var.vsphere_folder 23 | cluster = var.vsphere_cluster 24 | datastore = var.vsphere_datastore 25 | 26 | # Shutdown Configuration 27 | shutdown_command = "shutdown -P now" 28 | 29 | # ISO Configuration 30 | iso_checksum = var.iso_checksum 31 | iso_url = var.iso_url 32 | 33 | # Configuration 34 | guest_os_type = "centos8_64Guest" 35 | notes = "Built via Packer" 36 | 37 | cd_content = { 38 | "centos8-ks.cfg" = templatefile("centos-config/centos8-ks.cfg", { key = var.ssh_public_key, password = var.root_password }) 39 | } 40 | 41 | cd_label = "ksdata" 42 | remove_cdrom = true 43 | ip_wait_timeout = "1h" 44 | 45 | boot_command = [ 46 | "", 47 | " inst.ks=linux inst.ks=cdrom:/centos8-ks.cfg" 48 | ] 49 | 50 | network_adapters { 51 | network = var.vsphere_network 52 | network_card = "vmxnet3" 53 | } 54 | 55 | storage { 56 | disk_size = var.disk_size 57 | disk_thin_provisioned = true 58 | disk_eagerly_scrub = false 59 | } 60 | } 61 | -------------------------------------------------------------------------------- /scripts/ibmcloud_post_install.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | cat > /usr/bin/install_complete.sh <<EOF [heredoc opening (lines 4-19) stripped during extraction: the generated script's header, its \$LOGFILE definition and a log() helper whose closing lines survive] >> \$LOGFILE 20 | } 21 | 22 | log "Starting ibmcloud-post-install.sh" 23 | 24 | 25 | { 26 | echo "Install base prerequisites" 27 | dnf install -y https://dl.fedoraproject.org/pub/epel/epel-release-latest-8.noarch.rpm 28 | dnf install -y git make 29 | 30 | echo "Get AI repo" 31 | mkdir -p /home/test 32 | cd /home/test 33 | git clone https://github.com/openshift/assisted-test-infra.git 34 | 35 | echo "Provision test-infra" 36 | cd /home/test/assisted-test-infra 37 | 38 | scripts/install_environment.sh 39 | scripts/install_environment.sh config_sshd 40 | 41 | } &>> \$LOGFILE 42 | 43 | ### Status flag 44 | echo "Installation completed" 45 | log "Installation completed, thank you !" 46 | EOF 47 | chmod 755 /usr/bin/install_complete.sh 48 | 49 | # set timeoutsec explicitly to prevent timeout fail 50 | # ref.
https://qiita.com/khayama/items/861243aed5cf95f318d1 51 | cat > /etc/systemd/system/install_complete.service <<EOF [stripped during extraction: the systemd unit body, its closing EOF and the remainder of this script, together with the file separator and opening lines of the next Python file — a helper that resolves dotted paths such as "a.b[0].c" inside nested dicts/lists, whose signature tail survives:] str | dict | list: 22 | parts = path.split(".") 23 | current = resource 24 | 25 | for part in parts: 26 | if "[" in part and "]" in part: 27 | key, index = part.split("[") 28 | index = int(index.rstrip("]")) 29 | 30 | if not isinstance(current, dict) or key not in current: 31 | raise KeyError(f"Key '{key}' not found in {current}") 32 | current = current[key] 33 | 34 | if not isinstance(current, list): 35 | raise TypeError(f"Expected a list at '{key}', but got {type(current).__name__}") 36 | if index >= len(current): 37 | raise IndexError(f"Index {index} out of range for list at '{key}'") 38 | current = current[index] 39 | else: 40 | if not isinstance(current, dict) or part not in current: 41 | raise KeyError(f"Key '{part}' not found in {current}") 42 | current = current[part] 43 | 44 | return current 45 | -------------------------------------------------------------------------------- /ansible_files/roles/common/setup_sftp_share/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install EPEL 3 | ansible.builtin.dnf: 4 | enablerepo: powertools 5 | name: 6 | - epel-release 7 | 8 | - name: Install fuse and rclone 9 | ansible.builtin.dnf: 10 | enablerepo: powertools 11 | name: 12 | - fuse 13 | - rclone 14 | update_cache: true # as we enabled EPEL previously 15 | state: present 16 | 17 | - name: Create rclone configuration 18 | ansible.builtin.command: 19 | cmd: >- 20 | rclone config create 21 | --non-interactive 22 | --config {{ rclone_config_file }} 23 | {{ rclone_remote_name }} 24 | {{ rclone_type_sftp }} 25 | host={{ remote_host_ip }} 26 | user={{ ssh_user }} 27 | key_file={{ ssh_private_key_path }} 28 | creates: "{{ rclone_config_file }}" 29 | 30 | - name: Create systemd service file to mount share on {{ shared_directory }} 31 | ansible.builtin.template: 32 | src: "rclone.service.j2" 33 | dest: "/etc/systemd/system/rclone-{{ rclone_remote_name }}.service" 34 | owner: "{{ ansible_user_id }}" 35 | group: "{{ ansible_user_gid }}" 36 | mode: "0655" 37 | 38 | - name: Mount the share on {{ shared_directory }} 39 | ansible.builtin.systemd: 40 | state: restarted 41 | daemon_reload: true 42 | name: "rclone-{{ rclone_remote_name }}" 43 | ignore_errors: true 44 | register: rclone_service 45 | 46 | - name: Service restart failed 47 | when: 48 | - rclone_service.failed 49 | block: 50 | - name: Get Service status 51 | ansible.builtin.command: "journalctl -xeu rclone-{{ rclone_remote_name }}.service" 52 | register: post_shell 53 | changed_when: false 54 | - name: Print Status 55 | ansible.builtin.fail: 56 | msg: "{{ rclone_service }}" 57 | -------------------------------------------------------------------------------- /ansible_files/roles/setup_sftp_share/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install EPEL 3 | ansible.builtin.dnf: 4 | enablerepo: powertools 5 | name: 6 | - epel-release 7 | 8 | - name: Install fuse and rclone 9 | ansible.builtin.dnf: 10 | enablerepo: powertools 11 | name: 12 | - fuse 13 | - fuse3 14 | - rclone 15 | update_cache: true # as we enabled EPEL previously 16 | state: present 17 | 18 | - name: Create rclone configuration 19 | ansible.builtin.command: 20 | cmd: >- 21 | rclone config create 22 | --non-interactive 23 | --config {{ rclone_config_file }} 24 | {{ rclone_remote_name }} 25 | {{ rclone_type_sftp }} 26 | host={{
remote_host_ip }} 27 | user={{ ssh_user }} 28 | key_file={{ ssh_private_key_path }} 29 | creates: "{{ rclone_config_file }}" 30 | 31 | - name: Create systemd service file to mount share on {{ shared_directory }} 32 | ansible.builtin.template: 33 | src: "rclone.service.j2" 34 | dest: "/etc/systemd/system/rclone-{{ rclone_remote_name }}.service" 35 | owner: "{{ ansible_user_id }}" 36 | group: "{{ ansible_user_gid }}" 37 | mode: "0655" 38 | 39 | - name: Mount the share on {{ shared_directory }} 40 | ansible.builtin.systemd: 41 | state: restarted 42 | daemon_reload: true 43 | name: "rclone-{{ rclone_remote_name }}" 44 | ignore_errors: true 45 | register: rclone_service 46 | 47 | - name: Service restart failed 48 | when: 49 | - rclone_service.failed 50 | block: 51 | - name: Get Service status 52 | ansible.builtin.command: "journalctl -xeu rclone-{{ rclone_remote_name }}.service" 53 | register: post_shell 54 | changed_when: false 55 | - name: Print Status 56 | ansible.builtin.fail: 57 | msg: "{{ rclone_service }}" 58 | -------------------------------------------------------------------------------- /ansible_files/roles/oci_create_infra/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "Save terraform variables into {{ oci_tf_vars_file }}" 3 | ansible.builtin.template: 4 | src: "terraform.tfvars.j2" 5 | dest: "{{ oci_tf_vars_file }}" 6 | mode: "0644" 7 | 8 | - name: "Deploy Terraform Instance" 9 | community.general.terraform: 10 | project_path: "{{ oci_terraform_workdir }}" 11 | state: present 12 | force_init: true 13 | variables_files: 14 | - "{{ oci_tf_vars_file }}" 15 | register: deployed_tf 16 | 17 | - name: "Save Terraform state file in {{ oci_tf_state_file }}" 18 | ansible.builtin.copy: 19 | src: "{{ oci_terraform_workdir }}/terraform.tfstate" 20 | dest: "{{ oci_tf_state_file }}" 21 | mode: "0644" 22 | when: oci_tf_state_file is defined 23 | 24 | - name: "Add ci_machine to inventory" 25 | ansible.builtin.add_host: 26 | name: "{{ deployed_tf.outputs.ci_machine_inventory.value.display_name }}" 27 | ansible_host: "{{ deployed_tf.outputs.ci_machine_inventory.value.public_ip }}" 28 | ansible_user: "{{ deployed_tf.outputs.ci_machine_inventory.value.user }}" 29 | ansible_ssh_private_key_file: "{{ deployed_tf.outputs.ci_machine_inventory.value.ssh_private_key_path }}" 30 | groups: "primary" 31 | 32 | - name: Collect facts from added host 33 | ansible.builtin.setup: 34 | delegate_to: "{{ deployed_tf.outputs.ci_machine_inventory.value.display_name }}" 35 | delegate_facts: true 36 | retries: 5 37 | delay: 30 38 | register: result 39 | until: result is succeeded 40 | 41 | - name: Update all packages on host 42 | ansible.builtin.dnf: 43 | name: "*" 44 | state: latest 45 | delegate_to: "{{ deployed_tf.outputs.ci_machine_inventory.value.display_name }}" 46 | 47 | - name: Reboot machine 48 | ansible.builtin.reboot: 49 | delegate_to: "{{ deployed_tf.outputs.ci_machine_inventory.value.display_name }}" 50 | -------------------------------------------------------------------------------- /scripts/create_full_environment.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -o errexit 4 | 5 | export PATH=${PATH}:/usr/local/bin 6 | 7 | 8 | function error() { 9 | echo $@ 1>&2 10 | } 11 | 12 | # Check OS 13 | OS=$(awk -F= '/^ID=/ { print $2 }' /etc/os-release | tr -d '"') 14 | if [[ ! ${OS} =~ ^(ol)$ ]] && [[ ! ${OS} =~ ^(centos)$ ]] && [[ ! ${OS} =~ ^(rhel)$ ]] && [[ ! 
${OS} =~ ^(rocky)$ ]] && [[ ! ${OS} =~ ^(almalinux)$ ]]; then 15 | error "\"${OS}\" is an unsupported OS. We support only CentOS, RHEL, Rocky, AlmaLinux or Oracle Linux." 16 | error "It's not recommended to run the code in this repo locally on your personal machine, as it makes some opinionated configuration changes to the machine it's running on" 17 | exit 1 18 | fi 19 | 20 | # Check OS version 21 | VER=$(awk -F= '/^VERSION_ID=/ { print $2 }' /etc/os-release | tr -d '"' | cut -f1 -d'.') 22 | SUPPORTED_VERSIONS=( 8 9 ) 23 | if [[ ! " ${SUPPORTED_VERSIONS[@]} " =~ " ${VER} " ]]; then 24 | if [[ ${OS} =~ ^(centos)$ ]]; then 25 | error "CentOS version 8 or 9 is required." 26 | elif [[ ${OS} =~ ^(rhel)$ ]]; then 27 | error "RHEL version 8 or 9 is required." 28 | fi 29 | exit 1 30 | fi 31 | 32 | echo "Installing environment" 33 | scripts/install_environment.sh 34 | echo "Done installing" 35 | 36 | echo "Installing kind" 37 | make bring_assisted_service 38 | assisted-service/hack/kind/kind.sh install 39 | echo "Done installing kind" 40 | 41 | echo "Installing minikube" 42 | assisted-service/hack/minikube/minikube.sh install 43 | echo "Done installing minikube" 44 | 45 | echo "Installing oc and kubectl" 46 | scripts/install_k8s_clients.sh 47 | echo "Done installing oc and kubectl" 48 | 49 | echo "Creating image" 50 | make image_build 51 | echo "Done creating image" 52 | 53 | if [ "${DEPLOY_TARGET}" == "minikube" ] && [ -z "${NO_MINIKUBE}" ]; then 54 | echo "Start minikube" 55 | make start_minikube 56 | fi 57 | -------------------------------------------------------------------------------- /ansible_files/roles/ofcir_acquire/tasks/main.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Ensure the shared directory exists 3 | ansible.builtin.file: 4 | path: "{{ shared_dir }}" 5 | state: directory 6 | mode: "0755" 7 | 8 | - name: Define ofcir request details 9 | ansible.builtin.set_fact: 10 | request_url: "{{ ofcir_url }}?type={{ cir_type }}" 11 | cir_file_path: "{{ shared_dir }}/cir_{{ cir_type }}_{{ cir_identifier }}.json" 12 | 13 | - name: "Attempt to acquire a Host from OFCIR" 14 | ansible.builtin.uri: 15 | url: "{{ request_url }}" 16 | method: POST 17 | headers: 18 | "X-OFCIRTOKEN": "{{ ofcir_token }}" 19 | validate_certs: false 20 | return_content: true 21 | timeout: "{{ acquire_timeout_seconds }}" 22 | register: acquire_response 23 | until: acquire_response.status == 200 24 | retries: "{{ request_retries }}" 25 | delay: "{{ request_delay_seconds }}" 26 | failed_when: acquire_response.status == -1 or acquire_response.status >= 400 27 | 28 | - name: Extract resource name from response 29 | ansible.builtin.set_fact: 30 | resource_name: "{{ (acquire_response.json).name }}" 31 | 32 | - name: "Poll until resource is provisioned and has an IP" 33 | ansible.builtin.uri: 34 | url: "{{ ofcir_url }}/{{ resource_name }}" 35 | method: GET 36 | headers: 37 | "X-OFCIRTOKEN": "{{ ofcir_token }}" 38 | validate_certs: false 39 | return_content: true 40 | register: poll_response 41 | until: 42 | - poll_response.status == 200 43 | - poll_response.json.status is defined 44 | - poll_response.json.status == "in use" 45 | - poll_response.json.ip is defined 46 | - poll_response.json.ip != "" 47 | retries: "{{ request_retries }}" 48 | delay: "{{ request_delay_seconds }}" 49 | 50 | - name: "Save final CIR response to file" 51 | ansible.builtin.copy: 52 | content: "{{ poll_response.content }}" 53 | dest: "{{ cir_file_path }}" 54 | mode: "0644" 55 |
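For illustration (editorial addition, not a repo file), the acquire-and-poll flow that the ofcir_acquire role above implements can be sketched in Python. The endpoint shape, the X-OFCIRTOKEN header and the "in use"/ip readiness checks come from the role; the function name and retry defaults are assumptions:

import time

import requests


def acquire_cir(ofcir_url: str, token: str, cir_type: str, retries: int = 60, delay: int = 30) -> dict:
    headers = {"X-OFCIRTOKEN": token}
    # POST <ofcir_url>?type=<cir_type> hands back the name of an allocated resource
    # (the role also retries this request; a single attempt is shown here).
    resp = requests.post(f"{ofcir_url}?type={cir_type}", headers=headers, verify=False, timeout=70)
    resp.raise_for_status()
    name = resp.json()["name"]
    # Poll GET <ofcir_url>/<name> until the resource is "in use" and has an IP.
    for _ in range(retries):
        poll = requests.get(f"{ofcir_url}/{name}", headers=headers, verify=False)
        if poll.ok:
            cir = poll.json()
            if cir.get("status") == "in use" and cir.get("ip"):
                return cir
        time.sleep(delay)
    raise TimeoutError(f"CIR {name} never became ready")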
-------------------------------------------------------------------------------- /src/assisted_test_infra/test_infra/helper_classes/kube_helpers/__init__.py: -------------------------------------------------------------------------------- 1 | """ 2 | kube_helpers package provides infra to deploy, manage and install clusters using 3 | CRDs instead of RESTful API calls. 4 | 5 | Use this package as part of pytest infra with the fixture kube_api_context. 6 | It provides a KubeAPIContext object which holds information about the resources 7 | created as well as the kubernetes ApiClient. 8 | 9 | Example of usage: 10 | 11 | def test_kube_api_wait_for_install(kube_api_context): 12 | kube_api_client = kube_api_context.api_client 13 | cluster_deployment = deploy_default_cluster_deployment( 14 | kube_api_client, "test-cluster", **installation_params 15 | ) 16 | cluster_deployment.wait_to_be_installing() 17 | 18 | An Agent CRD will be created for each registered host. In order to start the 19 | installation all agents must be approved. 20 | When a ClusterDeployment has sufficient data and the assigned agents are 21 | approved, installation will be started automatically. 22 | """ 23 | 24 | from .agent import Agent 25 | from .agent_cluster_install import AgentClusterInstall 26 | from .cluster_deployment import ClusterDeployment 27 | from .cluster_image_set import ClusterImageSet, ClusterImageSetReference 28 | from .common import KubeAPIContext, ObjectReference, UnexpectedStateError, create_kube_api_client 29 | from .infraenv import InfraEnv, Proxy, deploy_default_infraenv 30 | from .nmstate_config import NMStateConfig 31 | from .secret import Secret, deploy_default_secret 32 | 33 | __all__ = ( 34 | "ClusterImageSet", 35 | "ClusterImageSetReference", 36 | "ClusterDeployment", 37 | "Secret", 38 | "Agent", 39 | "AgentClusterInstall", 40 | "KubeAPIContext", 41 | "ObjectReference", 42 | "InfraEnv", 43 | "NMStateConfig", 44 | "UnexpectedStateError", 45 | "deploy_default_secret", 46 | "deploy_default_infraenv", 47 | "create_kube_api_client", 48 | "Proxy", 49 | ) 50 | -------------------------------------------------------------------------------- /.yaspeller.json: -------------------------------------------------------------------------------- 1 | { 2 | "ignoreUrls": true, 3 | "findRepeatWords": true, 4 | "maxRequests": 5, 5 | "ignoreDigits": true, 6 | "lang": "en", 7 | "dictionary": [ 8 | "Ansible", 9 | "baremetal", 10 | "BareMetalHost", 11 | "CI", 12 | "CIDR", 13 | "ConfigMap", 14 | "DCO", 15 | "dhcp", 16 | "dhcpd", 17 | "dnsmasq", 18 | "endpoint", 19 | "filesystem", 20 | "filesystems", 21 | "GA", 22 | "GitHub", 23 | "hostname", 24 | "hostnames", 25 | "iDRAC", 26 | "iLO", 27 | "IPI", 28 | "IPMI", 29 | "KNI", 30 | "machineset", 31 | "Markdown", 32 | "nameserver", 33 | "NICs", 34 | "OCP", 35 | "openshift", 36 | "OpenShift", 37 | "orchestrator", 38 | "pingable", 39 | "playbook", 40 | "playbooks", 41 | "podman", 42 | "provisioner", 43 | "PRs", 44 | "PXE", 45 | "RHCOS", 46 | "RHEL", 47 | "routable", 48 | "subzone", 49 | "Unreachable", 50 | "VIPs", 51 | "VLAN", 52 | "VM", 53 | "YAML", 54 | "DPDK", 55 | "PTP", 56 | "Kubernetes", 57 | "virtualization", 58 | "NMstate", 59 | "Hugepages", 60 | "NFV", 61 | "BM", 62 | "SR-IOV", 63 | "CNV", 64 | "Netlify", 65 | "SCTP", 66 | "sexualized", 67 | "asciidoc", 68 | "FQDN", 69 | "MachineConfig", 70 | "VMs", 71 | "NTP", 72 | "QEMU", 73 | "kubelet", 74 | "ansible", 75 | "endfor", 76 | "Jekyll", 77 | "ipi", 78 | "versioned", 79 | "unversioned", 80 | "devprev", 81 | "CentOS", 82 | 
"Dockerfile", 83 | "UI", 84 | "kubeconfig", 85 | "libvirt", 86 | "minikube", 87 | "Parrilla", 88 | "Parrilla's", 89 | "ClusterId", 90 | "BMI", 91 | "OPENSHIFT", 92 | "virsh", 93 | "iso", 94 | "vms", 95 | "ssh", 96 | "cd", 97 | "os" 98 | ] 99 | } 100 | -------------------------------------------------------------------------------- /ansible_files/oci_generic_create_ci_machine_playbook.yml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Create and configure the CI machine on OCI 3 | hosts: localhost 4 | vars_prompt: 5 | - name: unique_id 6 | prompt: Unique ID used to name the resources 7 | private: false 8 | - name: oci_compartment_id 9 | prompt: parent compartment OCID where the resources will be created 10 | private: false 11 | - name: oci_tf_state_file 12 | prompt: Terrafom state file that was used to create the infrastructure 13 | private: false 14 | - name: oci_tf_vars_file 15 | prompt: Place where the Terrafom variable file will be stored 16 | private: false 17 | - name: oci_tenancy_id 18 | prompt: tenancy OCID authentication value 19 | private: false 20 | - name: oci_user_id 21 | prompt: user OCID authentication value 22 | private: false 23 | - name: oci_fingerprint 24 | prompt: key fingerprint authentication value 25 | private: false 26 | - name: oci_region 27 | prompt: OCI region 28 | private: false 29 | - name: oci_private_key_path 30 | prompt: private key path authentication value 31 | private: false 32 | - name: oci_private_ssh_key_path 33 | prompt: private SSH key path used to login the CI machine 34 | private: false 35 | - name: oci_public_ssh_key_path 36 | prompt: public SSH key path used to login the CI machine 37 | private: false 38 | vars: 39 | oci_terraform_workdir: "{{ [playbook_dir, '..', 'terraform_files', 'oci-ci-machine'] | path_join | realpath }}" 40 | roles: 41 | - role: oci_create_infra 42 | vars: 43 | private_ssh_key_path: "{{ oci_private_ssh_key_path }}" 44 | public_ssh_key_path: "{{ oci_public_ssh_key_path }}" 45 | - role: "oci_export_connection_details" # Export connection details, required for the "common" steps in Prow 46 | - role: "oci_setup_for_test_infra" 47 | vars: 48 | ci_machine: "{{ groups['primary'][0] }}" 49 | -------------------------------------------------------------------------------- /src/assisted_test_infra/test_infra/helper_classes/config/base_cluster_config.py: -------------------------------------------------------------------------------- 1 | from abc import ABC 2 | from dataclasses import dataclass 3 | from typing import List 4 | 5 | from assisted_service_client import models 6 | 7 | from ...utils.entity_name import BaseName, ClusterName 8 | from ...utils.manifests import Manifest 9 | from .base_entity_config import BaseEntityConfig 10 | 11 | 12 | @dataclass 13 | class BaseClusterConfig(BaseEntityConfig, ABC): 14 | """ 15 | Define all configurations variables that are needed for Cluster during its execution. 16 | All arguments must default to None and be type annotated. 
17 | """ 18 | 19 | cluster_tags: str = None 20 | olm_operators: List[str] = None 21 | olm_bundles: List[str] = None 22 | vip_dhcp_allocation: bool = None 23 | cluster_networks: List[models.ClusterNetwork] = None 24 | service_networks: List[models.ServiceNetwork] = None 25 | machine_networks: List[models.MachineNetwork] = None 26 | kubeconfig_path: str = None 27 | network_type: str = None 28 | api_vips: List[models.ApiVip] = None 29 | ingress_vips: List[models.IngressVip] = None 30 | disk_encryption_mode: str = None 31 | disk_encryption_roles: str = None 32 | tang_servers: str = None 33 | custom_manifests: List[Manifest] = None 34 | is_disconnected: bool = None 35 | registry_ca_path: str = None 36 | load_balancer_type: str = None 37 | load_balancer_cidr: str = None 38 | install_working_dir: str = None 39 | libvirt_uri: str = None 40 | primary_stack: str = None 41 | 42 | @property 43 | def cluster_name(self) -> BaseName: 44 | return self.entity_name 45 | 46 | @cluster_name.setter 47 | def cluster_name(self, cluster_name: BaseName): 48 | self.entity_name = cluster_name 49 | 50 | 51 | # Add cluster_name to __annotations__ dict so we will be able to set it also on get_annotations 52 | # under BaseConfig 53 | BaseClusterConfig.__annotations__["cluster_name"] = ClusterName 54 | -------------------------------------------------------------------------------- /scripts/deploy_ui.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -euo pipefail 3 | set -x 4 | 5 | source scripts/utils.sh 6 | 7 | export UI_SERVICE_NAME=assisted-installer-ui 8 | export NO_UI=${NO_UI:-n} 9 | export NAMESPACE=${NAMESPACE:-assisted-installer} 10 | export EXTERNAL_PORT=${EXTERNAL_PORT:-true} 11 | 12 | if [[ "${NO_UI}" != "n" ]] || [[ "${DEPLOY_TARGET}" != @(minikube|kind) ]]; then 13 | exit 0 14 | fi 15 | 16 | if [[ "${OPENSHIFT_CI}" == "true" ]]; then 17 | echo "Skipping UI deployment in CI" 18 | exit 0 19 | fi 20 | 21 | mkdir -p build 22 | 23 | print_log "Starting ui" 24 | skipper run "make -C assisted-service/ deploy-ui" ${SKIPPER_PARAMS} TARGET=${DEPLOY_TARGET} DEPLOY_TAG=${DEPLOY_TAG} DEPLOY_MANIFEST_PATH=${DEPLOY_MANIFEST_PATH} DEPLOY_MANIFEST_TAG=${DEPLOY_MANIFEST_TAG} NAMESPACE=${NAMESPACE} 25 | 26 | ui_pod=$(get_pods_with_label app=assisted-installer-ui ${NAMESPACE}) 27 | kubectl wait -n ${NAMESPACE} --for=condition=Ready=True --timeout=60s $ui_pod 28 | 29 | case ${DEPLOY_TARGET} in 30 | minikube) 31 | node_ip=$(get_main_ip) 32 | ui_port=$(( 6008 + $NAMESPACE_INDEX )) 33 | ui_url="$(minikube service ${UI_SERVICE_NAME} -n ${NAMESPACE} --url)" 34 | ;; 35 | 36 | kind) 37 | node_ip=$(get_main_ip) 38 | ui_port=8060 39 | ui_url="http://${node_ip}:${ui_port}" 40 | ;; 41 | *) 42 | echo "Non-supported deploy target ${DEPLOY_TARGET}!"; 43 | exit 1 44 | ;; 45 | esac 46 | 47 | print_log "Wait till UI is ready" 48 | wait_for_url_and_run ${ui_url} "echo \"waiting for ${ui_url}\"" 49 | 50 | add_firewalld_port $ui_port 51 | 52 | if [[ "${DEPLOY_TARGET}" == "minikube" ]]; then 53 | print_log "Starting port forwarding for deployment/${UI_SERVICE_NAME} on port $ui_port" 54 | wait_for_url_and_run "http://${node_ip}:${ui_port}" "spawn_port_forwarding_command $UI_SERVICE_NAME $ui_port $NAMESPACE $NAMESPACE_INDEX $KUBECONFIG minikube" 55 | fi 56 | 57 | print_log "Done. 
Assisted-installer UI can be reached at http://${node_ip}:${ui_port}" 58 | -------------------------------------------------------------------------------- /ansible_files/roles/heterogeneous_cluster_prepare_inventory/meta/argument_specs.yml: -------------------------------------------------------------------------------- 1 | --- 2 | argument_specs: 3 | main: 4 | short_description: Prepare Ansible inventory from instance list 5 | description: 6 | - This role adds provided instances to the Ansible inventory 7 | - It also gathers default IP addresses from each instance for networking configuration 8 | author: Assisted Test Infrastructure Team 9 | options: 10 | instances: 11 | description: List of instances to add to inventory 12 | type: list 13 | required: true 14 | elements: dict 15 | options: 16 | name: 17 | description: Name/hostname of the instance 18 | type: str 19 | required: true 20 | groups: 21 | description: Ansible groups to assign this instance to 22 | type: list 23 | elements: str 24 | required: true 25 | ip: 26 | description: IP address of the instance 27 | type: str 28 | required: true 29 | extra: 30 | description: Additional metadata for the instance 31 | type: str 32 | required: false 33 | pool: 34 | description: Resource pool information 35 | type: str 36 | required: false 37 | provider: 38 | description: Cloud provider information 39 | type: str 40 | required: false 41 | providerInfo: 42 | description: Provider-specific information 43 | type: str 44 | required: false 45 | status: 46 | description: Current status of the instance 47 | type: str 48 | required: false 49 | type: 50 | description: Type or category of the instance 51 | type: str 52 | required: false 53 | 54 | private_key_path: 55 | description: Path to the SSH private key for connecting to instances 56 | type: path 57 | required: true 58 | -------------------------------------------------------------------------------- /terraform_files/nutanix/virtual_machines.tf: -------------------------------------------------------------------------------- 1 | 2 | # Creating the master VMs. 3 | resource "nutanix_virtual_machine" "master" { 4 | count = var.masters_count 5 | name = "${var.cluster_name}-master-${count.index}" 6 | cluster_uuid = data.nutanix_cluster.cluster.id 7 | num_vcpus_per_socket = var.nutanix_control_plane_cores_per_socket 8 | memory_size_mib = var.master_memory 9 | num_sockets = var.master_vcpu 10 | 11 | boot_device_order_list = ["DISK", "CDROM", "NETWORK"] 12 | boot_type = "LEGACY" 13 | 14 | disk_list { 15 | data_source_reference = { 16 | kind = "image" 17 | uuid = nutanix_image.image.id 18 | } 19 | device_properties { 20 | device_type = "CDROM" 21 | } 22 | } 23 | 24 | disk_list { 25 | disk_size_bytes = var.master_disk 26 | device_properties { 27 | device_type = "DISK" 28 | disk_address = { 29 | device_index = 0 30 | adapter_type = "SATA" 31 | } 32 | } 33 | } 34 | 35 | nic_list { 36 | subnet_uuid = data.nutanix_subnet.subnet.id 37 | } 38 | } 39 | 40 | # Creating the worker VMs. 
41 | resource "nutanix_virtual_machine" "worker" { 42 | count = var.workers_count 43 | name = "${var.cluster_name}-worker-${count.index}" 44 | cluster_uuid = data.nutanix_cluster.cluster.id 45 | num_vcpus_per_socket = var.nutanix_control_plane_cores_per_socket 46 | memory_size_mib = var.worker_memory 47 | num_sockets = var.worker_vcpu 48 | 49 | boot_device_order_list = ["DISK", "CDROM", "NETWORK"] 50 | boot_type = "LEGACY" 51 | 52 | disk_list { 53 | data_source_reference = { 54 | kind = "image" 55 | uuid = nutanix_image.image.id 56 | } 57 | device_properties { 58 | device_type = "CDROM" 59 | } 60 | } 61 | 62 | disk_list { 63 | disk_size_bytes = var.worker_disk 64 | device_properties { 65 | device_type = "DISK" 66 | disk_address = { 67 | device_index = 0 68 | adapter_type = "SATA" 69 | } 70 | } 71 | } 72 | 73 | nic_list { 74 | subnet_uuid = data.nutanix_subnet.subnet.id 75 | } 76 | } 77 | -------------------------------------------------------------------------------- /terraform_files/oci-ci-machine/02_compute.tf: -------------------------------------------------------------------------------- 1 | data "oci_identity_availability_domains" "ads" { 2 | compartment_id = var.oci_compartment_id 3 | } 4 | 5 | # Use cloud init to configure root user 6 | data "cloudinit_config" "config" { 7 | part { 8 | content_type = "text/cloud-config" 9 | 10 | content = yamlencode({ 11 | "users" : [ 12 | { 13 | "name" : "root", 14 | "ssh-authorized-keys" : [ 15 | file(var.public_ssh_key_path) 16 | ] 17 | } 18 | ] 19 | }) 20 | } 21 | } 22 | 23 | resource "oci_core_instance" "ci_instance" { 24 | # Required 25 | availability_domain = data.oci_identity_availability_domains.ads.availability_domains[0].name 26 | compartment_id = var.oci_compartment_id 27 | shape = "VM.Standard.E5.Flex" 28 | 29 | shape_config { 30 | memory_in_gbs = 16 31 | ocpus = 4 32 | } 33 | 34 | source_details { 35 | # source_id = data.oci_core_app_catalog_listing_resource_version.os_catalog_listing.listing_resource_id 36 | source_id = var.operating_system_source_id 37 | source_type = "image" 38 | boot_volume_size_in_gbs = 500 39 | boot_volume_vpus_per_gb = 30 40 | } 41 | 42 | # Optional 43 | display_name = "ci-instance-${var.unique_id}" 44 | 45 | create_vnic_details { 46 | assign_public_ip = true 47 | assign_private_dns_record = true 48 | hostname_label = "ci-instance" 49 | subnet_id = oci_core_subnet.public.id 50 | } 51 | metadata = { 52 | user_data = data.cloudinit_config.config.rendered 53 | } 54 | preserve_boot_volume = false 55 | 56 | # wait an ssh connection and wait for cloud-init to complete 57 | connection { 58 | type = "ssh" 59 | user = "root" 60 | host = self.public_ip 61 | timeout = "5m" 62 | private_key = file(var.private_ssh_key_path) 63 | } 64 | 65 | provisioner "remote-exec" { 66 | inline = [ 67 | # Wait for cloud-init to complete. 
68 | "cloud-init status --wait || true" 69 | ] 70 | } 71 | lifecycle { 72 | ignore_changes = [ 73 | source_details[0].source_id, 74 | ] 75 | } 76 | } 77 | -------------------------------------------------------------------------------- /src/assisted_test_infra/test_infra/helper_classes/events_handler.py: -------------------------------------------------------------------------------- 1 | from typing import List 2 | 3 | import waiting 4 | 5 | from assisted_test_infra.test_infra.utils import utils 6 | from service_client import InventoryClient, log 7 | 8 | 9 | class EventsHandler: 10 | def __init__(self, api_client: InventoryClient): 11 | self.api_client = api_client 12 | 13 | def _find_event( 14 | self, 15 | event_to_find: str, 16 | reference_time: int, 17 | params_list: List[str] = None, 18 | host_id: str = "", 19 | infra_env_id: str = "", 20 | cluster_id: str = "", 21 | ): 22 | events_list = self.get_events(host_id=host_id, cluster_id=cluster_id, infra_env_id=infra_env_id) 23 | for event in events_list: 24 | if event_to_find not in event["message"]: 25 | continue 26 | # Adding a 2 sec buffer to account for a small time diff between the machine and the time on staging 27 | if utils.to_utc(event["event_time"]) >= reference_time - 2: 28 | if all(param in event["message"] for param in params_list): 29 | log.info(f"Event to find: {event_to_find} exists with its params") 30 | return True 31 | return False 32 | 33 | def get_events(self, host_id: str = "", cluster_id: str = "", infra_env_id: str = "", **kwargs): 34 | return self.api_client.get_events(cluster_id=cluster_id, host_id=host_id, infra_env_id=infra_env_id, **kwargs) 35 | 36 | def wait_for_event( 37 | self, 38 | event_to_find: str, 39 | reference_time: int, 40 | params_list: List[str] = None, 41 | host_id: str = "", 42 | infra_env_id: str = "", 43 | cluster_id: str = "", 44 | timeout: int = 10, 45 | ): 46 | log.info(f"Searching for event: {event_to_find}") 47 | if params_list is None: 48 | params_list = list() 49 | waiting.wait( 50 | lambda: self._find_event(event_to_find, reference_time, params_list, host_id, infra_env_id, cluster_id), 51 | timeout_seconds=timeout, 52 | sleep_seconds=2, 53 | waiting_for=f"event {event_to_find}", 54 | ) 55 | -------------------------------------------------------------------------------- /terraform_files/oci-ci-machine/01_networking.tf: -------------------------------------------------------------------------------- 1 | locals { 2 | all_protocols = "all" 3 | anywhere = "0.0.0.0/0" 4 | } 5 | 6 | resource "oci_core_vcn" "ci_machine_vcn" { 7 | cidr_blocks = [ 8 | "10.0.0.0/16", 9 | ] 10 | compartment_id = var.oci_compartment_id 11 | display_name = "vcn-ci-${var.unique_id}" 12 | dns_label = "v${substr(var.unique_id, -14, -1)}" # dns label is limited to 15 chacracters 13 | } 14 | 15 | resource "oci_core_internet_gateway" "internet_gateway" { 16 | compartment_id = var.oci_compartment_id 17 | display_name = "InternetGateway" 18 | vcn_id = oci_core_vcn.ci_machine_vcn.id 19 | } 20 | 21 | resource "oci_core_route_table" "public_routes" { 22 | compartment_id = var.oci_compartment_id 23 | vcn_id = oci_core_vcn.ci_machine_vcn.id 24 | display_name = "public" 25 | 26 | route_rules { 27 | destination = local.anywhere 28 | destination_type = "CIDR_BLOCK" 29 | network_entity_id = oci_core_internet_gateway.internet_gateway.id 30 | } 31 | } 32 | 33 | resource "oci_core_security_list" "public" { 34 | compartment_id = var.oci_compartment_id 35 | display_name = "public" 36 | vcn_id = oci_core_vcn.ci_machine_vcn.id 37 | 38 | 
38 |   ingress_security_rules {
39 |     source   = local.anywhere
40 |     protocol = "6"
41 |     tcp_options {
42 |       min = 22
43 |       max = 22
44 |     }
45 |   }
46 |   ingress_security_rules {
47 |     source   = local.anywhere
48 |     protocol = "6"
49 |     tcp_options {
50 |       min = 8080
51 |       max = 8080
52 |     }
53 |   }
54 |   ingress_security_rules {
55 |     source   = local.anywhere
56 |     protocol = "6"
57 |     tcp_options {
58 |       min = 8090
59 |       max = 8090
60 |     }
61 |   }
62 |   egress_security_rules {
63 |     destination = local.anywhere
64 |     protocol    = local.all_protocols
65 |   }
66 | }
67 | 
68 | resource "oci_core_subnet" "public" {
69 |   cidr_block     = "10.0.0.0/24"
70 |   display_name   = "public"
71 |   compartment_id = var.oci_compartment_id
72 |   vcn_id         = oci_core_vcn.ci_machine_vcn.id
73 |   route_table_id = oci_core_route_table.public_routes.id
74 | 
75 |   security_list_ids = [
76 |     oci_core_security_list.public.id,
77 |   ]
78 | 
79 |   dns_label                  = "public"
80 |   prohibit_public_ip_on_vnic = false
81 | }
82 | 
83 | 
-------------------------------------------------------------------------------- /terraform_files/vsphere-ci-machine/variables-vsphere.tf: --------------------------------------------------------------------------------
1 | //////
2 | // vSphere variables
3 | //////
4 | 
5 | variable "vsphere_server" {
6 |   type        = string
7 |   description = "vCenter server IP address or FQDN, used for vSphere API operations"
8 | }
9 | 
10 | variable "vsphere_username" {
11 |   type        = string
12 |   description = "vCenter server username"
13 | }
14 | 
15 | variable "vsphere_password" {
16 |   type        = string
17 |   description = "vCenter server password"
18 | }
19 | 
20 | variable "vsphere_cluster" {
21 |   type        = string
22 |   description = "vSphere cluster name; a cluster is a set of ESXi hosts managed together"
23 | }
24 | 
25 | variable "vsphere_datacenter" {
26 |   type        = string
27 |   description = "vSphere data center name"
28 | }
29 | 
30 | variable "vsphere_datastore" {
31 |   type        = string
32 |   description = "vSphere data store name"
33 | }
34 | 
35 | variable "vsphere_network" {
36 |   type        = string
37 |   description = "vSphere publicly accessible network for cluster ingress and access, e.g. VM Network"
38 | }
39 | 
40 | variable "template_name" {
41 |   type        = string
42 |   description = "The Fedora/CentOS template name to clone; it must already exist in vSphere"
43 | }
44 | 
45 | ///////////
46 | // Variables for the vSphere machine that test-infra is deployed on.
47 | ///////////
48 | 
49 | variable "build_id" {
50 |   type        = string
51 |   description = "The CI build id"
52 | }
53 | 
54 | variable "vcpu" {
55 |   type        = number
56 |   default     = 4
57 |   description = "The total number of virtual processor cores to assign to the virtual machine."
58 | }
59 | 
60 | variable "memory" {
61 |   type        = number
62 |   default     = 24576
63 |   description = "The size of the virtual machine's memory, in MB"
64 | }
65 | 
66 | variable "disk_size" {
67 |   type        = number
68 |   default     = 240
69 |   description = "The size of the virtual machine's disk, in GB"
70 | }
71 | 
72 | variable "guest_id" {
73 |   type        = string
74 |   description = "The server OS type. See: https://code.vmware.com/apis/358/doc/vim.vm.GuestOsDescriptor.GuestOsIdentifier.html"
75 |   default     = "centos8_64Guest"
76 | }
77 | 
78 | variable "domain" {
79 |   type        = string
80 |   description = "The host domain name"
81 |   default     = "redhat.com"
82 | }
83 | 
-------------------------------------------------------------------------------- /scripts/assisted_deployment.sh: --------------------------------------------------------------------------------
1 | #!/usr/bin/env bash
2 | 
3 | function destroy_all() {
4 |     make destroy
5 | }
6 | 
7 | function update_conf_file() {
8 |     FILE="/etc/NetworkManager/conf.d/dnsmasq.conf"
9 |     if ! [ -f "${FILE}" ]; then
10 |         echo -e "[main]\ndns=dnsmasq" | sudo tee $FILE
11 |     fi
12 | }
13 | 
14 | function set_dns() {
15 |     NAMESPACE_INDEX=${1:-0}
16 |     if [ "${BASE_DNS_DOMAINS}" != '""' ]; then
17 |         echo "DNS registration should be handled by assisted-service"
18 |         exit 0
19 |     fi
20 |     NAMESERVER_IP=$(ip route show dev tt$NAMESPACE_INDEX | cut -d\  -f7)
21 |     if [ -z "${NAMESERVER_IP}" ] ; then
22 |         NAMESERVER_IP=$(ip -o -6 address show dev tt$NAMESPACE_INDEX | awk '!/ fe80/ {e=index($4,"/"); print substr($4, 0, e-1);}')
23 |     fi
24 |     if [ -z "${NAMESERVER_IP}" ] ; then
25 |         echo "IP for interface tt$NAMESPACE_INDEX was not found"
26 |         exit 1
27 |     fi
28 | 
29 |     update_conf_file
30 |     sudo truncate -s0 /etc/NetworkManager/dnsmasq.d/openshift-${CLUSTER_NAME}.conf
31 |     echo "server=/api.${CLUSTER_NAME}-${NAMESPACE}.${BASE_DOMAIN}/${NAMESERVER_IP}" | sudo tee -a /etc/NetworkManager/dnsmasq.d/openshift-${CLUSTER_NAME}.conf
32 |     echo "server=/.apps.${CLUSTER_NAME}-${NAMESPACE}.${BASE_DOMAIN}/${NAMESERVER_IP}" | sudo tee -a /etc/NetworkManager/dnsmasq.d/openshift-${CLUSTER_NAME}.conf
33 | 
34 |     sudo systemctl reload NetworkManager
35 | 
36 |     echo "Finished setting dns"
37 | }
38 | 
39 | function set_all_vips_dns() {
40 |     update_conf_file
41 | 
42 |     sudo systemctl reload NetworkManager
43 | 
44 |     echo "Finished setting all vips dns"
45 | }
46 | 
47 | # Delete after pushing fix to dev-scripts
48 | function wait_for_cluster() {
49 |     echo "Nothing to do"
50 | }
51 | 
52 | # TODO: add all relevant OS envs
53 | function run() {
54 |     make $1 NUM_MASTERS=$NUM_MASTERS NUM_WORKERS=$NUM_WORKERS KUBECONFIG=$PWD/minikube_kubeconfig BASE_DOMAIN=$BASE_DOMAIN CLUSTER_NAME=$CLUSTER_NAME
55 |     retVal=$?
56 |     echo $retVal
57 |     if [ $retVal -ne 0 ]; then
58 |         exit $retVal
59 |     fi
60 | }
61 | 
62 | function run_skipper_make_command() {
63 |     make $1
64 |     retVal=$?
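    # Capture make's exit status, report it, and abort so the caller sees the failure.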
65 |     echo $retVal
66 |     if [ $retVal -ne 0 ]; then
67 |         exit $retVal
68 |     fi
69 | }
70 | 
71 | function run_without_os_envs() {
72 |     run_skipper_make_command $1
73 | }
74 | 
75 | "$@"
76 | 
-------------------------------------------------------------------------------- /src/assisted_test_infra/test_infra/utils/env_var.py: --------------------------------------------------------------------------------
1 | from typing import Any, Callable, List, Optional
2 | 
3 | from assisted_test_infra.test_infra.utils.utils import get_env
4 | 
5 | 
6 | class EnvVar:
7 |     """
8 |     Read values from OS environment variables while keeping track of where each value came from
9 |     Attributes:
10 |         __var_keys     Environment variable keys as passed to test_infra; if multiple keys are set, the first one found is used
11 |         __loader       Function applied to the raw env var value when reading it from the system
12 |         __default      Default value for the variable if it is not set
13 |         __is_user_set  True if one of the environment variables in __var_keys was set by the user
14 |         __value        The resolved value of the variable (user value, falling back to the default)
15 |     """
16 | 
17 |     def __init__(
18 |         self, var_keys: List[str] = None, *, loader: Optional[Callable] = None, default: Optional[Any] = None
19 |     ) -> None:
20 |         self.__var_keys = var_keys if var_keys else []
21 |         self.__loader = loader
22 |         self.__default = default
23 |         self.__is_user_set = False
24 |         self.__init_value()
25 | 
26 |     def __init_value(self):
27 |         self.__value = self.__default
28 |         for key in self.__var_keys:
29 |             env = get_env(key)
30 |             if env is not None:
31 |                 self.__is_user_set = True
32 |                 self.__value = self.__loader(env) if self.__loader else env
33 |                 break
34 | 
35 |     def __add__(self, other: "EnvVar"):
36 |         return EnvVar(default=self.value + other.value)
37 | 
38 |     def __str__(self):
39 |         return f"{f'{self.__var_keys[0]}=' if len(self.__var_keys) > 0 else ''}{self.__value}"
40 | 
41 |     @property
42 |     def value(self):
43 |         return self.__value
44 | 
45 |     @property
46 |     def var_keys(self):
47 |         return self.__var_keys
48 | 
49 |     @property
50 |     def is_user_set(self):
51 |         return self.__is_user_set
52 | 
53 |     def copy(self, value=None) -> "EnvVar":
54 |         """Return a copy of this EnvVar; if value is not None, it overrides the copied value"""
55 |         env = EnvVar(self.__var_keys, loader=self.__loader, default=self.__default)
56 |         env.__is_user_set = self.__is_user_set
57 |         env.__value = value if value is not None else self.__value
58 | 
59 |         return env
60 | 
-------------------------------------------------------------------------------- /ansible_files/roles/oci_cleanup_resources/tasks/cleanup_resources.yml: --------------------------------------------------------------------------------
1 | ---
2 | - name: Load terraform state
3 |   ansible.builtin.command:
4 |     cmd: terraform show -json
5 |     chdir: "{{ terraform_working_dir }}"
6 |   register: terraform_state
7 |   check_mode: false
8 |   changed_when: false
9 | 
10 | - name: Convert terraform state to dict
11 |   ansible.builtin.set_fact:
12 |     terraform_resources: "{{ (terraform_state.stdout | from_json)['values']['root_module']['resources'] | default([]) }}"
13 |     excluded_resources: []
14 | 
15 | - name: Exclude types from terraform state
16 |   ansible.builtin.set_fact:
17 |     excluded_resources: "{{ excluded_resources + [item] }}"
18 |   loop: "{{ terraform_resources }}"
19 |   when: item.type in excluded_types
20 | 
21 | - name: Exclude recently created resources from terraform state
22 |   ansible.builtin.set_fact:
23 |     excluded_resources: "{{ excluded_resources + [item] }}"
24 |   loop: "{{ terraform_resources }}"
25 |   when:
26 |     - item["values"]["defined_tags"]["Oracle-Tags.CreatedOn"] is defined
27 |     - >-
28 |       (
29 |         now(utc=true).replace(tzinfo=None)
30 |         -
31 |         (item["values"]["defined_tags"]["Oracle-Tags.CreatedOn"] | ansible.builtin.to_datetime("%Y-%m-%dT%H:%M:%S.%fZ")).replace(tzinfo=None)
32 |       ).total_seconds() / 3600 < expired_after_hours
33 | 
34 | - name: List excluded bucket names
35 |   ansible.builtin.set_fact:
36 |     excluded_buckets: "{{ excluded_resources | json_query('[?type==`oci_objectstorage_bucket`].values.name') }}"
37 | 
38 | - name: Exclude objects belonging to excluded buckets
39 |   ansible.builtin.set_fact:
40 |     excluded_resources: "{{ excluded_resources + [item] }}"
41 |   loop: "{{ terraform_resources }}"
42 |   when:
43 |     - item["type"] == "oci_objectstorage_object"
44 |     - item["values"]["bucket"] in excluded_buckets
45 | 
46 | - name: Remove excluded resources from terraform state
47 |   ansible.builtin.command:
48 |     cmd: terraform state rm {{ excluded_resources | json_query('[].address') | unique | join(" ") }}
49 |     chdir: "{{ terraform_working_dir }}"
50 |   when: excluded_resources | length > 0
51 |   changed_when: true
52 | 
53 | - name: Destroy expired resources
54 |   community.general.terraform:
55 |     project_path: "{{ terraform_working_dir }}"
56 |     state: absent
57 |   register: result
58 |   until: "result is not failed"
59 |   retries: 10
60 |   delay: 10
-------------------------------------------------------------------------------- /src/assisted_test_infra/download_logs/events.html.j2: --------------------------------------------------------------------------------
<!-- The markup of this ~96-line template was stripped from this dump. It is an HTML/Jinja2 page that renders the downloaded cluster events into a table with the columns Cluster ID, Host ID, Time, Message, and Severity. -->
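For orientation, such a template is typically rendered with Jinja2 along these lines. This is a sketch only; the loader path, template variables, and event fields are assumptions, since the real call site is not part of this dump:

```python
from jinja2 import Environment, FileSystemLoader

# Assumed location and context variables; the template itself was not
# captured in this dump.
env = Environment(loader=FileSystemLoader("src/assisted_test_infra/download_logs"))
template = env.get_template("events.html.j2")

events = [
    {
        "cluster_id": "0f6f102c-1234",  # illustrative values
        "host_id": "",
        "event_time": "2024-01-01T12:00:00Z",
        "message": "Cluster installation started",
        "severity": "info",
    }
]
html = template.render(events=events)

with open("events.html", "w") as f:
    f.write(html)
```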
-------------------------------------------------------------------------------- /terraform_files/baremetal_host/variables.tf: --------------------------------------------------------------------------------
1 | variable "name" {
2 |   type        = string
3 |   description = "Identifying name for the host."
4 | }
5 | 
6 | variable "memory" {
7 |   type        = number
8 |   description = "RAM in MiB allocated to the host."
9 | }
10 | 
11 | variable "vcpu" {
12 |   type        = number
13 |   description = "Number of virtual cores allocated to the host."
14 | }
15 | 
16 | variable "running" {
17 |   type        = bool
18 |   description = "Whether the host should start running right away after its creation."
19 | }
20 | 
21 | variable "image_path" {
22 |   type        = string
23 |   description = "Live CD image that should be booted from if the hard disk is not bootable."
24 | }
25 | 
26 | variable "cpu_mode" {
27 |   type        = string
28 |   description = "How the CPU model of the libvirt guest should be configured."
29 |   default     = "host-passthrough"
30 | }
31 | 
32 | variable "cluster_domain" {
33 |   type        = string
34 |   description = "The cluster domain that all DNS records must belong to."
35 | }
36 | 
37 | variable "networks" {
38 |   type = list(object({
39 |     name     = string,
40 |     hostname = optional(string),
41 |     ips      = list(string),
42 |     mac      = string
43 |   }))
44 |   description = "Network devices configuration for the host."
45 |   default     = []
46 | }
47 | 
48 | variable "pool" {
49 |   type        = string
50 |   description = "Pool name to be used."
51 | }
52 | 
53 | variable "disk_base_name" {
54 |   type        = string
55 |   description = "Prefix name to be used for namespacing disks."
56 | }
57 | 
58 | variable "disk_size" {
59 |   type        = number
60 |   description = "Disk space in bytes allocated to the host."
61 | }
62 | 
63 | variable "disk_count" {
64 |   type        = number
65 |   description = "Number of disks to attach to the host."
66 |   default     = 1
67 | }
68 | 
69 | variable "vtpm2" {
70 |   type        = bool
71 |   description = "Whether or not to emulate a TPM v2 device on the host."
72 | default = false 73 | } 74 | 75 | variable "boot_devices" { 76 | type = list(string) 77 | description = "the list of boot devices in the desired order of boot" 78 | default = ["hd", "cdrom"] 79 | } 80 | 81 | variable "uefi_boot_firmware" { 82 | description = "The uefi boot firmware path in hypervisor" 83 | type = string 84 | default = "" 85 | } 86 | 87 | variable "uefi_boot_template" { 88 | description = "The uefi boot template path in hypervisor" 89 | type = string 90 | default = "" 91 | } 92 | 93 | -------------------------------------------------------------------------------- /src/consts/env_defaults.py: -------------------------------------------------------------------------------- 1 | import os 2 | from pathlib import Path 3 | 4 | import consts 5 | 6 | DEFAULT_NUMBER_OF_MASTERS: int = consts.NUMBER_OF_MASTERS 7 | DEFAULT_DAY2_WORKERS_COUNT: int = 1 8 | DEFAULT_DAY2_MASTERS_COUNT: int = 0 9 | DEFAULT_WORKERS_COUNT: int = 2 10 | DEFAULT_ARBITERS_COUNT: int = 0 11 | DEFAULT_STORAGE_POOL_PATH: str = str(Path.cwd().joinpath("storage_pool")) 12 | DEFAULT_SSH_PRIVATE_KEY_PATH: Path = Path.home() / ".ssh" / "id_rsa" 13 | DEFAULT_SSH_PUBLIC_KEY_PATH: Path = Path.home() / ".ssh" / "id_rsa.pub" 14 | DEFAULT_INSTALLER_KUBECONFIG = None 15 | DEFAULT_LOG_FOLDER: Path = Path("/tmp/assisted_test_infra_logs") 16 | DEFAULT_IMAGE_TYPE: str = consts.ImageType.MINIMAL_ISO 17 | DEFAULT_TEST_TEARDOWN: bool = True 18 | DEFAULT_PLATFORM: str = consts.Platforms.BARE_METAL 19 | DEFAULT_USER_MANAGED_NETWORKING: bool = False 20 | DEFAULT_CONTROL_PLANE_COUNT: int = consts.ControlPlaneCount.THREE 21 | DEFAULT_DOWNLOAD_IMAGE: bool = True 22 | DEFAULT_VERIFY_SSL: bool = True 23 | DEFAULT_IS_IPV4: bool = True 24 | DEFAULT_IS_IPV6: bool = False 25 | DEFAULT_ADDITIONAL_NTP_SOURCE: str = consts.DEFAULT_ADDITIONAL_NTP_SOURCE 26 | DEFAULT_STATIC_IPS: bool = False 27 | DEFAULT_IS_BONDED: bool = False 28 | DEFAULT_NUM_BONDED_SLAVES: int = 2 29 | DEFAULT_BONDING_MODE: str = "active-backup" 30 | DEFAULT_BOOTSTRAP_IN_PLACE: bool = False 31 | DEFAULT_NETWORK_NAME: str = consts.TEST_NETWORK 32 | DEFAULT_SINGLE_NODE_IP: str = "" 33 | DEFAULT_TF_CPU_MODE: str = consts.HOST_PASSTHROUGH_CPU_MODE 34 | DEFAULT_IMAGE_FOLDER: Path = Path(consts.IMAGE_FOLDER) 35 | DEFAULT_IMAGE_FILENAME: str = "installer-image.iso" 36 | DEFAULT_NETWORK_TYPE: str = consts.NetworkType.OpenShiftSDN 37 | DEFAULT_DISK_ENCRYPTION_MODE: str = consts.DiskEncryptionMode.TPM_VERSION_2 38 | DEFAULT_DISK_ENCRYPTION_ROLES: str = consts.DiskEncryptionRoles.NONE 39 | DEFAULT_IS_KUBE_API: bool = False 40 | DEFAULT_HOLD_INSTALLATION: bool = False 41 | DEFAULT_MULTI_VERSION: bool = False 42 | DEFAULT_BASE_DNS_DOMAIN = consts.REDHAT_DNS_DOMAIN 43 | DEFAULT_VSHPERE_PARENT_FOLDER: str = "assisted-test-infra" 44 | TF_APPLY_ATTEMPTS = int(os.getenv("TF_APPLY_ATTEMPTS", 1)) 45 | DEFAULT_EXTERNAL_PLATFORM_NAME = "test-infra" 46 | DEFAULT_EXTERNAL_CLOUD_CONTROLLER_MANAGER = "" 47 | DEFAULT_LOAD_BALANCER_TYPE: str = consts.LoadBalancerType.CLUSTER_MANAGED.value 48 | DEFAULT_USE_DHCP_FOR_LIBVIRT: bool = False 49 | DEFAULT_PRIMARY_STACK: str = "ipv4" 50 | DEFAULT_UEFI_BOOT_FIRMWARE: str = "/usr/share/OVMF/OVMF_CODE.fd" 51 | DEFAULT_UEFI_BOOT_TEMPLATE: str = "/usr/share/OVMF/OVMF_VARS.fd" 52 | DEFAULT_UEFI_BOOT: bool = False 53 | -------------------------------------------------------------------------------- /src/assisted_test_infra/test_infra/helper_classes/config/base_nodes_config.py: -------------------------------------------------------------------------------- 1 | import 
warnings 2 | from abc import ABC 3 | from dataclasses import dataclass 4 | from pathlib import Path 5 | from typing import List, Optional 6 | 7 | from assisted_service_client import models 8 | 9 | import consts 10 | 11 | from .base_config import BaseConfig 12 | 13 | 14 | @dataclass 15 | class BaseNodesConfig(BaseConfig, ABC): 16 | is_ipv4: bool = None 17 | is_ipv6: bool = None 18 | bootstrap_in_place: bool = None 19 | private_ssh_key_path: Path = None 20 | working_dir: str = consts.WORKING_DIR 21 | 22 | master_memory: int = None 23 | master_vcpu: int = None 24 | masters_count: int = None 25 | master_cpu_mode: str = None 26 | master_disk: int = None # disk size in MB. 27 | master_disk_size_gib: str = None # disk size in GB. 28 | master_disk_count: int = None # number of disks to create 29 | master_boot_devices: List[str] = None # order of boot devices to use 30 | 31 | worker_memory: int = None 32 | worker_vcpu: int = None 33 | workers_count: int = None 34 | worker_cpu_mode: str = None 35 | worker_disk: int = None 36 | worker_disk_size_gib: str = None # disk size in GB. 37 | worker_disk_count: int = None 38 | worker_boot_devices: List[str] = None 39 | 40 | arbiter_memory: int = None 41 | arbiter_vcpu: int = None 42 | arbiters_count: int = None 43 | arbiter_cpu_mode: str = None 44 | arbiter_disk: int = None 45 | arbiter_disk_size_gib: str = None 46 | arbiter_disk_count: int = None 47 | arbiter_boot_devices: List[str] = None 48 | 49 | api_vips: List[models.ApiVip] = None 50 | ingress_vips: List[models.IngressVip] = None 51 | base_cluster_domain: Optional[str] = None 52 | 53 | network_mtu: int = None 54 | tf_platform: str = ( 55 | None # todo - make all tf dependent platforms (e.g. vsphere, nutanix) inherit from BaseTerraformConfig # noqa E501 56 | ) 57 | 58 | @property 59 | def nodes_count(self): 60 | if self.workers_count is not None and self.masters_count is not None and self.arbiters_count is not None: 61 | return self.masters_count + self.workers_count + self.arbiters_count 62 | 63 | return 0 64 | 65 | @nodes_count.setter 66 | def nodes_count(self, nodes_count: int): 67 | warnings.warn( 68 | "Setting nodes_count is deprecated. nodes_count value is taken from masters_count plus" 69 | " workers_count plus arbiters_count instead.", 70 | DeprecationWarning, 71 | ) 72 | -------------------------------------------------------------------------------- /terraform_files/baremetal_host/main.tf: -------------------------------------------------------------------------------- 1 | terraform { 2 | required_providers { 3 | libvirt = { 4 | source = "dmacvicar/libvirt" 5 | version = "0.8.1" 6 | } 7 | } 8 | } 9 | 10 | locals { 11 | disk_names = [ 12 | for index in range(var.disk_count) : 13 | "${var.disk_base_name}-disk-${index}" 14 | ] 15 | } 16 | 17 | resource "libvirt_domain" "host" { 18 | name = var.name 19 | 20 | memory = var.memory 21 | vcpu = var.vcpu 22 | running = var.running 23 | 24 | firmware = var.uefi_boot_firmware != "" ? var.uefi_boot_firmware : null 25 | dynamic "nvram" { 26 | for_each = var.uefi_boot_firmware != "" ? 
[1] : []
27 | 
28 |     content {
29 |       file     = "/tmp/VARS_${var.name}.fd"
30 |       template = var.uefi_boot_template
31 |     }
32 |   }
33 | 
34 |   dynamic "disk" {
35 |     for_each = {
36 |       for idx, disk in libvirt_volume.host : idx => disk.id if length(regexall("${var.disk_base_name}-disk-.*", disk.name)) > 0
37 |     }
38 |     content {
39 |       volume_id = disk.value
40 |       scsi      = true
41 |     }
42 |   }
43 | 
44 |   disk {
45 |     file = var.image_path
46 |     # Set scsi to true here for documentation purposes,
47 |     # as the cdrom drive is always set to ide (at least up to 0.7.1).
48 |     # The bus is actually updated through the xsl sheet.
49 |     scsi = true
50 |   }
51 | 
52 |   console {
53 |     type        = "pty"
54 |     target_port = 0
55 |   }
56 | 
57 |   cpu {
58 |     mode = var.cpu_mode
59 |   }
60 | 
61 |   dynamic "network_interface" {
62 |     for_each = var.networks
63 |     content {
64 |       network_name = network_interface.value.name
65 |       hostname     = network_interface.value.hostname
66 |       addresses    = network_interface.value.ips
67 |       mac          = network_interface.value.mac
68 |     }
69 |   }
70 | 
71 |   boot_device {
72 |     dev = var.boot_devices
73 |   }
74 | 
75 |   graphics {
76 |     type           = "vnc"
77 |     listen_type    = "address"
78 |     listen_address = "127.0.0.1"
79 |     autoport       = true
80 |   }
81 | 
82 |   dynamic "tpm" {
83 |     for_each = var.vtpm2 ? [1] : []
84 | 
85 |     content {
86 |       backend_type    = "emulator"
87 |       backend_version = "2.0"
88 |     }
89 |   }
90 | 
91 |   xml {
92 |     xslt = file("../baremetal_host/libvirt_domain_custom.xsl")
93 |   }
94 | }
95 | 
96 | resource "libvirt_volume" "host" {
97 |   for_each = { for idx, obj in local.disk_names : idx => obj }
98 |   name     = each.value
99 |   pool     = var.pool
100 |   size     = var.disk_size
101 | }
102 | 
-------------------------------------------------------------------------------- /terraform_files/nutanix-ci-machine/main.tf: --------------------------------------------------------------------------------
1 | terraform {
2 |   required_providers {
3 |     nutanix = {
4 |       source  = "nutanix/nutanix"
5 |       version = "1.9.5"
6 |     }
7 |   }
8 | }
9 | 
10 | provider "nutanix" {
11 |   username     = var.nutanix_username
12 |   password     = var.nutanix_password
13 |   endpoint     = var.nutanix_endpoint
14 |   port         = var.nutanix_port
15 |   insecure     = true
16 |   wait_timeout = 60
17 |   session_auth = false
18 | }
19 | 
20 | data "nutanix_cluster" "cluster" {
21 |   name = var.nutanix_cluster
22 | }
23 | 
24 | data "nutanix_subnet" "subnet" {
25 |   subnet_name = var.nutanix_subnet
26 | }
27 | 
28 | resource "nutanix_image" "cloud_image" {
29 |   name       = "rocky9"
30 |   source_uri = var.cloud_image_url
31 | }
32 | 
33 | resource "local_file" "cloud_config" {
34 |   filename = var.cloud_config_file
35 |   content  = <
[... content missing from this dump: the cloud-config heredoc and the remainder of this file; the listing resumes mid-way through a separate markdown document ...]
45 | export IMAGE_SERVICE=
46 | export ASSISTED_UI=
47 | export AGENT_DOCKER_IMAGE=
48 | export INSTALLER_IMAGE=
49 | export CONTROLLER_IMAGE=
50 | export PSQL_IMAGE=
51 | ```
52 | 
53 | To target a specific OpenShift version and speed up the deployment:
54 | 
55 | ```bash
56 | export OPENSHIFT_VERSION= # e.g., 4.18
57 | ```
58 | 
59 | To learn more about deployment options and customization, refer to the [deployment guide](./assisted-deployment.md).
60 | 
61 | ## Test Assisted Installer Components
62 | 
63 | Now that `assisted-service` and its components are deployed, you can test them.
64 | 
65 | Each deployment type offers different testing options. For the default deployment described above, you can use:
66 | 
67 | 1. `make deploy_nodes` — Create a cluster and deploy hosts (networking not configured).
68 | 1. `make deploy_nodes_with_networking` — Create a cluster, deploy hosts, and configure networking.
69 | 1. `make deploy_nodes_with_install` — Full flow: create cluster, deploy hosts, configure networking, and start installation.
70 | 
71 | For more testing options and customization details, see the [testing guide](./assisted-testing.md).
72 | 
-------------------------------------------------------------------------------- /src/cli/commands/test_command.py: --------------------------------------------------------------------------------
1 | import os
2 | import re
3 | import subprocess
4 | import uuid
5 | from copy import deepcopy
6 | 
7 | import pytest
8 | from prompt_toolkit.completion import Completer, NestedCompleter
9 | 
10 | from service_client import log
11 | 
12 | from .command import Command
13 | 
14 | 
15 | class TestCommand(Command):
16 |     """Command for executing a pytest test"""
17 | 
18 |     @classmethod
19 |     def get_completer(cls) -> Completer:
20 |         """Complete all available pytest tests"""
21 |         proc = subprocess.Popen(
22 |             ["python3", "-m", "pytest", "--collect-only", "-q"], stdout=subprocess.PIPE, stderr=subprocess.PIPE
23 |         )
24 |         stdout, _stderr = proc.communicate()
25 | 
26 |         pattern = r"((?P<file>src/tests/.*\.py)::(?P<class>.*)::(?P<func>.*))"
27 |         groups = [
28 |             match.groupdict() for match in [re.match(pattern, line) for line in stdout.decode().split("\n")] if match
29 |         ]
30 | 
31 |         # Split pytest functions and files. Note that if a test is decorated with
32 |         # pytest.mark.parametrize, the collected function id has a different pattern (e.g. test_function[arg_value])
33 |         groups_set = set((group["file"], group.get("func", "").split("[")[0]) for group in groups)
34 | 
35 |         test_options = {}
36 |         for file, func in groups_set:
37 |             if file not in test_options:
38 |                 test_options[file] = {}
39 | 
40 |             test_options[file][func] = None
41 | 
42 |         return NestedCompleter.from_nested_dict({"test": test_options})
43 | 
44 |     def handle(self):
45 |         if not self._text:
46 |             return
47 | 
48 |         original_environ = deepcopy(os.environ)
49 |         try:
50 |             for arg_str in self._args:
51 |                 var = re.match(r"(?P<key>.*)=(?P<value>.*)", arg_str).groupdict()
52 |                 os.environ[var["key"]] = var["value"]
53 | 
54 |             command = self._text.split(" ")
55 |             _command, file, func, *_ = [var for var in command if var]
56 |             junit_report_path = f"unittest_{str(uuid.uuid4())[:8]}.xml"
57 |             log.setLevel(self._log_default_level)
58 |             pytest.main([file, "-k", func, "--verbose", "-s", f"--junit-xml={junit_report_path}"])
59 | 
60 |         except BaseException:
61 |             """Ignore any exception that might happen during test execution"""
62 | 
63 |         finally:
64 |             from tests.config import reset_global_variables
65 | 
66 |             os.environ.clear()
67 |             os.environ.update(original_environ)
68 |             reset_global_variables()  # reset the config to its default state
69 | 
-------------------------------------------------------------------------------- /src/service_client/client_validator.py: --------------------------------------------------------------------------------
1 | import json
2 | import re
3 | from importlib import metadata
4 | from subprocess import PIPE, CalledProcessError, check_output
5 | 
6 | from consts import consts
7 | from service_client.logger import log
8 | from tests.global_variables import DefaultVariables
9 | 
10 | global_variables = DefaultVariables()
11 | 
12 | 
13 | def _get_service_container(namespace: str):
14 |     res = check_output(["kubectl", "get", "pods", "-n", namespace, "--output=json"])
15 |     data = json.loads(res)
16 |     containers = [item["metadata"]["name"] for item in data["items"] if item and item["metadata"]]
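    # `kubectl get pods --output=json` returns an object whose "items" are pod
    # manifests; the filter below keeps only names starting with "assisted-service".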
17 |     service_containers = [container for container in containers if container.startswith("assisted-service")]
18 | 
19 |     return service_containers[0] if service_containers else ""
20 | 
21 | 
22 | def _get_service_version(service_container_name: str, namespace: str) -> str:
23 |     try:
24 |         cmd = f"kubectl exec -it --namespace={namespace} {service_container_name} -- bash -c 'ls /clients/*.tar.gz'"
25 |         src_client_file = check_output(cmd, shell=True, stderr=PIPE)
26 |         version = re.findall(r"assisted-service-client-(.*).tar.gz", src_client_file.decode())[0]
27 |         return version.strip()
28 |     except (CalledProcessError, IndexError):
29 |         return ""
30 | 
31 | 
32 | def verify_client_version(namespace="assisted-installer"):
33 |     """Check that the client artifact that exists on the service instance is equal to the client version
34 |     installed on the test-infra image"""
35 | 
36 |     if global_variables.deploy_target == consts.DeployTargets.ONPREM:
37 |         log.info("Onprem environment assisted-python-client validation is currently not supported")
38 |         return
39 | 
40 |     try:
41 |         service = _get_service_container(namespace)
42 |         service_version = _get_service_version(service, namespace)
43 |         client_installed_version = metadata.version("assisted-service-client")
44 |         if service_version == client_installed_version:
45 |             log.info(
46 |                 f"Assisted python client versions match! Version on {service}={service_version} == "
47 |                 f"installed_version={client_installed_version}"
48 |             )
49 |         else:
50 |             log.warning(
51 |                 f"Mismatch client versions found! Version on {service}={service_version} != "
52 |                 f"installed_version={client_installed_version}"
53 |             )
54 | 
55 |     except BaseException as e:
56 |         # best effort
57 |         log.info(f"Failed to validate assisted-python-client version, {e}")
58 | 
59 | 
60 | if __name__ == "__main__":
61 |     verify_client_version()
62 | 
-------------------------------------------------------------------------------- /src/cli/prompt_handler.py: --------------------------------------------------------------------------------
1 | import logging
2 | from typing import Union
3 | 
4 | from prompt_toolkit import prompt
5 | from prompt_toolkit.auto_suggest import AutoSuggestFromHistory
6 | from prompt_toolkit.completion import Completer
7 | from prompt_toolkit.history import FileHistory
8 | from prompt_toolkit.shortcuts import CompleteStyle, yes_no_dialog
9 | 
10 | from cli.commands.commands_factory import CommandFactory
11 | from cli.commands.test_command import TestCommand
12 | from cli.key_binding import bindings
13 | from service_client import log
14 | from tests.global_variables import DefaultVariables
15 | 
16 | 
17 | class PromptHandler:
18 |     def __init__(self, global_variables: DefaultVariables):
19 |         self._global_variables = global_variables
20 |         log.setLevel(logging.ERROR)
21 | 
22 |     @classmethod
23 |     def _input(
24 |         cls,
25 |         prompt_text: str,
26 |         completer: Completer,
27 |         hint: str = " Control + Q for exit | Control + C for clear",
28 |         history_file=None,
29 |     ) -> Union[str, None]:
30 |         history_args = {}
31 |         if history_file:
32 |             history_args["history"] = FileHistory(history_file)
33 |             history_args["auto_suggest"] = AutoSuggestFromHistory()
34 |             history_args["enable_history_search"] = True
35 | 
36 |         try:
37 |             text = prompt(
38 |                 f"{prompt_text}> ",
39 |                 key_bindings=bindings,
40 |                 completer=completer,
41 |                 complete_style=CompleteStyle.COLUMN,
42 |                 bottom_toolbar=hint,
43 |             **history_args,
44 |             )
45 |         except EOFError:
46 |             return None
47 | 
48 |         return text
49 | 
50 |     def _get_environment_variables(self) -> Union[str, None]:
51 |         args = ""
52 |         result = yes_no_dialog(title="Environment Variables", text="Do you want to enter environment variables?").run()
53 | 
54 |         if result:
55 |             args = self._input("└──── envs", completer=CommandFactory.env_vars_completers(self._global_variables))
56 |             if args is None:
57 |                 return None
58 | 
59 |         return args
60 | 
61 |     def get_prompt_results(self):
62 |         text = self._input("test-infra", completer=CommandFactory.get_completers(), history_file=".cli.history")
63 |         if text is None:
64 |             return None
65 | 
66 |         command = CommandFactory.get_command(text)
67 |         if isinstance(command, TestCommand):
68 |             if (args := self._get_environment_variables()) is None:
69 |                 return command
70 |             command.args = args
71 | 
72 |         return command
73 | 
-------------------------------------------------------------------------------- /src/assisted_test_infra/download_logs/resources/man_sosreport.sh: --------------------------------------------------------------------------------
1 | #!/bin/bash
2 | 
3 | SOS_TMPDIR="/var/tmp"
4 | PROXY_SETTING_FILE="/etc/mco/proxy.env"
5 | 
6 | TOOLBOX_BIN="toolbox"
7 | TOOLBOX_VERSION="$(rpm -q --queryformat '%{VERSION}' toolbox)"
8 | TOOLBOX_MIN_VERSION="0.1.0"
9 | TOOLBOX_RC="/root/.toolboxrc"
10 | 
11 | # set up proxy environment variables
12 | if [ -f "${PROXY_SETTING_FILE}" ]
13 | then
14 |     # shellcheck source=/dev/null
15 |     source "${PROXY_SETTING_FILE}"
16 |     export HTTP_PROXY
17 |     export HTTPS_PROXY
18 |     export NO_PROXY
19 | fi
20 | 
21 | # check that the toolbox version is at least 0.1.0 (which should be the case for OCP >= 4.11);
22 | # previous versions of toolbox ran a shell instead of the command passed as a parameter
23 | if ! echo -e "${TOOLBOX_MIN_VERSION}\n${TOOLBOX_VERSION}" | sort --version-sort --check=silent; then
24 |     # fall back on the upstream version of toolbox
25 |     curl -o /tmp/toolbox "https://raw.githubusercontent.com/coreos/toolbox/${TOOLBOX_MIN_VERSION}/rhcos-toolbox"
26 |     chmod +x /tmp/toolbox
27 |     TOOLBOX_BIN="/tmp/toolbox"
28 | fi
29 | 
30 | # By default toolbox uses the /var/lib/kubelet/config.json file to authenticate
31 | # to the image registry in order to pull the images it needs. But in some tests
32 | # this file doesn't exist because the installation may not have started yet.
33 | # For example, the upgrade agent test never starts the installation, so that
34 | # file will not exist. In those cases we can use /root/.docker/config.json
35 | # instead, which is always created by the discovery ignition.
36 | if [ ! -f "/var/lib/kubelet/config.json" ]
37 | then
38 |     # Note that if the toolbox configuration file already exists we may be adding
39 |     # the AUTHFILE variable multiple times. That is acceptable because that
40 |     # configuration file is just a script sourced by toolbox, so only the last
41 |     # value will be used.
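    # Illustrative result: /root/.toolboxrc may end up holding several lines of
    # the form AUTHFILE=/root/.docker/config.json; since toolbox sources the file
    # as shell, only the last assignment takes effect.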
42 | echo "AUTHFILE=/root/.docker/config.json" >> "${TOOLBOX_RC}" 43 | fi 44 | 45 | # cleanup any previous sos report 46 | find "${SOS_TMPDIR}" -maxdepth 1 -name "sosreport*" -type f -delete 47 | 48 | yes | ${TOOLBOX_BIN} sos report --batch --tmp-dir "${SOS_TMPDIR}" --compression-type xz --all-logs \ 49 | --plugin-timeout=300 \ 50 | -o processor,memory,container_log,filesys,logs,crio,podman,openshift,openshift_ovn,networking,networkmanager,rhcos \ 51 | -k crio.all=on -k crio.logs=on \ 52 | -k podman.all=on -k podman.logs=on \ 53 | -k openshift.with-api=on 54 | 55 | # rename the sosreport archive with a deterministic name in order to download it afterwards 56 | find "${SOS_TMPDIR}" -maxdepth 1 -name "sosreport*.tar.xz" -type f -execdir mv {} "${SOS_TMPDIR}/sosreport.tar.xz" \; 57 | chmod a+r "${SOS_TMPDIR}/sosreport.tar.xz" 58 | -------------------------------------------------------------------------------- /src/assisted_test_infra/test_infra/controllers/iptables.py: -------------------------------------------------------------------------------- 1 | from enum import Enum 2 | from ipaddress import IPv4Address, IPv6Address 3 | from typing import List, Optional 4 | 5 | from assisted_test_infra.test_infra import utils 6 | from service_client import log 7 | 8 | 9 | class IpTableCommandOption(Enum): 10 | CHECK = "check" 11 | INSERT = "insert" 12 | DELETE = "delete" 13 | 14 | 15 | class IptableRule: 16 | CHAIN_INPUT = "INPUT" 17 | CHAIN_FORWARD = "FORWARD" 18 | 19 | def __init__( 20 | self, 21 | chain: str, 22 | target: str, 23 | protocol: str, 24 | dest_port: Optional[str] = "", 25 | sources: Optional[List] = None, 26 | extra_args: Optional[str] = "", 27 | address_familiy=IPv4Address, 28 | ): 29 | self._chain = chain 30 | self._target = target 31 | self._protocol = protocol 32 | self._dest_port = dest_port 33 | self._sources = sources if sources else [] 34 | self._extra_args = extra_args 35 | self.address_familiy = address_familiy 36 | 37 | @property 38 | def _iptables_bin(self): 39 | return "ip6tables" if self.address_familiy is IPv6Address else "iptables" 40 | 41 | def _build_command_string(self, option: IpTableCommandOption) -> str: 42 | sources_string = ",".join(self._sources) 43 | rule_template = [ 44 | f"{self._iptables_bin}", 45 | f"--{option.value}", 46 | self._chain, 47 | "-p", 48 | self._protocol, 49 | "-j", 50 | self._target, 51 | ] 52 | 53 | if self._sources: 54 | rule_template += ["-s", sources_string] 55 | 56 | if self._dest_port: 57 | rule_template += ["--dport", self._dest_port] 58 | 59 | if self._extra_args: 60 | rule_template += [self._extra_args] 61 | rule_str = " ".join(rule_template) 62 | log.info(f"build iptables command: {rule_str}") 63 | return rule_str 64 | 65 | def _does_rule_exist(self) -> bool: 66 | check_rule = self._build_command_string(IpTableCommandOption.CHECK) 67 | _, _, exit_code = utils.run_command(check_rule, shell=True, raise_errors=False) 68 | 69 | return exit_code == 0 70 | 71 | def add_sources(self, sources): 72 | self._sources += sources 73 | 74 | def insert(self) -> None: 75 | if not self._does_rule_exist(): 76 | insert_rule = self._build_command_string(IpTableCommandOption.INSERT) 77 | utils.run_command(insert_rule, shell=True) 78 | 79 | def delete(self) -> None: 80 | if self._does_rule_exist(): 81 | delete_rule = self._build_command_string(IpTableCommandOption.DELETE) 82 | utils.run_command(delete_rule, shell=True) 83 | -------------------------------------------------------------------------------- 
/terraform_files/nutanix-ci-machine/variables-nutanix.tf: --------------------------------------------------------------------------------
1 | //////
2 | // Nutanix variables
3 | //////
4 | 
5 | variable "nutanix_endpoint" {
6 |   type        = string
7 |   description = "Endpoint for the Prism Elements or Prism Central instance. This can also be specified with the NUTANIX_ENDPOINT environment variable"
8 | }
9 | 
10 | variable "nutanix_username" {
11 |   type        = string
12 |   description = "Username for the Prism Elements or Prism Central instance. This can also be specified with the NUTANIX_USERNAME environment variable"
13 | }
14 | 
15 | variable "nutanix_password" {
16 |   type        = string
17 |   description = "Password for the Prism Elements or Prism Central instance. This can also be specified with the NUTANIX_PASSWORD environment variable"
18 | }
19 | 
20 | variable "nutanix_port" {
21 |   type        = number
22 |   description = "Port for the Prism Elements or Prism Central instance. This can also be specified with the NUTANIX_PORT environment variable. Defaults to 9440"
23 | }
24 | 
25 | variable "nutanix_cluster" {
26 |   type        = string
27 |   description = "Nutanix cluster name"
28 | }
29 | 
30 | variable "nutanix_subnet" {
31 |   type        = string
32 |   description = "Nutanix subnet name. The NIC's IP address will be allocated from this subnet's address range"
33 | }
34 | 
35 | variable "build_id" {
36 |   type        = string
37 |   description = "The CI build id"
38 | }
39 | 
40 | variable "memory" {
41 |   type        = number
42 |   default     = 16384
43 |   description = "RAM in MiB allocated to the virtual machine"
44 | }
45 | 
46 | variable "vcpu" {
47 |   type        = number
48 |   default     = 4
49 |   description = "The total number of virtual processor cores to assign to the virtual machine."
50 | }
51 | 
52 | variable "disk_size" {
53 |   type        = number
54 |   default     = 650
55 |   description = "The size of the virtual machine's disk, in GB"
56 | }
57 | 
58 | variable "ssh_public_key" {
59 |   type        = string
60 |   description = "The public SSH key, added as an SSH authorized key"
61 | }
62 | 
63 | variable "ssh_private_key" {
64 |   type        = string
65 |   description = "The private SSH key path, used to authenticate against the new template"
66 |   sensitive   = true
67 | }
68 | 
69 | variable "cloud_config_file" {
70 |   type        = string
71 |   default     = "cloud-config.yaml"
72 |   description = "Name for the cloud-init configuration"
73 | }
74 | 
75 | variable "cloud_image_url" {
76 |   type        = string
77 |   description = "Cloud image URL"
78 | }
79 | 
80 | variable "cores_per_socket" {
81 |   type        = number
82 |   default     = 1
83 |   description = <
[... content missing from this dump: the rest of this description and file, plus the beginning of the next listed file; the listing resumes inside a Python class method ...]
52 |     def delete(self) -> None:
53 |         self.v1_api.delete_namespaced_secret(name=self.ref.name, namespace=self.ref.namespace)
54 | 
55 |         log.info("deleted secret %s", self.ref)
56 | 
57 |     def get(self) -> dict:
58 |         return self.v1_api.read_namespaced_secret(
59 |             name=self.ref.name,
60 |             namespace=self.ref.namespace,
61 |             pretty=True,
62 |         )
63 | 
64 | 
65 | def deploy_default_secret(
66 |     kube_api_client: ApiClient, name: str, namespace: str, pull_secret: str, ignore_conflict: bool = True
67 | ) -> Secret:
68 |     _validate_pull_secret(pull_secret)
69 |     secret = Secret(kube_api_client, name, namespace)
70 |     try:
71 |         secret.create(pull_secret)
72 |     except ApiException as e:
73 |         if not (e.reason == "Conflict" and ignore_conflict):
74 |             raise
75 |     return secret
76 | 
77 | 
78 | def _validate_pull_secret(pull_secret: str) -> None:
79 |     if not pull_secret:
80 |         return
81 |     try:
82 |         json.loads(pull_secret)
83 |     except json.JSONDecodeError as e:
84 |         raise ValueError(f"invalid pull secret {pull_secret}") from e
85 | 
--------------------------------------------------------------------------------
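To make the pull-secret flow above concrete, here is a hedged usage sketch. It is not code from this repository: the client construction, host, namespace, and secret values are all illustrative, while `deploy_default_secret` is the function defined in the listing above.

```python
from kubernetes.client import ApiClient, Configuration

# Illustrative client setup; the real flow obtains an ApiClient from the
# test infra's kube helpers rather than building one by hand.
configuration = Configuration()
configuration.host = "https://api.example.com:6443"
api_client = ApiClient(configuration=configuration)

# A pull secret must be valid JSON (or empty); _validate_pull_secret rejects
# anything else with ValueError before the Secret is created.
pull_secret = '{"auths": {"registry.example.com": {"auth": "dXNlcjpwYXNz"}}}'

# Creating the same secret twice is tolerated: a 409 Conflict from the API
# is swallowed because ignore_conflict defaults to True.
secret = deploy_default_secret(
    kube_api_client=api_client,
    name="pull-secret",
    namespace="assisted-installer",
    pull_secret=pull_secret,
)
print(secret.get())
```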