├── .ansible-lint
├── .coderabbit.yaml
├── .git-blame-ignore-revs
├── .github
├── CODEOWNERS
├── ISSUE_TEMPLATE
│ ├── bug_report.yml
│ └── feature_request.yml
├── PULL_REQUEST_TEMPLATE.md
└── workflows
│ ├── dci-merge.yml
│ ├── pr.yml
│ ├── preflight-release.yml
│ ├── publish.yml
│ └── stale.yml
├── .gitignore
├── CONTRIBUTING.md
├── LICENSE-Apache-2.0
├── LICENSE-GPL-3.0
├── README.md
├── ansible-collection-redhatci-ocp.spec
├── docs
└── out-of-date.png
├── galaxy.yml
├── hack
├── README.md
├── ansible-lint.sh
├── check_doc.sh
├── check_version.sh
├── dci-merge.sh
├── import-roles.sh
├── rules
│ ├── redhat_ci.md
│ └── redhat_ci.py
└── run_ansible_test.sh
├── meta
├── execution-environment.yml
├── requirements-test.txt
├── requirements.txt
└── runtime.yml
├── playbooks
├── README.md
└── multibench_setup_host.yml
├── plugins
├── filter
│ ├── junit2dict.py
│ ├── junit2obj.py
│ ├── ocp_compatibility.py
│ └── regex_diff.py
├── modules
│ ├── get_compatible_rhocp_repo.py
│ ├── nmcli.py
│ └── packet_missing.py
└── requirements.txt
├── roles
├── acm
│ └── utils
│ │ ├── README.md
│ │ ├── defaults
│ │ └── main.yml
│ │ ├── meta
│ │ └── argument_specs.yml
│ │ └── tasks
│ │ ├── disconnect-agent.yml
│ │ ├── get-credentials.yml
│ │ ├── image-sources.yml
│ │ └── monitor-install.yml
├── acm_hypershift
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ ├── tasks
│ │ ├── create-cluster.yml
│ │ ├── delete-cluster.yml
│ │ ├── download-cli.yml
│ │ ├── get-mc-facts.yml
│ │ ├── kvirt-disconnected.yml
│ │ ├── main.yml
│ │ ├── restart-cpo.yml
│ │ └── validations.yml
│ └── vars
│ │ └── main.yml
├── acm_setup
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ ├── tasks
│ │ ├── get-mirroring-config.yml
│ │ ├── main.yml
│ │ └── validation.yml
│ └── vars
│ │ └── main.yml
├── acm_sno
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ └── tasks
│ │ ├── acm-pull-data.yml
│ │ ├── create-cluster.yml
│ │ ├── delete-cluster.yml
│ │ ├── main.yml
│ │ └── pre-run.yml
├── acm_spoke_mgmt
│ ├── README.md
│ ├── defaults
│ │ └── main.yaml
│ ├── tasks
│ │ ├── attach.yaml
│ │ ├── delete-ztp-by-ref.yaml
│ │ ├── detach.yaml
│ │ └── main.yaml
│ └── templates
│ │ └── autoimport_secret.yml.j2
├── apply_nmstate
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ └── tasks
│ │ ├── check_if_vm_host_is_bastion.yml
│ │ └── main.yml
├── approve_csrs
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ └── tasks
│ │ └── main.yml
├── boot_disk
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ ├── meta
│ │ └── main.yml
│ └── tasks
│ │ └── main.yml
├── boot_iso
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ ├── meta
│ │ └── main.yml
│ ├── tasks
│ │ └── main.yml
│ └── templates
│ │ └── vitual_media.json.j2
├── catalog_source
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ └── tasks
│ │ └── main.yml
├── chart_verifier
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ ├── tasks
│ │ ├── cleanup.yml
│ │ ├── get-partner-name.yml
│ │ ├── get-tools.yml
│ │ ├── main.yml
│ │ ├── mirror-chart-images.yml
│ │ └── tests.yml
│ └── templates
│ │ └── report.j2
├── check_resource
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ └── tasks
│ │ ├── main.yml
│ │ ├── verify_network_state.yml
│ │ ├── wait-mcp.yml
│ │ └── wait-sriov.yml
├── cluster_compare
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ ├── tasks
│ │ ├── containers.yml
│ │ ├── main.yml
│ │ ├── upstream.yml
│ │ └── validations.yml
│ └── vars
│ │ └── main.yml
├── configure_ztp_gitops_apps
│ ├── README.md
│ ├── defaults
│ │ └── main.yaml
│ └── tasks
│ │ └── main.yaml
├── configure_ztp_gitops_repo
│ ├── README.md
│ ├── defaults
│ │ └── main.yaml
│ └── tasks
│ │ └── main.yaml
├── conserver
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ ├── files
│ │ └── socat_retry.sh
│ ├── tasks
│ │ ├── config-ipmi.yml
│ │ ├── config-libvirt.yml
│ │ ├── config.yml
│ │ ├── main.yml
│ │ ├── packages.yml
│ │ └── service.yml
│ └── templates
│ │ ├── conserver-ipmi.cf
│ │ └── conserver-libvirt.cf
├── copy_and_render
│ ├── README.md
│ └── tasks
│ │ └── main.yml
├── create_certification_project
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ ├── files
│ │ └── github_token.png
│ ├── tasks
│ │ ├── attach_one_product_listing.yml
│ │ ├── attach_product_listings.yml
│ │ ├── check_if_cnf_project_exists.yml
│ │ ├── check_if_container_project_exists.yml
│ │ ├── check_if_helmchart_project_exists.yml
│ │ ├── check_if_project_exists.yml
│ │ ├── create_project.yml
│ │ ├── get-gh-username.yml
│ │ ├── get_all_projects_for_product_listing.yml
│ │ ├── main.yml
│ │ ├── update_project.yml
│ │ └── validate_cert_settings.yml
│ └── templates
│ │ ├── attach_product_listing.json.j2
│ │ ├── create_project_cnf.json.j2
│ │ ├── create_project_container.json.j2
│ │ ├── create_project_helmchart.json.j2
│ │ ├── create_project_operator.json.j2
│ │ ├── update_project_container.json.j2
│ │ ├── update_project_helmchart.json.j2
│ │ └── update_project_operator.json.j2
├── create_helmchart
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ └── tasks
│ │ └── main.yml
├── create_pr
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ ├── tasks
│ │ ├── bundle_operator.yml
│ │ ├── get-gh-bin.yml
│ │ ├── helm_chart_verifier.yml
│ │ └── main.yml
│ └── templates
│ │ └── ci.yaml.j2
├── create_rhde_builder
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ ├── meta
│ │ └── argument_specs.yml
│ └── tasks
│ │ ├── add_rhde_builder_vm_to_inventory.yml
│ │ ├── cleanup_rhde_builder.yml
│ │ ├── main.yml
│ │ ├── prepare_rhde_virt_image.yml
│ │ └── start_rhde_builder_vm.yml
├── create_vms
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ ├── tasks
│ │ ├── main.yml
│ │ ├── prepare_network.yml
│ │ ├── prepare_storage_pool.yml
│ │ └── provision_vms.yml
│ └── templates
│ │ ├── create_vm.sh.j2
│ │ ├── network.xml.j2
│ │ ├── rng_device.xml.j2
│ │ └── storage-pool.xml.j2
├── deploy_cr
│ ├── README.md
│ └── tasks
│ │ └── main.yml
├── deprecated_api
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ └── tasks
│ │ ├── ensure_ocp_version.yml
│ │ ├── get_api_request_counts_per_namespace.yml
│ │ └── main.yml
├── destroy_vms
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ └── tasks
│ │ ├── destroy_networks.yml
│ │ ├── destroy_pools.yml
│ │ ├── destroy_vms.yml
│ │ └── main.yml
├── efi_boot_mgr
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ ├── files
│ │ └── rm-efiboot
│ └── tasks
│ │ └── main.yml
├── etcd_data
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ ├── tasks
│ │ ├── main.yml
│ │ └── monitor-etcd-data.yml
│ └── vars
│ │ └── main.yml
├── example_cnf_deploy
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ ├── scripts
│ │ ├── get-example-cnf-results.sh
│ │ └── get-example-cnf-status.sh
│ ├── tasks
│ │ ├── catalog.yml
│ │ ├── deploy.yml
│ │ ├── deploy
│ │ │ ├── app.yml
│ │ │ ├── net-config.yml
│ │ │ └── sub.yml
│ │ ├── deploy_extra_trex.yml
│ │ ├── draining.yml
│ │ ├── draining
│ │ │ ├── gather-facts.yml
│ │ │ ├── node-draining.yml
│ │ │ ├── testpmd-validation.yml
│ │ │ └── trex-validation.yml
│ │ ├── main.yml
│ │ ├── trex
│ │ │ ├── app.yml
│ │ │ ├── job.yml
│ │ │ ├── profile.yml
│ │ │ ├── retry-trex.yml
│ │ │ └── tests.yml
│ │ ├── validate.yml
│ │ └── validate
│ │ │ ├── migrate.yml
│ │ │ ├── pod-delete.yml
│ │ │ └── validate.yml
│ └── templates
│ │ ├── grout-cr.yaml.j2
│ │ ├── testpmd-cr.yaml.j2
│ │ ├── trex-app-cr.yaml.j2
│ │ ├── trex-app-job.yaml.j2
│ │ └── trex-server-cr.yaml.j2
├── extract_openshift_installer
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ └── tasks
│ │ └── main.yml
├── fbc_catalog
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ ├── tasks
│ │ ├── add-bundle.yml
│ │ └── main.yml
│ └── templates
│ │ └── catalog.Dockerfile.j2
├── generate_agent_iso
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ └── tasks
│ │ └── main.yml
├── generate_manifests
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ ├── tasks
│ │ ├── main.yml
│ │ ├── manifest.yml
│ │ └── static.yml
│ └── templates
│ │ ├── agent-config.yaml.j2
│ │ ├── install-config.yaml.j2
│ │ └── registry-config.j2
├── generate_ssh_key_pair
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ └── tasks
│ │ └── main.yml
├── get_logs_from_namespace
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ └── tasks
│ │ └── main.yml
├── gitops_configure_repo
│ ├── README.md
│ ├── defaults
│ │ └── main.yaml
│ ├── tasks
│ │ └── main.yaml
│ └── templates
│ │ ├── http_private_repo.yaml.j2
│ │ └── ssh_private_repo.yaml.j2
├── hco_setup
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ ├── tasks
│ │ ├── deploy-hco.yml
│ │ ├── main.yml
│ │ ├── test-deploy-vm.yml
│ │ └── validation.yml
│ └── templates
│ │ └── vm.yaml.j2
├── hostedbm
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ └── tasks
│ │ ├── create-agent.yml
│ │ ├── create-cluster.yml
│ │ ├── download-cli.yml
│ │ ├── main.yml
│ │ ├── post-deploy-step-on-guest-cluster.yml
│ │ └── validations.yml
├── include_components
│ ├── README.md
│ └── tasks
│ │ ├── main.yml
│ │ ├── track_dev_git_repo.yml
│ │ ├── track_git_repo.yml
│ │ ├── track_repo_commit_component.yml
│ │ └── track_rpm.yml
├── insert_dns_records
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ ├── files
│ │ └── nm-dnsmasq.conf
│ ├── handlers
│ │ └── main.yml
│ ├── tasks
│ │ ├── configure_firewall.yml
│ │ ├── create_host_entry.yml
│ │ ├── dnsmasq.yml
│ │ ├── main.yml
│ │ └── network-manager.yml
│ └── templates
│ │ ├── nm-dnsmasq.conf.j2
│ │ └── openshift-cluster.conf.j2
├── install_operator_gitops
│ ├── README.md
│ ├── defaults
│ │ └── main.yaml
│ └── tasks
│ │ └── main.yaml
├── installer
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ ├── files
│ │ ├── customize_filesystem
│ │ │ ├── master
│ │ │ │ └── .gitkeep
│ │ │ └── worker
│ │ ├── filetranspile-1.1.1.py
│ │ ├── ipv6-dual-stack-no-upgrade.yml
│ │ ├── manifests
│ │ │ └── .gitkeep
│ │ └── openshift
│ │ │ └── .gitkeep
│ ├── handlers
│ │ └── main.yml
│ ├── meta
│ │ └── main.yml
│ ├── tasks
│ │ ├── 10_get_oc.yml
│ │ ├── 15_disconnected_registry_create.yml
│ │ ├── 15_disconnected_registry_existing.yml
│ │ ├── 20_extract_installer.yml
│ │ ├── 23_rhcos_image_paths.yml
│ │ ├── 24_rhcos_image_cache.yml
│ │ ├── 25_create-install-config.yml
│ │ ├── 30_create_metal3.yml
│ │ ├── 40_create_manifest.yml
│ │ ├── 50_extramanifests.yml
│ │ ├── 55_customize_filesystem.yml
│ │ ├── 59_cleanup_bootstrap.yml
│ │ ├── 59_power_off_cluster_servers.yml
│ │ ├── 60_deploy_ocp.yml
│ │ ├── 70_cleanup_sub_man_registration.yml
│ │ └── main.yml
│ ├── templates
│ │ ├── chrony.conf.j2
│ │ ├── etc-chrony.conf.j2
│ │ ├── httpd_conf.j2
│ │ ├── install-config-appends.j2
│ │ ├── install-config-virtualmedia.j2
│ │ ├── install-config.j2
│ │ ├── magic.j2
│ │ └── metal3-config.j2
│ ├── tests
│ │ ├── inventory
│ │ └── test.yml
│ └── vars
│ │ └── main.yml
├── jenkins_job_launcher
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ └── tasks
│ │ └── main.yml
├── junit2json
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ ├── meta
│ │ ├── argument_specs.yml
│ │ └── main.yml
│ ├── tasks
│ │ ├── convert.yml
│ │ ├── expand.yml
│ │ ├── main.yml
│ │ ├── merge.yml
│ │ └── validate-dependency.yml
│ └── tests
│ │ ├── inventory
│ │ └── test.yml
├── k8s_best_practices_certsuite
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ ├── tasks
│ │ ├── logging.yml
│ │ ├── main.yml
│ │ ├── pre-run.yml
│ │ ├── prepare_json_authentication.yml
│ │ ├── teardown.yml
│ │ └── tests.yml
│ └── templates
│ │ ├── certsuite_config.yml.j2
│ │ └── feedback.js.j2
├── kvirt_vm
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ ├── tasks
│ │ ├── create-vm.yml
│ │ ├── delete-vm.yml
│ │ ├── main.yml
│ │ └── validations.yml
│ └── templates
│ │ └── vm-template.yml.j2
├── label_nodes
│ ├── README.md
│ ├── tasks
│ │ └── main.yml
│ ├── templates
│ │ └── node_label.yml
│ └── vars
│ │ └── main.yml
├── manage_firewalld_zone
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ ├── meta
│ │ └── main.yml
│ └── tasks
│ │ └── main.yml
├── merge_registry_creds
│ ├── README.md
│ └── tasks
│ │ └── main.yml
├── metallb_setup
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ ├── tasks
│ │ ├── clean-resources.yml
│ │ ├── main.yml
│ │ ├── pre-requisites.yml
│ │ └── setup-metallb.yml
│ ├── templates
│ │ └── bgppeers.yml.j2
│ └── vars
│ │ └── main.yml
├── microshift_generate_iso
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ ├── tasks
│ │ ├── create_dirs.yml
│ │ ├── creating_iso.yml
│ │ ├── creating_rhel_edge_image.yml
│ │ ├── install.yml
│ │ ├── install_packages.yml
│ │ ├── main.yml
│ │ └── register_source.yml
│ └── templates
│ │ ├── blueprint.toml.j2
│ │ ├── kickstart.ks.j2
│ │ ├── microshift-installer.toml.j2
│ │ └── minimal_microshift.toml.j2
├── mirror_catalog
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ └── tasks
│ │ └── main.yml
├── mirror_from_directory
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ └── tasks
│ │ ├── load-operators.yml
│ │ ├── main.yml
│ │ └── validation.yml
├── mirror_images
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ └── tasks
│ │ ├── main.yml
│ │ └── mirror-images.yml
├── mirror_ocp_release
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ ├── tasks
│ │ ├── artifacts.yml
│ │ ├── dirs.yml
│ │ ├── facts.yml
│ │ ├── fetch.yml
│ │ ├── files.yml
│ │ ├── image-list.yml
│ │ ├── images.yml
│ │ ├── main.yml
│ │ └── registry.yml
│ └── vars
│ │ └── main.yml
├── monitor_agent_based_installer
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ └── tasks
│ │ └── main.yml
├── mount_discovery_iso_for_pxe
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ ├── tasks
│ │ └── main.yml
│ └── templates
│ │ └── grub.cfg.j2
├── multibench_run
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ ├── images
│ │ └── example-A.png
│ ├── tasks
│ │ └── main.yml
│ └── templates
│ │ └── config.ini.j2
├── nfs_external_storage
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ └── tasks
│ │ └── main.yml
├── node_prep
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ ├── handlers
│ │ └── main.yml
│ ├── meta
│ │ └── main.yml
│ ├── tasks
│ │ ├── 10_validation.yml
│ │ ├── 15_validation_disconnected_registry.yml
│ │ ├── 20_sub_man_register.yml
│ │ ├── 30_req_packages.yml
│ │ ├── 40_bridge.yml
│ │ ├── 45_networking_facts.yml
│ │ ├── 50_modify_sudo_user.yml
│ │ ├── 60_enabled_services.yml
│ │ ├── 70_enabled_fw_services.yml
│ │ ├── 80_libvirt_pool.yml
│ │ ├── 90_create_config_install_dirs.yml
│ │ └── main.yml
│ ├── templates
│ │ └── dir.xml.j2
│ ├── tests
│ │ ├── inventory
│ │ └── test.yml
│ └── vars
│ │ └── main.yml
├── ocp_add_users
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ ├── tasks
│ │ ├── add-roles.yml
│ │ ├── add-users.yml
│ │ ├── create-users.yml
│ │ ├── get-users.yml
│ │ └── main.yml
│ └── vars
│ │ └── main.yml
├── ocp_logging
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ ├── tasks
│ │ ├── clean-resources.yml
│ │ ├── main.yml
│ │ ├── pre-run.yml
│ │ ├── setup-logging.yml
│ │ └── validate-stack.yml
│ └── vars
│ │ └── main.yml
├── ocp_on_libvirt
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ ├── tasks
│ │ ├── copy_image_src.yml
│ │ ├── dci_setup.yml
│ │ ├── dns_cleanup.yml
│ │ ├── dns_setup.yml
│ │ ├── libvirt_host_destroy.yml
│ │ ├── libvirt_host_up.yml
│ │ ├── libvirt_host_up2.yml
│ │ ├── libvirt_network_destroy.yml
│ │ ├── libvirt_network_up.yml
│ │ ├── main.yml
│ │ ├── redfish_setup.yml
│ │ ├── setup.yml
│ │ └── ssh_keygen.yml
│ ├── templates
│ │ ├── cloud-config
│ │ │ ├── meta-data
│ │ │ └── user-data
│ │ ├── hosts.j2
│ │ ├── libvirt_network.xml.j2
│ │ └── libvirt_node.xml.j2
│ └── vars
│ │ └── main.yml
├── ocp_remove_nodes
│ ├── README.md
│ ├── meta
│ │ └── argument_specs.yml
│ └── tasks
│ │ └── main.yml
├── odf_setup
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ ├── tasks
│ │ ├── local-storage-operator.yml
│ │ ├── main.yml
│ │ ├── openshift-storage-operator.yml
│ │ ├── set-facts.yml
│ │ ├── tests.yml
│ │ └── validation.yml
│ └── templates
│ │ ├── local-storage-block.yml.j2
│ │ ├── ocs-disk-gatherer.yml.j2
│ │ └── openshift-storage-cluster.yml.j2
├── olm_operator
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ └── tasks
│ │ └── main.yml
├── opcap_tool
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ └── tasks
│ │ ├── build.yml
│ │ └── main.yml
├── openshift_cnf
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ └── tasks
│ │ └── main.yml
├── operator_sdk
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ ├── tasks
│ │ ├── build_scorecard_image.yml
│ │ ├── main.yml
│ │ ├── mirroring.yml
│ │ ├── teardown.yml
│ │ └── tests_scorecard_check_operator.yml
│ └── templates
│ │ ├── scorecard-debug-pod-logs.j2
│ │ ├── scorecard-errors-basic-check-spec-test.j2
│ │ └── scorecard-test-config.yaml.j2
├── populate_mirror_registry
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ ├── tasks
│ │ ├── cleanup.yml
│ │ ├── main.yml
│ │ ├── populate_registry.yml
│ │ ├── prerequisites.yml
│ │ └── var_check.yml
│ └── templates
│ │ └── filter.sh.j2
├── preflight
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ ├── files
│ │ ├── scorecard-images.yml
│ │ └── scorecard-old-images.yml
│ ├── handlers
│ │ └── main.yml
│ ├── tasks
│ │ ├── create_cert_projects_loop.yml
│ │ ├── main.yml
│ │ ├── main_standalone_containers_certification.yml
│ │ ├── mirroring.yml
│ │ ├── prepare_custom_certificate.yml
│ │ ├── prepare_operator_metadata.yml
│ │ ├── prepare_preflight_image.yml
│ │ ├── prepare_runtime_assets.yml
│ │ ├── teardown.yml
│ │ ├── test_all_logs.yml
│ │ ├── test_async_logs_check_operator.yml
│ │ ├── test_check_if_container_certified.yml
│ │ ├── test_preflight_check_container.yml
│ │ ├── test_preflight_check_container_one_image.yml
│ │ ├── test_preflight_container_parallel.yml
│ │ ├── test_run_health_check.yml
│ │ ├── test_validate_annotations.yml
│ │ └── tests_preflight_check_operator.yml
│ └── templates
│ │ ├── cluster_logs.j2
│ │ └── mcp_logs.j2
├── prereq_facts_check
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ └── tasks
│ │ └── main.yml
├── process_kvm_nodes
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ └── tasks
│ │ ├── create_node.yml
│ │ └── main.yml
├── process_nmstate
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ ├── tasks
│ │ └── main.yml
│ └── templates
│ │ └── nmstate.yml.j2
├── prune_catalog
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ └── tasks
│ │ ├── extract-channel.yml
│ │ ├── extract-operators.yml
│ │ └── main.yml
├── pyxis
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ ├── tasks
│ │ └── main.yml
│ └── templates
│ │ ├── artifact_info.json.j2
│ │ └── test_results.json.j2
├── redhat_tests
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ └── tasks
│ │ ├── cni-tests.yml
│ │ ├── conformance.yml
│ │ ├── csi-tests.yml
│ │ ├── kubevirt-tests.yml
│ │ └── main.yml
├── remove_ztp_gitops_resources
│ ├── README.md
│ ├── defaults
│ │ └── main.yaml
│ └── tasks
│ │ └── main.yaml
├── resources_to_components
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ └── tasks
│ │ ├── create-component.yml
│ │ ├── inspect-resources.yml
│ │ ├── main.yml
│ │ └── resource-info-to-components.yml
├── rhoai
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ ├── tasks
│ │ ├── create-dsc.yml
│ │ ├── install.yml
│ │ ├── install_operator.yml
│ │ ├── main.yml
│ │ ├── uninstall.yml
│ │ └── uninstall_operator.yml
│ └── vars
│ │ └── main.yml
├── setup_gitea
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ ├── tasks
│ │ ├── cleanup.yml
│ │ ├── install.yml
│ │ └── main.yml
│ └── templates
│ │ ├── deployment_gitea.j2
│ │ ├── gitea_app_ini.j2
│ │ ├── namespace.j2
│ │ ├── role_binding_sa_to_scc_anyuid.j2
│ │ ├── role_scc_anyuid.j2
│ │ ├── route_gitea.j2
│ │ ├── sa_anyuid.j2
│ │ ├── secret_gitea_app_ini.j2
│ │ └── service_gitea.j2
├── setup_http_store
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ └── tasks
│ │ └── main.yml
├── setup_minio
│ ├── .linted
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ ├── tasks
│ │ ├── cleanup.yml
│ │ ├── install.yml
│ │ └── main.yml
│ └── vars
│ │ └── main.yml
├── setup_mirror_registry
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ └── tasks
│ │ ├── main.yml
│ │ ├── prerequisites.yml
│ │ ├── retrieve_config.yml
│ │ ├── set_mirror_cert.yml
│ │ ├── setup_registry.yml
│ │ └── var_check.yml
├── setup_netobserv_stack
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ ├── tasks
│ │ ├── cleanup.yml
│ │ ├── main.yml
│ │ ├── setup.yml
│ │ ├── validation.yml
│ │ └── verify.yml
│ ├── templates
│ │ ├── flow-collector.yml.j2
│ │ └── rbacs.yml.j2
│ └── vars
│ │ └── main.yml
├── setup_ntp
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ ├── handlers
│ │ └── main.yml
│ ├── tasks
│ │ └── main.yml
│ └── templates
│ │ └── chrony.conf.j2
├── setup_radvd
│ ├── README.md
│ ├── defaults
│ │ └── main.yaml
│ ├── files
│ │ └── sysctl.d
│ │ │ └── ipv6.conf
│ ├── handlers
│ │ └── main.yaml
│ ├── tasks
│ │ ├── main.yaml
│ │ └── pre-requisites.yaml
│ └── templates
│ │ └── radvd.conf.j2
├── setup_selfsigned_cert
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ └── tasks
│ │ └── main.yml
├── setup_sushy_tools
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ ├── tasks
│ │ └── main.yml
│ └── templates
│ │ ├── sushy-emulator.conf.j2
│ │ └── sushy-tools.service.j2
├── setup_tftp
│ ├── README.md
│ ├── handlers
│ │ └── main.yml
│ └── tasks
│ │ └── main.yml
├── setup_vm_host_network
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ └── tasks
│ │ ├── main.yml
│ │ └── make_network_config.yml
├── sideload_kernel
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ ├── files
│ │ └── flip_kernel
│ └── tasks
│ │ └── main.yml
├── sno_installer
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ ├── tasks
│ │ ├── 10_get_oc.yml
│ │ ├── 15_disconnected_registry_existing.yml
│ │ ├── 20_extract_installer.yml
│ │ ├── 21_rhcos_image_paths.yml
│ │ ├── 22_rhcos_image_cache.yml
│ │ ├── 23_rhcos_image_pxetftp.yml
│ │ ├── 24_rhcos_image_live.yml
│ │ ├── 25_create_install_config.yml
│ │ ├── 40_create_manifest.yml
│ │ ├── 50_extramanifests.yml
│ │ ├── 55_create_ignition.yml
│ │ ├── 56_create_grubpxe.yml
│ │ ├── 57_embed_ignition_into_iso.yml
│ │ ├── 59_cleanup_sno_vm.yml
│ │ ├── 60_deploy_sno_bm.yml
│ │ ├── 60_deploy_sno_vm.yml
│ │ ├── 70_wait_deployment.yml
│ │ └── main.yml
│ ├── templates
│ │ ├── chrony.conf.j2
│ │ ├── etc-chrony.conf.j2
│ │ ├── grub.cfg.j2
│ │ └── install-config.j2
│ └── vars
│ │ └── main.yml
├── sno_node_prep
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ ├── tasks
│ │ ├── 10_validation.yml
│ │ ├── 15_validation_disconnected_registry.yml
│ │ ├── 30_req_packages.yml
│ │ ├── 45_networking_facts.yml
│ │ ├── 50_modify_sudo_user.yml
│ │ ├── 60_enabled_services.yml
│ │ ├── 65_dnsmasq_tftp.yml
│ │ ├── 70_cleanup_libvirt_network.yml
│ │ ├── 71_cleanup_dns_entries.yml
│ │ ├── 72_cleanup_files.yml
│ │ ├── 80_libvirt_storage.yml
│ │ ├── 85_libvirt_network.yml
│ │ ├── 87_network_manager.yml
│ │ ├── 88_etc_hosts.yml
│ │ ├── 90_create_config_install_dirs.yml
│ │ ├── 95_check_pull_secret.yml
│ │ └── main.yml
│ ├── templates
│ │ ├── dir.xml.j2
│ │ ├── dnsmasq.conf.j2
│ │ └── network.xml.j2
│ └── vars
│ │ └── main.yml
├── sos_report
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ └── tasks
│ │ ├── main.yml
│ │ └── sos-reports.yml
├── sriov_config
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ ├── tasks
│ │ ├── check_sriov_network.yml
│ │ ├── check_sriov_node_policy.yml
│ │ ├── create_networks.yml
│ │ ├── create_node_policies.yml
│ │ ├── main.yml
│ │ └── validation.yml
│ └── templates
│ │ ├── sriov-network-node-policy.yml.j2
│ │ └── sriov-network.yml.j2
├── storage_tester
│ ├── README.md
│ ├── files
│ │ └── junit-report.xml
│ ├── scripts
│ │ ├── storage-tester-ROX.sh
│ │ ├── storage-tester-RWO.sh
│ │ └── storage-tester-RWX.sh
│ ├── tasks
│ │ ├── gathering-logs.yml
│ │ ├── main.yml
│ │ └── teardown.yml
│ └── templates
│ │ ├── tester-cronjob-rox.yaml.j2
│ │ ├── tester-cronjob-rwo.yaml.j2
│ │ ├── tester-cronjob-rwx.yaml.j2
│ │ └── tester-init-pv-job.yaml.j2
├── upi_installer
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ ├── handlers
│ │ └── main.yml
│ └── tasks
│ │ ├── 00_validation.yml
│ │ ├── 05_create_config_install_dirs.yml
│ │ ├── 10_get_oc.yml
│ │ ├── 20_extract_installer.yml
│ │ ├── 23_rhcos_image_paths.yml
│ │ ├── 25_read_ssh_key.yml
│ │ └── main.yml
├── validate_dns_records
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ └── tasks
│ │ ├── check.yml
│ │ └── main.yml
├── validate_http_store
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ ├── tasks
│ │ └── main.yml
│ └── templates
│ │ └── test_file.j2
├── vbmc
│ ├── README.md
│ ├── README.rst
│ ├── defaults
│ │ └── main.yml
│ ├── tasks
│ │ ├── check.yml
│ │ ├── cleanup.yml
│ │ ├── configure.yml
│ │ ├── firewalld.yml
│ │ ├── install.yml
│ │ ├── main.yml
│ │ ├── remove.yml
│ │ └── start_node.yml
│ └── templates
│ │ └── nodes_dict.json.j2
├── vendors
│ ├── dell
│ │ ├── README.md
│ │ ├── defaults
│ │ │ └── main.yml
│ │ └── tasks
│ │ │ ├── disk.yml
│ │ │ ├── disk_redfish.yml
│ │ │ ├── eject.yml
│ │ │ ├── exists.yml
│ │ │ ├── iso.yml
│ │ │ ├── iso_idrac.yml
│ │ │ └── iso_redfish.yml
│ ├── hpe
│ │ ├── README.md
│ │ ├── defaults
│ │ │ └── main.yml
│ │ └── tasks
│ │ │ ├── disk.yml
│ │ │ ├── eject.yml
│ │ │ ├── exists.yml
│ │ │ └── iso.yml
│ ├── kvm
│ │ ├── README.md
│ │ ├── defaults
│ │ │ └── main.yml
│ │ └── tasks
│ │ │ ├── disk.yml
│ │ │ ├── eject.yml
│ │ │ ├── exists.yml
│ │ │ └── iso.yml
│ ├── lenovo
│ │ ├── README.md
│ │ ├── defaults
│ │ │ └── main.yml
│ │ └── tasks
│ │ │ ├── disk.yml
│ │ │ ├── eject.yml
│ │ │ ├── exists.yml
│ │ │ └── iso.yml
│ ├── pxe
│ │ ├── README.md
│ │ ├── defaults
│ │ │ └── main.yml
│ │ └── tasks
│ │ │ ├── disk.yml
│ │ │ ├── exists.yml
│ │ │ └── iso.yml
│ ├── supermicro
│ │ ├── README.md
│ │ ├── defaults
│ │ │ └── main.yml
│ │ └── tasks
│ │ │ ├── disk.yml
│ │ │ ├── eject.yml
│ │ │ ├── exists.yml
│ │ │ ├── iso.yml
│ │ │ └── mount.yml
│ └── zt
│ │ ├── README.md
│ │ ├── defaults
│ │ └── main.yml
│ │ └── tasks
│ │ ├── disk.yml
│ │ ├── eject.yml
│ │ ├── exists.yml
│ │ ├── get_resource_id.yml
│ │ ├── iso.yml
│ │ ├── power_off.yml
│ │ └── set_resource_id.yml
├── verify_tests
│ ├── README.md
│ ├── defaults
│ │ └── main.yml
│ └── tasks
│ │ ├── main.yml
│ │ ├── parse_junit_file.yml
│ │ └── read_junit_files.yml
└── ztp
│ └── setup_cluster_image_set
│ ├── README.md
│ ├── defaults
│ └── main.yml
│ └── tasks
│ └── main.yml
└── tests
├── config.yml
├── integration
└── targets
│ └── copy_and_render
│ ├── copy_and_render.yml
│ ├── files
│ ├── file1.txt
│ └── template.j2
│ └── runme.sh
└── unit
├── data
├── merged.junit.json
├── test_junit2obj_complex_input.xml
├── test_junit2obj_complex_result.json
├── test_junit2obj_failure_input.xml
├── test_junit2obj_failure_result.json
├── test_junit2obj_simple_input.xml
├── test_junit2obj_simple_result.json
└── test_ocp_compatibility_data.json
├── filter
├── test_junit2obj.py
└── test_ocp_compatibility.py
└── requirements.txt
/.ansible-lint:
--------------------------------------------------------------------------------
1 | ---
2 | # vim: ft=yaml
3 |
4 | rulesdir:
5 | - hack/rules
6 |
7 | use_default_rules: true
8 |
9 | display_relative_path: true
10 |
11 | verbosity: 1
12 |
13 | exclude_paths:
14 | - .github
15 | - plugins
16 | - hack
17 | - tests
18 | - roles/**/molecule
19 |
20 | skip_list:
21 | - var-naming[no-role-prefix]
22 |
23 | warn_list:
24 | - ignore-errors
25 | - jinja[spacing] # multiline is not working https://github.com/ansible/ansible-lint/discussions/3015
26 | - no-changed-when
27 | - yaml[line-length]
28 | ...
29 |
--------------------------------------------------------------------------------
/.coderabbit.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | tone_instructions: "amiable and concise"
3 | reviews:
4 | high_level_summary: false
5 | poem: false
6 | collapse_walkthrough: true
7 | sequence_diagrams: false
8 | suggested_labels: false
9 |
--------------------------------------------------------------------------------
/.git-blame-ignore-revs:
--------------------------------------------------------------------------------
1 | # Mass linting of roles
2 | 848f809caf5edfccfd543dd82d74196b99560908
3 | d6d3c1301049f6e4cc62eccbe9b9cabdb05c05a3
4 | b395c0ea08065a31a3eec7031fbe28ec3c14b69e
5 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/bug_report.yml:
--------------------------------------------------------------------------------
1 | ---
2 | name: 🐞 Bug report
3 | description: Create a bug report
4 | body:
5 | - type: markdown
6 | attributes:
7 | value: |
8 | Thanks for your feedback! Please provide a clear and concise description of what the bug is.
9 |
10 | - type: textarea
11 | id: summary
12 | attributes:
13 | label: Bug Summary
14 | description: Briefly describe the problem.
15 | validations:
16 | required: true
17 |
18 | - type: textarea
19 | id: details
20 | attributes:
21 | label: Details
22 | description: Provide the steps to reproduce or any additional information on what was expected.
23 | validations:
24 | required: true
25 |
--------------------------------------------------------------------------------
/.github/ISSUE_TEMPLATE/feature_request.yml:
--------------------------------------------------------------------------------
1 | ---
2 | name: ✨ Feature request
3 | description: Suggest a new idea for this project
4 | body:
5 | - type: markdown
6 | attributes:
7 | value: |
8 | Feature Request issues are for **feature requests** only.
9 |
10 | - type: textarea
11 | id: feature
12 | attributes:
13 | label: Feature Request
14 | description: Please describe the feature you would like to request.
15 | placeholder: What feature would you like to see?
16 | validations:
17 | required: true
18 |
--------------------------------------------------------------------------------
/.github/workflows/dci-merge.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | name: DCI Merge Queue
4 | on:
5 | merge_group:
6 | jobs:
7 |
8 | dci-job:
9 | name: "DCI Merge Job"
10 | runs-on: bos2
11 | steps:
12 |
13 | - name: Checkout
14 | uses: actions/checkout@v4
15 | with:
16 | fetch-depth: 0 # Fetch all history to be able to look at the merge commits in dci-merge.sh
17 |
18 | - name: DCI Job
19 | run: |
20 | set -ex
21 | export GITHUB_TOKEN=${{ secrets.GITHUB_TOKEN }}
22 | ./hack/dci-merge.sh ${{ github.event.merge_group.base_sha }} ${{ github.event.merge_group.head_sha }}
23 |
24 | ...
25 |
--------------------------------------------------------------------------------
/.github/workflows/stale.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | name: Stale issues/PRs
4 | on:
5 | schedule:
6 | - cron: "0 9 * * 1-5" # Weekdays at 09:00 UTC
7 |
8 | jobs:
9 | stale:
10 | runs-on: ubuntu-latest
11 | steps:
12 | - uses: actions/stale@v9
13 | with:
14 | stale-issue-message: 'This issue is stale because it has been open 30 days with no activity. Remove stale label or comment or this will be closed in 7 days'
15 | days-before-stale: 30
16 | days-before-close: 7
17 | stale-pr-message: 'This PR is stale because it has been open 30 days with no activity. Remove stale label or comment or this will be closed in 7 days'
18 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | *.tar.gz
2 | roles/**/.linted
3 | tests/integration/inventory
4 | tests/output
5 | __pycache__
6 |
--------------------------------------------------------------------------------
/docs/out-of-date.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/redhatci/ansible-collection-redhatci-ocp/9ad485a10e0a4db164e516643d75b5da6d718b60/docs/out-of-date.png
--------------------------------------------------------------------------------
/hack/README.md:
--------------------------------------------------------------------------------
1 | # Hack / maintenance scripts for internal use only
2 |
3 | This directory is intended for maintainers/developers
4 |
--------------------------------------------------------------------------------
/meta/execution-environment.yml:
--------------------------------------------------------------------------------
1 | ---
2 | version: 3
3 | dependencies:
4 | python: requirements.txt # Python package requirements are listed in this file
5 |
--------------------------------------------------------------------------------
/meta/requirements-test.txt:
--------------------------------------------------------------------------------
1 | -r requirements.txt
2 | pytest
3 | pytest-ansible
4 |
--------------------------------------------------------------------------------
/meta/requirements.txt:
--------------------------------------------------------------------------------
1 | jmespath
2 | junit_xml
3 | junitparser
4 | python-dateutil
5 |
--------------------------------------------------------------------------------
/plugins/requirements.txt:
--------------------------------------------------------------------------------
1 | python-dateutil
2 |
--------------------------------------------------------------------------------
/roles/acm/utils/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | utils_cm_name: mirror-registry
3 | utils_monitor_timeout: 90
4 | utils_monitor_wait_time: 3
5 | utils_unqualified_search_registries:
6 | - registry.redhat.io
7 | - docker.io
8 | ...
9 |
--------------------------------------------------------------------------------
/roles/acm_hypershift/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | ah_cluster_network_cidr: 10.132.0.0/14
3 | ah_clusters_ns: clusters
4 | ah_cluster_type: "kubevirt"
5 | ah_disconnected: false
6 | ah_download_cli: true
7 | ah_force_deploy: false
8 | ah_hc_annotations: ""
9 | ah_node_memory: 8Gi
10 | ah_node_pool_replicas: 2
11 | ah_no_log: true
12 | ah_allow_insecure_registry: true
13 | cpo_annotation: ""
14 | ...
15 |
--------------------------------------------------------------------------------
/roles/acm_hypershift/vars/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | ah_cluster_types:
3 | - kubevirt
4 | - agent
5 |
6 | ah_control_plane_availability_policies:
7 | - HighlyAvailable
8 | - SingleReplica
9 | ...
10 |
--------------------------------------------------------------------------------
/roles/acm_setup/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | hub_disable_selfmanagement: false
3 | hub_availability: High
4 | hub_instance: multiclusterhub
5 | hub_namespace: open-cluster-management
6 | hub_disconnected: false
7 | hub_hugepages_type: hugepages-2Mi
8 | hub_hugepages_size: 1024Mi
9 | hub_db_volume_size: 40Gi
10 | hub_fs_volume_size: 50Gi
11 | hub_img_volume_size: 80Gi
12 | hub_vm_external_network: true
13 | ...
14 |
--------------------------------------------------------------------------------
/roles/acm_setup/vars/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | hub_hypershift_supported: 4.13
3 | hub_siteconfig_supported: 2.12
4 | ...
5 |
--------------------------------------------------------------------------------
/roles/acm_sno/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | acm_cluster_name: sno
3 | acm_base_domain: example.com
4 | acm_cluster_location: unknown
5 | acm_ocp_version: 4.9.47
6 | acm_force_deploy: false
7 | acm_disconnected: false
8 | acm_cluster_network_cidr: 10.128.0.0/14
9 | acm_cluster_network_host_prefix: 23
10 | acm_service_network_cidr: 172.30.0.0/16
11 | acm_release_image: quay.io/openshift-release-dev/ocp-release:4.9.47-x86_64
12 | acm_iso_url: "https://rhcos.mirror.openshift.com/art/storage/releases/rhcos-4.9/49.84.202207192205-0/x86_64/rhcos-49.84.202207192205-0-live.x86_64.iso"
13 | acm_root_fs_url: ''
14 | acm_vm_external_network: true
15 | ...
16 |
--------------------------------------------------------------------------------
/roles/acm_spoke_mgmt/defaults/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | asm_delete_ztp_resources: true
3 | ...
4 |
--------------------------------------------------------------------------------
/roles/acm_spoke_mgmt/tasks/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: Check if the selected action is allowed
4 | vars:
5 | asm_actions:
6 | - detach
7 | - attach
8 | - delete-ztp-by-ref
9 | ansible.builtin.assert:
10 | that:
11 | - asm_action | lower in asm_actions
12 | fail_msg: "{{ asm_action }} is not a supported action"
13 |
14 | - name: Detach a spoke cluster
15 | ansible.builtin.include_tasks: detach.yaml
16 | when:
17 | - asm_action == 'detach'
18 |
19 | - name: Attach a spoke cluster
20 | ansible.builtin.include_tasks: attach.yaml
21 | when:
22 | - asm_action == 'attach'
23 |
24 | - name: Delete a ZTP deployment by references
25 | ansible.builtin.include_tasks: delete-ztp-by-ref.yaml
26 | when:
27 | - asm_action == 'delete-ztp-by-ref'
28 | ...
29 |
--------------------------------------------------------------------------------
/roles/acm_spoke_mgmt/templates/autoimport_secret.yml.j2:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | apiVersion: v1
4 | kind: Secret
5 | metadata:
6 | name: auto-import-secret
7 | namespace: {{ asm_cluster_name }}
8 | stringData:
9 | autoImportRetry: "2"
10 | kubeconfig: >
11 |
12 | {{ asm_kubeconfig | regex_replace('\n', '\n\n') | indent(4, True) }}
13 |
14 | type: Opaque
15 |
16 | ...
17 |
--------------------------------------------------------------------------------
/roles/apply_nmstate/README.md:
--------------------------------------------------------------------------------
1 | # apply_nmstate
2 |
3 | Applies nmstate network configuration to a host.
4 |
5 | ## Variables
6 |
7 | | Variable | Default | Required | Description |
8 | | ------------------------ | -------- | -------------------- | ---------------------------------- |
9 | | rendered_nmstate_yml | - | Yes | The nmstate to apply |
10 | | vm_nmstate_config_path | - | Yes | The path to place the nmstate file |
11 | | vm_network_test_ip | - | Depends on the host | An IP used to check outbound connectivity after the nmstate is applied. This is required if the host is the Ansible control node. |
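12 |
13 | ## Example
14 |
15 | A minimal invocation sketch; the values below are hypothetical, and `rendered_nmstate_yml` would normally be rendered from an nmstate template elsewhere in the playbook:
16 |
17 | ```yaml
18 | - name: Apply nmstate configuration to the VM host
19 |   ansible.builtin.include_role:
20 |     name: redhatci.ocp.apply_nmstate
21 |   vars:
22 |     rendered_nmstate_yml: "{{ lookup('template', 'my-nmstate.yml.j2') }}"  # hypothetical template
23 |     vm_nmstate_config_path: /tmp/vm-host-nmstate.yml
24 |     vm_network_test_ip: 8.8.8.8  # needed when the host is the Ansible control node
25 | ```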
12 |
--------------------------------------------------------------------------------
/roles/apply_nmstate/defaults/main.yml:
--------------------------------------------------------------------------------
1 | vm_host_is_bastion: false
2 |
--------------------------------------------------------------------------------
/roles/apply_nmstate/tasks/check_if_vm_host_is_bastion.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Get bastion machine id
3 | ansible.builtin.slurp:
4 | src: /etc/machine-id
5 | register: bastion_machine_id
6 | delegate_to: bastion
7 | become: false
8 |
9 | - name: Get vm_host machine id
10 | ansible.builtin.slurp:
11 | src: /etc/machine-id
12 | register: vmhost_machine_id
13 |
14 | - name: Check if VM host is the bastion
15 | ansible.builtin.set_fact:
16 | vm_host_is_bastion: "{{ (bastion_machine_id.content == vmhost_machine_id.content) | bool }}"
17 |
--------------------------------------------------------------------------------
/roles/approve_csrs/README.md:
--------------------------------------------------------------------------------
1 | # approve_csrs
2 |
3 | Checks for Certificate Signing Requests (CSRs) in the Pending state and approves them until all nodes in the `day2_workers` group appear in the `oc get nodes` output.
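4 |
5 | ## Example
6 |
7 | A minimal sketch of invoking the role; it assumes a `day2_workers` inventory group exists and relies on the role defaults to locate the kubeconfig:
8 |
9 | ```yaml
10 | - name: Approve pending CSRs until all day2 workers register
11 |   ansible.builtin.include_role:
12 |     name: redhatci.ocp.approve_csrs
13 |   vars:
14 |     kubeconfig_dest_dir: /home/admin  # hypothetical override; defaults to the remote user's home
15 | ```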
--------------------------------------------------------------------------------
/roles/approve_csrs/defaults/main.yml:
--------------------------------------------------------------------------------
1 | kube_filename: "{{ kubeconfig_dest_filename | default('kubeconfig') }}"
2 | dest_dir: "{{ kubeconfig_dest_dir | default(ansible_env.HOME) }}"
3 | kubeconfig_path: "{{ dest_dir }}/{{ kube_filename }}"
4 |
--------------------------------------------------------------------------------
/roles/approve_csrs/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Wait up to 60 mins for CSRs to be approved # The 'loop' default will prevent action when none is needed.
2 |   ansible.builtin.shell:
3 |     cmd: "oc get csr | grep -i pending | cut -f 1 -d ' ' | xargs -n 1 oc adm certificate approve &> /dev/null; oc get nodes -o json"
4 | register: oc_nodes
5 | environment:
6 | KUBECONFIG: "{{ kubeconfig_path }}"
7 | until: (groups['day2_workers'] | difference(oc_nodes.stdout | default('{}') | from_json | json_query('items[].metadata.name') | list )) | length == 0
8 | retries: 60
9 | delay: 60
10 |
--------------------------------------------------------------------------------
/roles/boot_disk/README.md:
--------------------------------------------------------------------------------
1 | # boot_disk
2 |
3 | Reboots nodes from disk, selecting the method based on each node's vendor.
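4 |
5 | ## Example
6 |
7 | A minimal sketch; it assumes each host in the `masters` and `workers` groups sets a `vendor` inventory variable matching one of the `vendors/*` roles (dell, hpe, kvm, ...):
8 |
9 | ```yaml
10 | - name: Reboot the cluster nodes from disk
11 |   ansible.builtin.include_role:
12 |     name: redhatci.ocp.boot_disk
13 | ```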
--------------------------------------------------------------------------------
/roles/boot_disk/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | boot_iso_url: "{{ discovery_iso_server }}/{{ discovery_iso_name }}"
3 |
--------------------------------------------------------------------------------
/roles/boot_disk/meta/main.yml:
--------------------------------------------------------------------------------
1 | galaxy_info:
2 | author: Hanen Garcia
3 | description: Boot from disk
4 | company: Red Hat, Inc.
5 | license: Apache License, Version 2.0
6 | min_ansible_version: 2.9
7 | galaxy_tags: []
8 |
--------------------------------------------------------------------------------
/roles/boot_disk/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # tasks file for boot_disk
3 |
4 | - name: Join list for workers and masters
5 | ansible.builtin.set_fact:
6 | hosts: "{{ groups['masters'] + groups['workers'] | default([]) }}"
7 | when: hosts is not defined
8 |
9 | - name: Boot from disk for "{{ hostvars[item]['vendor'] }}"
10 | ansible.builtin.include_role:
11 | name: "redhatci.ocp.vendors.{{ hostvars[item]['vendor'] | lower }}"
12 | tasks_from: disk.yml
13 | vars:
14 | target_host: "{{ item }}"
15 | loop: "{{ hosts }}"
16 |
--------------------------------------------------------------------------------
/roles/boot_iso/README.md:
--------------------------------------------------------------------------------
1 | # boot_iso
2 |
3 | Boots nodes from the provided ISO, selecting the method based on each node's vendor.
4 |
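5 | ## Example
6 |
7 | A minimal sketch; it assumes the target host sets a `vendor` inventory variable and that the ISO URL is reachable from the node's BMC:
8 |
9 | ```yaml
10 | - name: Boot a node from the discovery ISO
11 |   ansible.builtin.include_role:
12 |     name: redhatci.ocp.boot_iso
13 |   vars:
14 |     boot_iso_url: "http://bastion.example.com:8080/discovery.iso"  # hypothetical URL
15 | ```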
--------------------------------------------------------------------------------
/roles/boot_iso/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | boot_iso_url: "{{ discovery_iso_server }}/{{ discovery_iso_name }}"
3 |
--------------------------------------------------------------------------------
/roles/boot_iso/meta/main.yml:
--------------------------------------------------------------------------------
1 | galaxy_info:
2 | author: Roger Lopez
3 | description: Boot the Discovery ISO
4 | company: Red Hat, Inc.
5 | license: Apache License, Version 2.0
6 | min_ansible_version: 2.9
7 | galaxy_tags: []
8 |
--------------------------------------------------------------------------------
/roles/boot_iso/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # tasks file for boot_iso
3 |
4 | - name: "Fail playbook without boot_iso_url"
5 | fail:
6 | msg="Missing argument: this playbook requires 'boot_iso' to be defined with the URL of the ISO to boot the systems"
7 | when: boot_iso_url is not defined
8 | delegate_to: bastion
9 |
10 | - name: Boot ISO for "{{ hostvars[inventory_hostname]['vendor'] }}"
11 | ansible.builtin.include_role:
12 | name: "redhatci.ocp.vendors.{{ hostvars[inventory_hostname]['vendor'] | lower }}"
13 | tasks_from: iso.yml
14 | vars:
15 | target_host: "{{ inventory_hostname }}"
16 | args:
17 | apply:
18 | delegate_to: bastion
19 |
--------------------------------------------------------------------------------
/roles/boot_iso/templates/vitual_media.json.j2:
--------------------------------------------------------------------------------
1 | {
2 | "Image": "{{ boot_iso_url }}",
3 | "Inserted": true
4 | }
5 |
--------------------------------------------------------------------------------
/roles/catalog_source/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | cs_namespace: openshift-marketplace
3 | cs_publisher: "Third party"
4 | cs_type: grpc
5 |
6 | ...
7 |
--------------------------------------------------------------------------------
/roles/chart_verifier/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | chart_verifier_image: quay.io/redhat-certification/chart-verifier:main
3 | project_url: "https://catalog.redhat.com/api/containers/v1/vendors/org-id"
4 | organization_id: unknown
5 | ...
6 |
--------------------------------------------------------------------------------
/roles/chart_verifier/tasks/cleanup.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: "Delete tmp tool directory"
3 | ansible.builtin.file:
4 | state: absent
5 | path: "{{ cv_tool_dir.path }}"
6 | when:
7 | - cv_tool_dir is defined
8 | - cv_tool_dir.path is defined
9 |
10 | - name: "Delete work_dir directory"
11 | ansible.builtin.file:
12 | state: absent
13 | path: "{{ tmp_dir.path }}"
14 | when:
15 |     - tmp_dir is defined
16 | - tmp_dir.path is defined
17 | ...
18 |
--------------------------------------------------------------------------------
/roles/chart_verifier/tasks/get-partner-name.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Get Partner Name from Organization
3 | ansible.builtin.uri:
4 | url: "{{ project_url }}/{{ organization_id }}"
5 | method: GET
6 | return_content: true
7 | status_code: [200, 404]
8 | timeout: 60
9 | register: _cv_vendor_partner_name
10 | until:
11 | - _cv_vendor_partner_name.status == 200
12 | or _cv_vendor_partner_name.status == 404
13 | retries: 3
14 | delay: 7
15 |
16 | - name: Set partner name with default if not found
17 | ansible.builtin.set_fact:
18 | partner_name: "{{ _cv_vendor_partner_name.json.label if _cv_vendor_partner_name.status == 200
19 | else 'None' }}" # noqa: redhat-ci[no-role-prefix]
20 | ...
21 |
--------------------------------------------------------------------------------
/roles/chart_verifier/tasks/get-tools.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: "Create tmp tool directory"
3 | ansible.builtin.tempfile:
4 | state: directory
5 | register: cv_tool_dir
6 |
7 | - name: "Download helm binaries"
8 | vars:
9 | helm_ver: "{{ helm_version | default('3.9.0') }}"
10 | ansible.builtin.unarchive:
11 | src: https://get.helm.sh/helm-v{{ helm_ver }}-linux-amd64.tar.gz
12 | dest: "{{ cv_tool_dir.path }}"
13 | extra_opts:
14 | - linux-amd64/helm
15 | - --strip-components=1
16 | remote_src: true
17 |
18 | - name: "Get yq"
19 | vars:
20 | yq_ver: "{{ yq_version | default('4.25.2') }}"
21 | ansible.builtin.get_url:
22 | url: "https://github.com/mikefarah/yq/releases/download/v{{ yq_ver }}/yq_linux_amd64"
23 | dest: "{{ cv_tool_dir.path }}/yq"
24 | mode: "0750"
25 |
26 | - name: "Set tools path"
27 | ansible.builtin.set_fact:
28 | helm_tool_path: "{{ cv_tool_dir.path }}/helm"
29 | yq_tool_path: "{{ cv_tool_dir.path }}/yq"
30 | ...
31 |
--------------------------------------------------------------------------------
/roles/chart_verifier/tasks/mirror-chart-images.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: "Get chart images"
3 | ansible.builtin.shell:
4 | cmd: >
5 | {{ helm_tool_path }} template {{ chart.chart_file }} |
6 | {{ yq_tool_path }} e '..|.image? | select(.)' - |
7 | sort -u | grep -v -- ---
8 | loop: "{{ dci_charts }}"
9 | loop_control:
10 | loop_var: chart
11 | label: "{{ chart.chart_file | basename }}"
12 | register: chart_images
13 |
14 | - name: "Get all images"
15 | ansible.builtin.set_fact:
16 | all_images: "{{ all_images | default([]) | unique + item.stdout_lines }}"
17 | loop: "{{ chart_images.results }}"
18 |
19 | - name: "Mirror chart images"
20 | ansible.builtin.include_role:
21 | name: redhatci.ocp.mirror_images
22 | vars:
23 | mi_images: "{{ all_images }}"
24 | mi_authfile: "{{ pullsecret_tmp_file }}"
25 | ...
26 |
--------------------------------------------------------------------------------
/roles/chart_verifier/templates/report.j2:
--------------------------------------------------------------------------------
1 | Helm Chart Verifier:
2 | Partner name: {{ partner_name }}
3 | Date/Time: {{ ansible_date_time.iso8601 }} {{ ansible_date_time.tz }}
4 | Target repository: {{ target_repository }}
5 | Charts:
--------------------------------------------------------------------------------
/roles/check_resource/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | cr_kind: "MachineConfigPool"
3 | ...
4 |
--------------------------------------------------------------------------------
/roles/check_resource/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Set resource kind
3 | ansible.builtin.set_fact:
4 | cr_kind: "{{ resource_to_check }}"
5 | when: resource_to_check is defined
6 |
7 | - name: Check MachineConfigPool
8 | ansible.builtin.include_tasks:
9 | file: wait-mcp.yml
10 | when: cr_kind == "MachineConfigPool"
11 |
12 | - name: Check SriovNetworkNodeState
13 | ansible.builtin.include_tasks:
14 | file: wait-sriov.yml
15 | when: cr_kind == "SriovNetworkNodeState"
16 |
17 | ...
18 |
--------------------------------------------------------------------------------
/roles/check_resource/tasks/verify_network_state.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: "Wait until all pods in openshift-ovn-kubernetes namespace are running"
3 | kubernetes.core.k8s_info:
4 | kind: Pod
5 | namespace: "openshift-ovn-kubernetes"
6 | register: ovn_pods
7 | until: ovn_pods | json_query('resources[*].status.phase') | unique == ["Running"]
8 | retries: 6
9 | delay: 30
10 |
11 | ...
12 |
--------------------------------------------------------------------------------
/roles/check_resource/tasks/wait-mcp.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: "Pause 60 seconds to wait for MC to be triggered by a resource change"
3 | ansible.builtin.pause:
4 | seconds: 60
5 |
6 | - name: Wait for MCP to be updated
7 | kubernetes.core.k8s_info:
8 | api_version: machineconfiguration.openshift.io/v1
9 | kind: MachineConfigPool
10 | register: _cr_mcp_status
11 | vars:
12 | _cr_status_query: "resources[*].status.conditions[?type=='Updated'].status"
13 | _cr_update_status: "{{ _cr_mcp_status | json_query(_cr_status_query) | flatten | unique }}"
14 | until:
15 | - _cr_mcp_status.resources is defined
16 | - _cr_update_status == ['True']
17 | retries: "{{ check_wait_retries }}"
18 | delay: "{{ check_wait_delay }}"
19 |
20 | ...
21 |
--------------------------------------------------------------------------------
/roles/check_resource/tasks/wait-sriov.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Wait until sriovnetworknodestate becomes ready
3 | kubernetes.core.k8s_info:
4 | api_version: sriovnetwork.openshift.io/v1
5 | kind: SriovNetworkNodeState
6 | register: sriovnetnode
7 | until: sriovnetnode.resources is defined and sriovnetnode.resources | selectattr('status.syncStatus', 'defined') | list | rejectattr('status.syncStatus', 'equalto', 'Succeeded') | list | count == 0
8 | retries: "{{ check_wait_retries }}"
9 | delay: "{{ check_wait_delay }}"
10 |
11 | ...
12 |
--------------------------------------------------------------------------------
/roles/cluster_compare/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # cc_reference_container_source: "quay.io/openshift-kni/ztp-site-generator:4.18"
3 | # cc_reference_repository: ""
4 | # cc_reference_branch: "main"
5 | # cc_reference_metadata_yaml_location: "/home/ztp/reference"
6 | # cc_compare_container_source: "registry-proxy.engineering.redhat.com/rh-osbs/openshift-kube-compare-artifacts:v4.18"
7 | # cc_compare_container_executable: "/usr/share/openshift/linux_amd64/kube-compare.rhel8"
8 | # cc_report_creator_version: "latest"
9 | cc_infra_type: core
10 | cc_ocp_supported: 4.16
11 | cc_logging: true
12 | ...
13 |
--------------------------------------------------------------------------------
/roles/cluster_compare/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Validate requirements
3 | ansible.builtin.include_tasks: validations.yml
4 |
5 | - name: Create working directory
6 | ansible.builtin.tempfile:
7 | state: directory
8 | prefix: cluster_compare
9 | register: _cc_tmp_dir
10 |
11 | - name: Set work directory path
12 | ansible.builtin.set_fact:
13 | cc_work_dir: "{{ _cc_tmp_dir.path }}"
14 |
15 | - name: Test with Upstream
16 | ansible.builtin.include_tasks: upstream.yml
17 |
18 | # - name: Test with downstream container images
19 | # ansible.builtin.include_tasks: containers.yml
20 |
21 | - name: Remove working directory
22 | ansible.builtin.file:
23 | path: "{{ cc_work_dir }}"
24 | state: absent
25 | ...
26 |
--------------------------------------------------------------------------------
/roles/cluster_compare/vars/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | cc_reference_dir: "reference"
3 | cc_report_name: "cluster-compare-report.xml"
4 | cc_compare_output_file: "cluster-compare.json"
5 | cc_compare_human_file: "cluster-compare.txt"
6 | ...
7 |
--------------------------------------------------------------------------------
/roles/configure_ztp_gitops_apps/defaults/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | czga_podman_runner_host: podman-runner
4 | czga_clusters_namespace: clusters-sub
5 | czga_kubeconfig_path: "{{ omit }}"
6 | czga_policies_namespace: policies-sub
7 | czga_oc_tool_path: "{{ oc_tool_path | default('/usr/local/bin/oc') }}"
8 | czga_site_generator_image: registry.redhat.io/openshift4/ztp-site-generate-rhel8
9 | czga_multicluster_image: registry.redhat.io/rhacm2/multicluster-operators-subscription-rhel9
10 |
--------------------------------------------------------------------------------
/roles/configure_ztp_gitops_repo/README.md:
--------------------------------------------------------------------------------
1 | Configure ZTP GitOps Repo
2 | =========
3 |
4 | This role creates the tracking branch used by ArgoCD, starting from an existing branch.
5 |
6 | Requirements
7 | ------------
8 |
9 | * The repository must already exist
10 |
11 | Role Variables
12 | --------------
13 |
14 | * target_ztp_gitops_repo
15 | * target_ztp_gitops_repo_src_branch
16 | * target_ztp_gitops_repo_dst_branch
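17 |
18 | Example Playbook
19 | ----------------
20 |
21 | A minimal sketch with hypothetical repository and branch names:
22 |
23 | ```yaml
24 | - name: Create the ArgoCD tracking branch
25 |   ansible.builtin.include_role:
26 |     name: redhatci.ocp.configure_ztp_gitops_repo
27 |   vars:
28 |     target_ztp_gitops_repo: "https://gitea.example.com/ztp/site-configs.git"
29 |     target_ztp_gitops_repo_src_branch: main
30 |     target_ztp_gitops_repo_dst_branch: argocd-tracking
31 | ```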
--------------------------------------------------------------------------------
/roles/configure_ztp_gitops_repo/defaults/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 |
--------------------------------------------------------------------------------
/roles/conserver/README.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/redhatci/ansible-collection-redhatci-ocp/9ad485a10e0a4db164e516643d75b5da6d718b60/roles/conserver/README.md
--------------------------------------------------------------------------------
/roles/conserver/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | conserver_sol_hosts: []
3 | conserver_socket_hosts: []
4 | ...
5 |
--------------------------------------------------------------------------------
/roles/conserver/files/socat_retry.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 | reconnTimeOut=5
3 | TERM=vt100
4 | while /bin/true
5 | do /bin/socat $* 2>/dev/null
6 | sleep $reconnTimeOut
7 | done
8 |
--------------------------------------------------------------------------------
/roles/conserver/tasks/config-ipmi.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Create conserver-ipmi.cf
3 | become: true
4 | ansible.builtin.template:
5 | dest: "/etc/conserver-ipmi-{{ cluster }}.cf"
6 | src: conserver-ipmi.cf
7 | owner: "root"
8 | group: "root"
9 | mode: "0644"
10 |
11 | - name: Add conserver-ipmi.cf to conserver.cf
12 | become: true
13 | ansible.builtin.lineinfile:
14 | path: /etc/conserver.cf
15 | line: '#include /etc/conserver-ipmi-{{ cluster }}.cf'
16 |
--------------------------------------------------------------------------------
/roles/conserver/tasks/config-libvirt.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Create conserver-libvirt.cf
3 | become: true
4 | ansible.builtin.template:
5 | dest: "/etc/conserver-libvirt-{{ cluster }}.cf"
6 | src: conserver-libvirt.cf
7 | owner: "root"
8 | group: "root"
9 | mode: "0644"
10 |
11 | - name: Copy socat_retry.sh to /usr/local/bin
12 | become: true
13 | ansible.builtin.copy:
14 | dest: /usr/local/bin/socat_retry.sh
15 | src: socat_retry.sh
16 | owner: "root"
17 | group: "root"
18 | mode: "0755"
19 |
20 | - name: Add conserver-libvirt.cf to conserver.cf
21 | become: true
22 | ansible.builtin.lineinfile:
23 | path: /etc/conserver.cf
24 | line: '#include /etc/conserver-libvirt-{{ cluster }}.cf'
25 |
--------------------------------------------------------------------------------
/roles/conserver/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Handle package install
3 | ansible.builtin.include_tasks: packages.yml
4 | - name: Handle configuration
5 | ansible.builtin.include_tasks: config.yml
6 | - name: Handle systemd service
7 | ansible.builtin.include_tasks: service.yml
8 | ...
9 |
--------------------------------------------------------------------------------
/roles/conserver/tasks/packages.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Install additional packages
3 | become: true
4 | ansible.builtin.package:
5 | name:
6 | - conserver
7 | - conserver-client
8 | - ipmitool
9 | - socat
10 | state: present
11 | ...
12 |
--------------------------------------------------------------------------------
/roles/conserver/tasks/service.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Enable and start service
3 | become: true
4 | ansible.builtin.systemd:
5 | name: "conserver.service"
6 | state: restarted
7 | enabled: true
8 | daemon_reload: true
9 | ...
10 |
--------------------------------------------------------------------------------
/roles/conserver/templates/conserver-ipmi.cf:
--------------------------------------------------------------------------------
1 | default {{ cluster }} {
2 | # The '&' character is substituted with the console name
3 | logfile /var/consoles/{{ cluster }}/&;
4 | timestamp 5mab;
5 | master localhost;
6 | }
7 |
8 | {% for host in conserver_sol_hosts %}
9 | console {{ cluster }}-{{ hostvars[host].name }} {
10 | include {{ cluster }};
11 | type exec;
12 | exec "/usr/bin/ipmitool -I lanplus -H {{ hostvars[host].ipmi_address }} -U {{ hostvars[host].ipmi_user }} -P {{ hostvars[host].ipmi_password }} sol deactivate && echo 'Disconnected' ; ulimit -v 31720 ; /usr/bin/ipmitool -I lanplus -H {{ hostvars[host].ipmi_address }} -U {{ hostvars[host].ipmi_user }} -P {{ hostvars[host].ipmi_password }} sol activate";
13 | }
14 | {% endfor %}
15 |
16 | access * {
17 | trusted 127.0.0.1;
18 | }
19 |
--------------------------------------------------------------------------------
/roles/conserver/templates/conserver-libvirt.cf:
--------------------------------------------------------------------------------
1 | default libvirt {
2 | rw *;
3 | type exec;
4 | exec "/usr/local/bin/socat_retry.sh STDIN UNIX-CONNECT:/var/lib/libvirt/consoles/Q";
5 | execsubst Q=hs;
6 | timestamp 5mab;
7 | logfile /var/consoles/{{ cluster }}/&;
8 | master localhost;
9 | }
10 |
11 | {% for host in conserver_socket_hosts %}
12 | console {{ cluster }}-{{ host }} { include libvirt; host {{ host }}.console; }
13 | {% endfor %}
14 |
15 | access * {
16 | trusted 127.0.0.1;
17 | }
18 |
--------------------------------------------------------------------------------
/roles/copy_and_render/README.md:
--------------------------------------------------------------------------------
1 | # copy_and_render
2 |
3 | Role to copy files and subdirectories from a source directory to a target directory, then render and replace all Jinja2 `.j2` templates.
4 |
5 | Role Variables:
6 |
7 | `car_source_dir` (string, required): Source directory on the control machine containing files and `.j2` templates.
8 | `car_target_dir` (string, required): Destination directory on the remote host where files will be copied and templates rendered.
9 |
10 | Example Playbook:
11 |
12 | ```yaml
13 | - hosts: all
14 | roles:
15 | - role: copy_and_render
16 | vars:
17 | car_source_dir: "/path/to/source"
18 | car_target_dir: "/path/to/target"
19 | ```
20 |
21 | Requirements:
22 |
23 | - Ansible 2.9 or higher
24 |
25 | License:
26 |
27 | Apache-2.0
28 |
--------------------------------------------------------------------------------
/roles/create_certification_project/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | catalog_url: "https://catalog.redhat.com/api/containers/v1"
3 | connect_url: "https://connect.redhat.com/projects"
4 | # TODO: generalize for operator projects
5 | check_for_existing_projects: false
6 | # Page size for API query to retrieve the cert projects
7 | page_size: 200
8 | ...
9 |
--------------------------------------------------------------------------------
/roles/create_certification_project/files/github_token.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/redhatci/ansible-collection-redhatci-ocp/9ad485a10e0a4db164e516643d75b5da6d718b60/roles/create_certification_project/files/github_token.png
--------------------------------------------------------------------------------
/roles/create_certification_project/tasks/attach_product_listings.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Loop over Product Listings and attach them all
3 | ansible.builtin.include_tasks: attach_one_product_listing.yml
4 | loop: "{{ cert_item.pyxis_product_lists }}"
5 | loop_control:
6 | loop_var: pyxis_product_list_identifier
7 | ...
8 |
--------------------------------------------------------------------------------
/roles/create_certification_project/tasks/check_if_project_exists.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Possible product types: container, operator, helmchart, cnf
3 | - name: Check if certification project exists in the same org
4 | ansible.builtin.include_tasks: "check_if_{{ product_type }}_project_exists.yml"
5 | ...
6 |
--------------------------------------------------------------------------------
/roles/create_certification_project/tasks/get-gh-username.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Verify that Github token to open PR is provided
3 | ansible.builtin.assert:
4 | that:
5 | - github_token_path is defined
6 | fail_msg: "Please provide github_token_path as described in the documentation"
7 |
8 | - name: Get Github Binary
9 | ansible.builtin.include_role:
10 | name: redhatci.ocp.create_pr
11 | tasks_from: get-gh-bin.yml
12 |
13 | - name: Login to Github
14 | ansible.builtin.shell:
15 | cmd: >
16 | {{ gh_tool_path }} auth login --with-token < {{ github_token_path }}
17 | register: gh_login
18 | failed_when: gh_login.rc != 0
19 |
20 | - name: "Get GitHub status details"
21 | ansible.builtin.command: "{{ gh_tool_path }} api user --jq .login"
22 | register: github_status
23 |
24 | - name: "Set the GitHub username"
25 | ansible.builtin.set_fact:
26 | github_username: "{{ github_status.stdout }}"
27 | ...
28 |
--------------------------------------------------------------------------------
/roles/create_certification_project/tasks/update_project.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Validate cert_settings required to update the project
3 | ansible.builtin.include_tasks: validate_cert_settings.yml
4 |
5 | - name: Print JSON draft for the project to be updated
6 | vars:
7 | template_filename: "templates/update_project_{{ product_type }}.json.j2"
8 | ansible.builtin.debug:
9 | msg: "{{ lookup('template', template_filename) }}"
10 |
11 | - name: Update certification project Settings
12 | vars:
13 | template_filename: "templates/update_project_{{ product_type }}.json.j2"
14 | ansible.builtin.uri:
15 | url: "{{ catalog_url }}/projects/certification/id/{{ cert_project_id }}"
16 | method: PATCH
17 | headers:
18 | X-API-KEY: "{{ lookup('file', pyxis_apikey_path) }}"
19 | body_format: json
20 | body: "{{ lookup('template', template_filename) }}"
21 | status_code: 200
22 | timeout: 120
23 | ...
24 |
--------------------------------------------------------------------------------
/roles/create_certification_project/templates/attach_product_listing.json.j2:
--------------------------------------------------------------------------------
1 | {
2 | "cert_projects": {{ all_cert_projects | to_nice_json(indent=2) }},
3 | "published": {{ pl_published.json.published if pl_published is succeeded else false | bool }},
4 | "target_platforms": [
5 | "Red Hat OpenShift"
6 | ],
7 | "type": "container stack"
8 | }
9 |
--------------------------------------------------------------------------------
/roles/create_certification_project/templates/create_project_cnf.json.j2:
--------------------------------------------------------------------------------
1 | {
2 | "certification_status": "Started",
3 | {% if cert_settings is defined and cert_settings.email_address is defined %}
4 | "contacts": [
5 | {
6 | "email_address": "{{ cert_settings.email_address }}",
7 | "type": "Technical contact"
8 | }
9 | ],
10 | {% endif %}
11 | "name": "{{ cert_item.cnf_name }}",
12 | "project_status": "active",
13 | "type": "OpenShift-cnf"
14 | }
15 |
--------------------------------------------------------------------------------
/roles/create_certification_project/templates/create_project_container.json.j2:
--------------------------------------------------------------------------------
1 | {
2 | "certification_status": "Started",
3 | "container": {
4 | "auto_publish": false,
5 | "distribution_method": "external",
6 | "os_content_type": "Red Hat Universal Base Image (UBI)",
7 | "privileged": false,
8 | "registry": "{{ current_operator_image.split('/')[0] }}",
9 | "repository": "{{ current_operator_image.split('@')[0].split(':')[0].split('/')[1:] | join('/') }}",
10 | "type": "container"
11 | },
12 | "marketplace": {
13 | "enablement_status": "ineligible"
14 | },
15 | "name": "{{ current_operator_image.split('@')[0].split(':')[0].split('/')[-1] }}",
16 | "project_status": "active",
17 | "type": "Containers"
18 | }
19 |
--------------------------------------------------------------------------------
/roles/create_certification_project/templates/create_project_helmchart.json.j2:
--------------------------------------------------------------------------------
1 | {
2 | "certification_status": "Started",
3 | "contacts": [
4 | {
5 | "email_address": "{{ cert_settings.email_address }}",
6 | "type": "Technical contact"
7 | }
8 | ],
9 | "helm_chart": {
10 | "application_categories": [
11 | "{{ cert_settings.application_categories }}"
12 | ],
13 | "chart_name": "{{ cert_item.chart_name }}",
14 | "distribution_instructions": "{{ cert_settings.distribution_instructions }}",
15 | "distribution_method": "{{ cert_settings.distribution_method }}",
16 | "github_usernames": [
17 | "{{ cert_settings.github_usernames }}"
18 | ],
19 | "long_description": "{{ cert_settings.long_description }}",
20 | "short_description": "{{ cert_item.short_description }}"
21 | },
22 | "marketplace": {
23 | "enablement_status": "ineligible"
24 | },
25 | "name": "{{ cert_item.chart_name }}",
26 | "project_status": "active",
27 | "type": "Helm Chart"
28 | }
29 |
--------------------------------------------------------------------------------
/roles/create_certification_project/templates/update_project_helmchart.json.j2:
--------------------------------------------------------------------------------
1 | {
2 | "contacts": [
3 | {
4 | "email_address": "{{ cert_settings.email_address }}",
5 | "type": "Technical contact"
6 | }
7 | ],
8 | "helm_chart": {
9 | "application_categories": [
10 | "{{ cert_settings.application_categories }}"
11 | ],
12 | "chart_name": "{{ cert_item.chart_name }}",
13 | "distribution_instructions": "{{ cert_settings.distribution_instructions }}",
14 | "distribution_method": "{{ cert_settings.distribution_method }}",
15 | "github_usernames": [
16 | "{{ cert_settings.github_usernames }}"
17 | ],
18 | "long_description": "{{ cert_settings.long_description }}",
19 | "short_description": "{{ cert_item.short_description }}"
20 | },
21 | "marketplace": {
22 | "enablement_status": "ineligible"
23 | },
24 | "name": "{{ cert_item.chart_name }}",
25 | "project_status": "active",
26 | "type": "Helm Chart",
27 | }
28 |
--------------------------------------------------------------------------------
/roles/create_helmchart/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | catalog_url: "https://catalog.redhat.com/api/containers/v1"
3 | page_size: 200
4 | ...
5 |
--------------------------------------------------------------------------------
/roles/create_helmchart/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # TODO: this role should be merged with create_certification_project
3 | - name: Create certification projects in a loop
4 | ansible.builtin.include_role:
5 | name: create_certification_project
6 | vars:
7 | product_type: "helmchart"
8 | loop: "{{ helmchart_to_certify }}"
9 | loop_control:
10 | loop_var: cert_item
11 | ...
12 |
--------------------------------------------------------------------------------
/roles/create_pr/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | publish_chart: true
3 | ...
4 |
--------------------------------------------------------------------------------
/roles/create_pr/tasks/get-gh-bin.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: "Create tmp tool directory"
3 | ansible.builtin.tempfile:
4 | state: directory
5 | register: cv_tool_dir
6 |
7 | - name: "Set GH version"
8 | ansible.builtin.set_fact:
9 | gh_version: "{{ gh_version | default('2.12.1') }}"
10 |
11 | - name: "Unarchive GH release tarball"
12 | ansible.builtin.unarchive:
13 | src: "https://github.com/cli/cli/releases/download/v{{ gh_version }}/gh_{{ gh_version }}_linux_amd64.tar.gz"
14 | dest: "{{ cv_tool_dir.path }}"
15 | remote_src: true
16 | extra_opts:
17 | - --strip-components=2
18 |
19 | - name: "Set tools path"
20 | ansible.builtin.set_fact:
21 | gh_tool_path: "{{ cv_tool_dir.path }}/gh"
22 | ...
23 |
--------------------------------------------------------------------------------
/roles/create_pr/templates/ci.yaml.j2:
--------------------------------------------------------------------------------
1 | cert_project_id: {{ cert_project_id }}
2 | preflight-trigger: false
3 | upload-artifacts: false
4 | merge: {% if operator.merge_pr | default(false) | bool %}true{% else %}false{% endif %}
5 |
--------------------------------------------------------------------------------
/roles/create_rhde_builder/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Setup host environment
3 | become: true
4 | block:
5 | - name: "Install virt-manager"
6 | ansible.builtin.package:
7 | name: "{{ virt_packages }}"
8 | state: present
9 |
10 | - name: Start libvirtd
11 | ansible.builtin.service:
12 | name: libvirtd
13 | state: started
14 |
15 | - name: Start the default virsh network
16 | become: true
17 | community.libvirt.virt_net:
18 | name: default
19 | state: active
20 |
21 | - name: Cleanup previous RHDE VM / Image
22 | ansible.builtin.import_tasks: cleanup_rhde_builder.yml
23 |
24 | - name: Prepare RHDE virt image
25 | ansible.builtin.import_tasks: prepare_rhde_virt_image.yml
26 |
27 | - name: Start RHDE builder VM
28 | ansible.builtin.import_tasks: start_rhde_builder_vm.yml
29 | ...
30 |
--------------------------------------------------------------------------------
/roles/create_vms/README.md:
--------------------------------------------------------------------------------
1 | # create_vms
2 |
3 | Provisions the libvirt network, the storage pools, and the KVM nodes.
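4 |
5 | Example Playbook (a minimal sketch; the host and variable values are placeholders, and `create_vms_network_mode` must be one of `bridge` or `nat` as validated in `tasks/prepare_network.yml`):
6 |
7 | ```yaml
8 | - hosts: vm_host
9 |   roles:
10 |     - role: redhatci.ocp.create_vms
11 |       vars:
12 |         cluster_name: mycluster
13 |         create_vms_network_mode: nat
14 | ```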
--------------------------------------------------------------------------------
/roles/create_vms/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Setup host environment
3 | become: true
4 | block:
5 | - name: "Install virt-manager"
6 | package:
7 | name: "{{ virt_packages }}"
8 | state: present
9 |
10 | - name: Insert lines for non-root user
11 | blockinfile:
12 | state: present
13 | dest: /etc/libvirt/qemu.conf
14 | block: |
15 | user= "{{ qemu_user }}"
16 | group= "{{ qemu_user }}"
17 | when: qemu_user != "root"
18 |
19 | - name: Start libvirtd
20 | service:
21 | name: libvirtd
22 | state: started
23 |
24 | - name: Prepare Network
25 | import_tasks: prepare_network.yml
26 |
27 | - name: Prepare Storage Pool
28 | import_tasks: prepare_storage_pool.yml
29 |
30 | - name: Prepare VMS
31 | import_tasks: provision_vms.yml
32 |
--------------------------------------------------------------------------------
/roles/create_vms/tasks/prepare_network.yml:
--------------------------------------------------------------------------------
1 | - name: Setup network
2 | become: true
3 | block:
4 | - name: Check if the selected network mode is allowed
5 | vars:
6 | allowed_vm_network_modes:
7 | - bridge
8 | - nat
9 | assert:
10 | that:
11 | - create_vms_network_mode in allowed_vm_network_modes
12 | fail_msg: "{{ create_vms_network_mode }} is not a supported network mode"
13 |
14 | - name: Define network
15 | community.libvirt.virt_net:
16 | name: "{{ network_name }}"
17 | command: define
18 | xml: "{{ lookup('template', 'network.xml.j2') }}"
19 |
20 | - name: Start network
21 | community.libvirt.virt_net:
22 | name: "{{ network_name }}"
23 | command: start
24 |
--------------------------------------------------------------------------------
/roles/create_vms/tasks/prepare_storage_pool.yml:
--------------------------------------------------------------------------------
1 | # Sushy-tools does not allow you to specify the libvirt storage pool, and assumes
2 | # that default exists, so we need to make sure that it does
3 | - name: Handle default storage pool
4 | become: true
5 | block:
6 | - name: Make images dir
7 | file:
8 | path: "{{ images_dir }}"
9 | state: directory
10 | recurse: true
11 |
12 | - name: Create default storage pool
13 | community.libvirt.virt_pool:
14 | command: define
15 | name: default
16 | xml: '{{ lookup("template", "storage-pool.xml.j2") }}'
17 | autostart: true
18 |
19 | - name: Start default storage pool
20 | community.libvirt.virt_pool:
21 | name: default
22 | state: active
23 |
--------------------------------------------------------------------------------
/roles/create_vms/templates/network.xml.j2:
--------------------------------------------------------------------------------
1 | <network>
2 |   <name>{{ network_name }}</name>
3 |   <uuid>{{ net_uuid | default(99999999 | random | to_uuid) }}</uuid>
4 | {% if create_vms_network_mode == 'bridge' %}
5 |   <forward mode='bridge'/>
6 |   <bridge name='{{ vm_bridge_name }}'/>
7 | {% elif create_vms_network_mode == 'nat' %}
8 |   <forward mode='nat'/>
9 |   <bridge name='{{ vm_bridge_name }}' stp='on' delay='0'/>
10 | {% endif %}
11 | </network>
12 |
--------------------------------------------------------------------------------
/roles/create_vms/templates/rng_device.xml.j2:
--------------------------------------------------------------------------------
1 | <rng model='virtio'>
2 |   <backend model='random'>/dev/urandom</backend>
3 | </rng>
4 |
--------------------------------------------------------------------------------
/roles/create_vms/templates/storage-pool.xml.j2:
--------------------------------------------------------------------------------
1 | <pool type='dir'>
2 |   <name>default</name>
3 |   <target>
4 |     <path>{{ images_dir }}</path>
5 |     <permissions>
6 |       <mode>0711</mode>
7 |       <owner>0</owner>
8 |       <group>0</group>
9 |     </permissions>
10 |   </target>
11 | </pool>
12 |
--------------------------------------------------------------------------------
/roles/deploy_cr/README.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/redhatci/ansible-collection-redhatci-ocp/9ad485a10e0a4db164e516643d75b5da6d718b60/roles/deploy_cr/README.md
--------------------------------------------------------------------------------
/roles/deprecated_api/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Default logs location, which simplifies the usage of the role in standalone mode
3 | deprecated_api_logs:
4 | path: "/tmp"
5 | ...
6 |
--------------------------------------------------------------------------------
/roles/deprecated_api/tasks/ensure_ocp_version.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: "Set da_ocp_version to ocp_version if defined"
3 | ansible.builtin.set_fact:
4 | da_ocp_version: "{{ ocp_version }}"
5 | when: ocp_version is defined
6 |
7 | - name: "Get cluster version"
8 | kubernetes.core.k8s_info:
9 | api: config.openshift.io/v1
10 | kind: ClusterVersion
11 | name: version
12 | register: _deprecated_api_cluster_version
13 | when: ocp_version is not defined
14 |
15 | - name: "Set OCP version in major.minor format"
16 | ansible.builtin.set_fact:
17 | da_ocp_version: "{{ _deprecated_api_cluster_version.resources[0].status.desired.version.split('.')[:2] | join('.') }}"
18 | when: ocp_version is not defined
19 | ...
20 |
--------------------------------------------------------------------------------
/roles/deprecated_api/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: "Ensure OCP version to be set"
3 | ansible.builtin.include_tasks: ensure_ocp_version.yml
4 |
5 | - name: "Discover all namespaces"
6 | kubernetes.core.k8s_info:
7 | kind: Namespace
8 | register: _da_cluster_namespaces
9 |
10 | - name: "Build namespace list excluding namespaces starting with openshift and kube"
11 | ansible.builtin.set_fact:
12 | da_all_namespaces: "{{ _da_cluster_namespaces.resources
13 | | map(attribute='metadata.name')
14 | | select('match', '^(?!openshift|kube-).*')
15 | | list }}"
16 |
17 | - name: "Loop over deprecated_namespaces to get API request counts"
18 | ansible.builtin.include_tasks: get_api_request_counts_per_namespace.yml
19 | # Check all namespaces by default unless otherwise requested
20 | loop: "{{ deprecated_api_namespaces | default(da_all_namespaces) }}"
21 | loop_control:
22 | loop_var: da_ns
23 | ...
24 |
--------------------------------------------------------------------------------
/roles/destroy_vms/README.md:
--------------------------------------------------------------------------------
1 | # destroy_vms
2 |
3 | Destroys the libvirt network, the storage pools, the KVM nodes, and the network bridge connection.
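4 |
5 | Example Playbook (a minimal sketch; `cluster_name` is a placeholder, and the defaults below derive the network, bridge, and VM-prefix names from it):
6 |
7 | ```yaml
8 | - hosts: vm_host
9 |   roles:
10 |     - role: redhatci.ocp.destroy_vms
11 |       vars:
12 |         cluster_name: mycluster
13 | ```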
--------------------------------------------------------------------------------
/roles/destroy_vms/defaults/main.yml:
--------------------------------------------------------------------------------
1 | virt_packages:
2 | - python3
3 | - libvirt
4 | - virt-install
5 | - qemu-kvm
6 | - virt-manager
7 | - python3-pip
8 | - python3-lxml
9 | - python3-libvirt
10 |
11 | images_dir: /var/lib/libvirt/images/
12 | path_base_dir: /home/redhat
13 | vm_create_scripts_dir: "{{ path_base_dir }}/vm_create_scripts/"
14 |
15 | vm_bridge_name: "{{ cluster_name }}-br"
16 | network_name: "net-{{ cluster_name }}"
17 |
18 | vm_node_prefix: "{{ cluster_name }}_"
19 |
--------------------------------------------------------------------------------
/roles/efi_boot_mgr/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | ebm_oc_path: "/usr/local/bin/oc"
3 |
--------------------------------------------------------------------------------
/roles/efi_boot_mgr/files/rm-efiboot:
--------------------------------------------------------------------------------
1 | # Remove non-active UEFI boot entries
2 |
3 | echo "=== Current Boot order ==="
4 | efibootmgr --verbose
5 |
6 | if [[ $? -ne 0 ]]; then
7 | echo "Warning: efibootmgr can't remove boot entries"
8 | exit
9 | fi
10 |
11 | boot_current=$(efibootmgr --verbose | sed -n -e 's,BootCurrent: \(.*\),\1,p')
12 | boot_list=($(efibootmgr --verbose | sed -n -e 's/,/ /g' -e 's,BootOrder: \(.*\),\1,p'))
13 |
14 | for bootnum in ${boot_list[*]}; do
15 | if [[ "${boot_id}" == "${boot_current}" ]]; then
16 | continue
17 | fi
18 | efibootmgr --bootnum ${bootnum} --delete-bootnum
19 | done
20 |
21 | echo "=== New Boot order ==="
22 | efibootmgr --verbose
23 |
--------------------------------------------------------------------------------
/roles/etcd_data/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | ed_action: "query"
3 | ed_enc: "aesgcm"
4 | ed_force: false
5 |
--------------------------------------------------------------------------------
/roles/etcd_data/vars/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | ed_valid_actions:
3 | - query
4 | - encrypt
5 | - decrypt
6 | ed_valid_encryption_types:
7 | - aescbc
8 | - aesgcm
9 |
--------------------------------------------------------------------------------
/roles/example_cnf_deploy/tasks/deploy_extra_trex.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: "CNFApp must be TestPMD"
3 | ansible.builtin.assert:
4 | that: ecd_cnfapp_name == 'testpmd'
5 |
6 | # Directly call the trex/retry-trex.yml tasks file
7 | - name: "Deploy an extra TRex job"
8 | ansible.builtin.include_tasks: trex/retry-trex.yml
9 |
--------------------------------------------------------------------------------
/roles/example_cnf_deploy/tasks/draining.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: "CNFApp must be TestPMD"
3 | ansible.builtin.assert:
4 | that: ecd_cnfapp_name == 'testpmd'
5 |
6 | - name: Gather facts
7 | ansible.builtin.include_tasks: draining/gather-facts.yml
8 |
9 | - name: Drain the worker node where testpmd is running
10 | ansible.builtin.include_tasks: draining/node-draining.yml
11 |
12 | - name: Validate allocation of new testpmd pod
13 | ansible.builtin.include_tasks: draining/testpmd-validation.yml
14 |
15 | - name: Validate TRex job
16 | ansible.builtin.include_tasks: draining/trex-validation.yml
17 |
--------------------------------------------------------------------------------
/roles/example_cnf_deploy/tasks/draining/gather-facts.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: "Get testpmd-app pod"
4 | kubernetes.core.k8s_info:
5 | kind: Pod
6 | namespace: "{{ ecd_cnf_namespace }}"
7 | label_selectors:
8 | - example-cnf-type=cnf-app
9 | register: _ecd_cnf_pod
10 |
11 | - name: "Get trex-server pod"
12 | kubernetes.core.k8s_info:
13 | kind: Pod
14 | namespace: "{{ ecd_cnf_namespace }}"
15 | label_selectors:
16 | - example-cnf-type=pkt-gen
17 | register: _ecd_pktgen_pod
18 |
19 | - name: "Retrieve context information from testpmd and trex pods"
20 | ansible.builtin.set_fact:
21 | ecd_cnf_existing_node: "{{ _ecd_cnf_pod.resources[0].spec.nodeName }}"
22 | ecd_cnf_existing_pod: "{{ _ecd_cnf_pod.resources[0].metadata.name }}"
23 | ecd_pktgen_existing_node: "{{ _ecd_pktgen_pod.resources[0].spec.nodeName }}"
24 |
--------------------------------------------------------------------------------
/roles/example_cnf_deploy/tasks/draining/node-draining.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: "Cordon the node where testpmd-app is running"
4 | ansible.builtin.shell: |
5 | {{ ecd_oc_path }} adm cordon {{ ecd_cnf_existing_node }}
6 |
7 | - name: "Wait until node changes to SchedulingDisabled status"
8 | ansible.builtin.shell: >
9 | set -eo pipefail;
10 | {{ ecd_oc_path }} get nodes --no-headers=true |
11 | grep {{ ecd_cnf_existing_node }}
12 | register: _ecd_nodes
13 | until: '"SchedulingDisabled" in _ecd_nodes.stdout'
14 | retries: 6
15 | delay: 10
16 |
17 | # Not really needing a complete draining, just removing testpmd-app pod
18 | # Running this in asynchronous mode, then we will wait until pod is deleted
19 | - name: "Drain the node to remove testpmd-app pod"
20 | ansible.builtin.shell: |
21 | {{ ecd_oc_path }} adm drain {{ ecd_cnf_existing_node }} --pod-selector example-cnf-type=cnf-app --delete-emptydir-data
22 | async: 60
23 | poll: 0
24 |
--------------------------------------------------------------------------------
/roles/example_cnf_deploy/tasks/validate.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: Run validation tasks if the automation deployment was previously launched
4 | when: ecd_run_deployment == 1
5 | block:
6 | - name: "Migrate tasks"
7 | ansible.builtin.include_tasks: validate/migrate.yml
8 |
9 | - name: "Validate tasks"
10 | ansible.builtin.include_tasks: validate/validate.yml
11 | when: ecd_try_running_migration_tests|bool
12 |
--------------------------------------------------------------------------------
/roles/example_cnf_deploy/templates/grout-cr.yaml.j2:
--------------------------------------------------------------------------------
1 | apiVersion: examplecnf.openshift.io/v1
2 | kind: Grout
3 | metadata:
4 | name: grout
5 | namespace: {{ ecd_cnf_namespace }}
6 | spec:
7 | privileged: false
8 | imagePullPolicy: {{ ecd_image_pull_policy }}
9 | size: 1
10 | networks: {{ ecd_networks_cnfapp }}
11 | runDeployment: {{ ecd_run_deployment }}
12 | {% if ecd_high_perf_runtime is defined and ecd_high_perf_runtime|length %}
13 | runtime_class_name: "{{ ecd_high_perf_runtime }}"
14 | {% endif %}
15 | {% if ecd_numa_aware_topology is defined and ecd_numa_aware_topology | length %}
16 | numa_aware_topology: "{{ ecd_numa_aware_topology }}"
17 | {% endif %}
18 |
--------------------------------------------------------------------------------
/roles/example_cnf_deploy/templates/testpmd-cr.yaml.j2:
--------------------------------------------------------------------------------
1 | apiVersion: examplecnf.openshift.io/v1
2 | kind: TestPMD
3 | metadata:
4 | name: testpmd
5 | namespace: {{ ecd_cnf_namespace }}
6 | spec:
7 | privileged: false
8 | imagePullPolicy: {{ ecd_image_pull_policy }}
9 | ethpeerMaclist: {{ ecd_trex_mac_list }}
10 | size: 1
11 | networks: {{ ecd_networks_cnfapp }}
12 | terminationGracePeriodSeconds: {{ ecd_termination_grace_period_seconds }}
13 | reducedMode: {{ ecd_testpmd_reduced_mode }}
14 | runDeployment: {{ ecd_run_deployment }}
15 | {% if ecd_high_perf_runtime is defined and ecd_high_perf_runtime|length %}
16 | runtime_class_name: "{{ ecd_high_perf_runtime }}"
17 | {% endif %}
18 | {% if ecd_numa_aware_topology is defined and ecd_numa_aware_topology | length %}
19 | numa_aware_topology: "{{ ecd_numa_aware_topology }}"
20 | {% endif %}
21 |
--------------------------------------------------------------------------------
/roles/extract_openshift_installer/README.md:
--------------------------------------------------------------------------------
1 | # extract_openshift_installer
2 |
3 | Extracts the openshift-install binary from the release image.
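4 |
5 | A minimal sketch of the equivalent extraction step, using the `release_image`, `pull_secret_file`, and `extract_dest_path` variables from the role defaults; the role's actual tasks may differ:
6 |
7 | ```yaml
8 | - name: Extract openshift-install from the release image (illustrative)
9 |   ansible.builtin.command:
10 |     cmd: >
11 |       oc adm release extract
12 |       --command=openshift-install
13 |       --registry-config {{ pull_secret_file }}
14 |       --to {{ extract_dest_path }}
15 |       {{ release_image }}
16 | ```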
--------------------------------------------------------------------------------
/roles/extract_openshift_installer/defaults/main.yml:
--------------------------------------------------------------------------------
1 | arch: x86_64
2 | version_filter: "[?(openshift_version == '{{ openshift_version }}') && (cpu_architecture == '{{ arch }}')]"
3 | release_image: "{{ (assisted_installer_release_images | json_query(version_filter))[0].url }}"
4 | extract_dest_path: /tmp/wip/extract/
5 | pull_secret_file: "{{ extract_dest_path }}/pull_secret.txt"
6 | openshift_installer_path: "{{ extract_dest_path }}/openshift-install"
7 |
--------------------------------------------------------------------------------
/roles/fbc_catalog/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | fbc_opm_args: ""
3 | fbc_expire: true
4 | fbc_expire_time: 5h
5 | ...
6 |
--------------------------------------------------------------------------------
/roles/fbc_catalog/templates/catalog.Dockerfile.j2:
--------------------------------------------------------------------------------
1 | FROM quay.io/operator-framework/opm:latest
2 | ENTRYPOINT ["/bin/opm"]
3 | CMD ["serve", "/configs"]
4 | ADD catalog /configs
5 | LABEL operators.operatorframework.io.index.configs.v1=/configs
6 | LABEL maintainer="Telco DCI team"{% if fbc_expire %} quay.expires-after={{ fbc_expire_time }}{% endif %}
7 |
--------------------------------------------------------------------------------
/roles/generate_agent_iso/defaults/main.yml:
--------------------------------------------------------------------------------
1 | gai_path_var: "/sbin:/usr/sbin:/usr/local/bin/"
2 | gai_generated_dir: "{{ gai_repo_root_path }}/generated"
3 | gai_manifests_dir: "{{ gai_generated_dir }}/{{ gai_cluster_name }}"
4 | gai_iso_download_dest_path: "/opt/http_store/data"
5 | gai_arch: x86_64
6 | gai_remote_http_src: false
7 | gai_http_delegate_host: http_store
8 |
--------------------------------------------------------------------------------
/roles/generate_manifests/tasks/manifest.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # tasks file for manifests
3 |
4 | - name: Fail if manifest is not a mapping
5 | ansible.builtin.fail:
6 | msg: "Manifest item must be a dict with either template or file as the key"
7 | when: item is not mapping or ("template" not in item and "file" not in item)
8 |
9 | - name: Load manifest
10 | ansible.builtin.set_fact:
11 | manifest_content: "{{ lookup('template', item.template) }}"
12 | manifest_name: "{{ item.template.rsplit('.j2', 1)[0] | basename }}"
13 | when: ("template" in item)
14 |
15 |
16 | - name: Load manifest
17 | ansible.builtin.set_fact:
18 | manifest_content: "{{ lookup('file', item.file) }}"
19 | manifest_name: "{{ item.file | basename }}"
20 | when: ("file" in item)
21 |
22 | - name: Save Manifest
23 | ansible.builtin.copy:
24 | content: "{{ manifest_content }}"
25 | dest: "{{ extra_manifest_dir }}/{{ manifest_name }}"
26 | mode: "0644"
27 |
--------------------------------------------------------------------------------
/roles/generate_manifests/templates/registry-config.j2:
--------------------------------------------------------------------------------
1 | imageContentSources:
2 | - source: quay.io/openshift-release-dev/ocp-release
3 | mirrors:
4 | - {{ mirror_registry }}/{{ registry_repository }}
5 | - source: quay.io/openshift-release-dev/ocp-v4.0-art-dev
6 | mirrors:
7 | - {{ mirror_registry }}/{{ registry_repository }}
8 | - source: registry.ci.openshift.org/ocp/release
9 | mirrors:
10 | - {{ mirror_registry }}/{{ registry_repository }}
11 | - source: quay.io/edge-infrastructure
12 | mirrors:
13 | - {{ mirror_registry }}/ocpmetal
14 | - source: quay.io/edge-infrastructure
15 | mirrors:
16 | - {{ mirror_registry }}/edge-infrastructure
17 | additionalTrustBundle: |
18 | {{ mirror_certificate | indent(2) }}
19 |
--------------------------------------------------------------------------------
/roles/generate_ssh_key_pair/README.md:
--------------------------------------------------------------------------------
1 | # generate_ssh_key_pair
2 |
3 | Generates an ssh key pair
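4 |
5 | Example Playbook (a minimal sketch; `cluster_name` names the generated key files, and `repo_root_path` is a placeholder required by `fetched_dest` in the defaults below):
6 |
7 | ```yaml
8 | - hosts: bastion
9 |   roles:
10 |     - role: redhatci.ocp.generate_ssh_key_pair
11 |       vars:
12 |         cluster_name: mycluster
13 |         repo_root_path: /home/redhat/repo
14 | ```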
--------------------------------------------------------------------------------
/roles/generate_ssh_key_pair/defaults/main.yml:
--------------------------------------------------------------------------------
1 | key_pair_dir: /tmp/ssh_key_pair
2 | private_key_name: "{{ cluster_name }}"
3 | public_key_name: "{{ cluster_name }}.pub"
4 | path_base_dir: /home/redhat
5 | ssh_key_dest_base_dir: "{{ path_base_dir }}"
6 | ssh_key_dest_dir: "{{ ssh_key_dest_base_dir }}/ssh_keys/"
7 | fetched_dest: "{{ repo_root_path }}/fetched"
8 |
--------------------------------------------------------------------------------
/roles/get_logs_from_namespace/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | get_logs_ns: default
3 | get_logs_folder: "{{ job_logs.path }}"
4 |
--------------------------------------------------------------------------------
/roles/gitops_configure_repo/defaults/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | gcr_ssh_key_path: "{{ lookup('env', 'HOME') }}/.ssh/id_rsa"
3 | gcr_argo_cd_known_host_cm: "argocd-ssh-known-hosts-cm"
4 | gcr_private_repo_secret: "private-repo"
5 |
--------------------------------------------------------------------------------
/roles/gitops_configure_repo/templates/http_private_repo.yaml.j2:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Secret
3 | type: Opaque
4 | metadata:
5 | labels:
6 | argocd.argoproj.io/secret-type: repository
7 | name: {{ gcr_private_repo_secret }}
8 | namespace: openshift-gitops
9 | stringData:
10 | # Set project to restrict the access to these credentials
11 | # project: default
12 | type: git
13 | url: {{ gcr_ztp_gitops_repo }}
14 | username: {{ gcr_repo_username }}
15 | password: {{ gcr_repo_password }}
16 |
--------------------------------------------------------------------------------
/roles/gitops_configure_repo/templates/ssh_private_repo.yaml.j2:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Secret
3 | metadata:
4 | name: {{ gcr_private_repo_secret }}
5 | namespace: openshift-gitops
6 | labels:
7 | argocd.argoproj.io/secret-type: repository
8 | stringData:
9 | type: git
10 | url: {{ gcr_ztp_gitops_repo }}
11 | sshPrivateKey: |
12 | {{ lookup('file', gcr_ssh_key_path) | indent(4) }}
13 |
--------------------------------------------------------------------------------
/roles/hco_setup/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | hs_deploy: true
3 | hs_test_vm: false
4 | hs_ns: openshift-cnv
5 | hs_vm_file: vm.yaml.j2
6 | hs_test_vm_image: quay.io/kubevirt/cirros-container-disk-demo:v0.59.2
7 | hs_retries: 60
8 | hs_kubevirt_api_version: v1
9 | ...
10 |
--------------------------------------------------------------------------------
/roles/hco_setup/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Validate requirements
3 | ansible.builtin.include_tasks: validation.yml
4 |
5 | - name: Deploy Hyperconverged Operator
6 | ansible.builtin.include_tasks: deploy-hco.yml
7 | when:
8 | - hs_deploy | bool
9 | tags:
10 | - install
11 |
12 | - name: Deploy test Virtual Machine
13 | ansible.builtin.include_tasks: test-deploy-vm.yml
14 | when:
15 | - hs_test_vm | bool
16 | tags:
17 | - install
18 |
--------------------------------------------------------------------------------
/roles/hco_setup/tasks/validation.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Check if the CNV CRD is present
3 | kubernetes.core.k8s_info:
4 | kind: CustomResourceDefinition
5 | name: kubevirts.kubevirt.io
6 | register: kvirt_crd
7 | no_log: true
8 |
9 | - name: Ensure CNV CRD is present
10 | ansible.builtin.assert:
11 | that:
12 | - kvirt_crd.resources | list | count > 0
13 | fail_msg: "Container-native Virtualization CRD is not present"
14 | ...
15 |
--------------------------------------------------------------------------------
/roles/hostedbm/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # defaults file for hostedbm
3 | hostedbm_agent_ns: "{{ hostedbm_cluster_name }}-agent"
4 | hostedbm_availability_policy: SingleReplica
5 | hostedbm_cluster_ns: clusters
6 | hostedbm_infraenv_name: "{{ hostedbm_cluster_name }}-infraenv"
7 | hostedbm_location_label: ""
8 | hostedbm_node_pool_replicas: 1
9 | hostedbm_no_log: false
10 | hostedbm_out_dir: "{{ hostedbm_working_dir }}/out"
11 | hostedbm_release_image: quay.io/openshift-release-dev/ocp-release:4.15.11-multi
12 | hostedbm_storage_class: lvms-vg1
13 | hostedbm_working_root_dir: "/tmp/hostedcluster"
14 | hostedbm_working_dir: "{{ hostedbm_working_root_dir }}/{{ hostedbm_cluster_name }}"
15 | hostedbm_inject_dns: false
16 | # hostedbm_inject_dns_nameserver:
17 | # hostedbm_bm_cpo_override_image:
18 |
--------------------------------------------------------------------------------
/roles/hostedbm/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # tasks file for hostedbm
3 |
4 | - name: Run Pre validations
5 | ansible.builtin.include_tasks: validations.yml
6 |
7 | - name: Boot hosts with a discovery image, ready to be provisioned
8 | ansible.builtin.include_tasks: create-agent.yml
9 |
10 | - name: Download CLIs
11 | ansible.builtin.include_tasks: download-cli.yml
12 |
13 | - name: Hypershift cluster creation
14 | ansible.builtin.include_tasks: create-cluster.yml
15 |
16 | - name: Configure Metallb and Create Ingress service on Guest cluster
17 | ansible.builtin.include_tasks: post-deploy-step-on-guest-cluster.yml
18 |
--------------------------------------------------------------------------------
/roles/hostedbm/tasks/validations.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Ensure required variables are defined
3 | ansible.builtin.assert:
4 | that:
5 | - "{{ item }} is defined"
6 | fail_msg: "The variable {{ item }} is not defined. Please define it to proceed."
7 | loop:
8 | - hostedbm_cluster_name
9 | - hostedbm_cluster_base_domain
10 | - hostedbm_guest_ingress_ip
11 | - hostedbm_kubeconfig_file
12 |
13 | - name: Manage directories for deployment
14 | ansible.builtin.file:
15 | path: "{{ item.path }}"
16 | mode: "{{ item.mode | default(omit) }}"
17 | state: "{{ item.state }}"
18 | loop:
19 | - path: "{{ hostedbm_working_dir }}"
20 | state: absent
21 | - path: "{{ hostedbm_working_dir }}"
22 | state: directory
23 | mode: "0755"
24 | - path: "{{ hostedbm_out_dir }}"
25 | state: directory
26 | mode: "0755"
27 |
--------------------------------------------------------------------------------
/roles/include_components/tasks/track_rpm.yml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | - name: Create package component
4 | ansible.legacy.dci_component:
5 | display_name: "{{ item }} {{ ansible_facts.packages[item][0].version }}-{{ ansible_facts.packages[item][0].release }}"
6 | version: "{{ ansible_facts.packages[item][0].version }}-{{ ansible_facts.packages[item][0].release }}"
7 | team_id: "{{ job_info['job']['team_id'] }}"
8 | topic_id: "{{ job_info['job']['topic_id'] }}"
9 | type: rpm
10 | state: present
11 | register: package_component
12 |
13 | - name: Attach package component to the job
14 | ansible.legacy.dci_job_component:
15 | component_id: "{{ package_component.component.id }}"
16 | job_id: "{{ job_id }}"
17 | register: job_component_result
18 | until: job_component_result is not failed
19 | retries: 5
20 | delay: 20
21 | when:
22 | - "'component' in package_component"
23 | - "'id' in package_component.component"
24 | ...
25 |
--------------------------------------------------------------------------------
/roles/insert_dns_records/files/nm-dnsmasq.conf:
--------------------------------------------------------------------------------
1 | [main]
2 | dns=dnsmasq
3 |
--------------------------------------------------------------------------------
/roles/insert_dns_records/handlers/main.yml:
--------------------------------------------------------------------------------
1 | - name: "Restart {{ dns_service_name }}"
2 | ansible.builtin.service:
3 | name: "{{ dns_service_name }}"
4 | state: restarted
5 | async: 45
6 | poll: 5
7 | listen: restart_service
8 | become: true
9 |
--------------------------------------------------------------------------------
/roles/insert_dns_records/tasks/create_host_entry.yml:
--------------------------------------------------------------------------------
1 | - name: Make sure ansible_fqdn is populated if required
2 | setup:
3 | delegate_to: "{{ entry_name }}"
4 | delegate_facts: true
5 | when:
6 | - entry_extra_check | default(true)
7 | - hostvars[entry_name]['ansible_fqdn'] is not defined
8 |
9 | - name: "Populate dns entry for {{ entry_name }}"
10 | set_fact:
11 | other_host_dns_records: "{{ (other_host_dns_records | default({})) | combine(
12 | {
13 | entry_address : {
14 | 'name': (other_host_dns_records[entry_address]['name'] | default([])) + [entry_name],
15 | 'address': entry_address,
16 | 'ip': hostvars[entry_name][host_ip_keyword],
17 | }
18 | }
19 | ) }}"
20 |
--------------------------------------------------------------------------------
/roles/insert_dns_records/tasks/dnsmasq.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Create dns file
3 | ansible.builtin.template:
4 | src: openshift-cluster.conf.j2
5 | dest: "/etc/dnsmasq.d/{{ dns_entries_file_name }}"
6 | mode: "0644"
7 | notify: restart_service
8 |
9 | - name: Start dnsmasq
10 | ansible.builtin.service:
11 | name: dnsmasq
12 | state: started
13 | enabled: true
14 |
--------------------------------------------------------------------------------
/roles/insert_dns_records/tasks/network-manager.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Setup network manager to run dnsmasq
3 | ansible.builtin.copy:
4 | src: nm-dnsmasq.conf
5 | dest: /etc/NetworkManager/conf.d/dnsmasq.conf
6 | mode: "0644"
7 |
8 | - name: Create dnsmasq openshift-cluster config file
9 | ansible.builtin.template:
10 | src: openshift-cluster.conf.j2
11 | dest: "/etc/NetworkManager/dnsmasq.d/{{ dns_entries_file_name }}"
12 | mode: "0644"
13 | notify: restart_service
14 |
15 | - name: Start NetworkManager
16 | ansible.builtin.service:
17 | name: NetworkManager
18 | state: started
19 | enabled: true
20 |
21 | - name: Reload NetworkManager
22 | ansible.builtin.service:
23 | name: NetworkManager
24 | state: reloaded
25 |
--------------------------------------------------------------------------------
/roles/insert_dns_records/templates/nm-dnsmasq.conf.j2:
--------------------------------------------------------------------------------
1 | [main]
2 | dns=dnsmasq
3 |
--------------------------------------------------------------------------------
/roles/install_operator_gitops/defaults/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 | metallb: true
3 | database_storage_request: 20Gi
4 | filesystem_storage_request: 20Gi
5 | image_storage_request: 100Gi
6 | rhcos_images: null
7 |
8 | iog_configure_only: false
9 | iog_oc_tool_path: "{{ oc_tool_path | default('/usr/local/bin/oc') }}"
10 |
--------------------------------------------------------------------------------
/roles/installer/README.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/redhatci/ansible-collection-redhatci-ocp/9ad485a10e0a4db164e516643d75b5da6d718b60/roles/installer/README.md
--------------------------------------------------------------------------------
/roles/installer/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # defaults file for installer
3 | cache_enabled: false
4 | provision_cache_store: "{{ ansible_env.HOME }}/rhcos_image_cache/"
5 | registry_port_container: 5000
6 | registry_port: "{{ registry_port_container }}"
7 | registry_dir: /opt/registry
8 | install_config_appends_file: install-config-appends.yml
9 | registry_auth_file: registry-auths.json
10 | disconnected_registry_user: dummy
11 | disconnected_registry_password: dummy
12 | webserver_cache_image: "quay.io/fedora/httpd-24:latest"
13 | webserver_caching_port: "{{ webserver_caching_port_container }}"
14 | webserver_caching_port_container: 8080
15 | registry_creation: false
16 | url_passed: false
17 | httpd_cache_files: "{{ provision_cache_store }}httpd/"
18 |
--------------------------------------------------------------------------------
/roles/installer/files/customize_filesystem/master/.gitkeep:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/redhatci/ansible-collection-redhatci-ocp/9ad485a10e0a4db164e516643d75b5da6d718b60/roles/installer/files/customize_filesystem/master/.gitkeep
--------------------------------------------------------------------------------
/roles/installer/files/customize_filesystem/worker:
--------------------------------------------------------------------------------
1 | master
--------------------------------------------------------------------------------
/roles/installer/files/ipv6-dual-stack-no-upgrade.yml:
--------------------------------------------------------------------------------
1 | apiVersion: config.openshift.io/v1
2 | kind: FeatureGate
3 | metadata:
4 | name: cluster
5 | spec:
6 | featureSet: IPv6DualStackNoUpgrade
7 |
--------------------------------------------------------------------------------
/roles/installer/files/manifests/.gitkeep:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/redhatci/ansible-collection-redhatci-ocp/9ad485a10e0a4db164e516643d75b5da6d718b60/roles/installer/files/manifests/.gitkeep
--------------------------------------------------------------------------------
/roles/installer/files/openshift/.gitkeep:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/redhatci/ansible-collection-redhatci-ocp/9ad485a10e0a4db164e516643d75b5da6d718b60/roles/installer/files/openshift/.gitkeep
--------------------------------------------------------------------------------
/roles/installer/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # handlers file for installer
3 |
--------------------------------------------------------------------------------
/roles/installer/meta/main.yml:
--------------------------------------------------------------------------------
1 | galaxy_info:
2 | author: Roger Lopez
3 | description: The installer role assists in installing Red Hat OpenShift
4 | company: Red Hat, Inc.
5 | license: Apache License, Version 2.0
6 | min_ansible_version: 2.9
7 | galaxy_tags: []
8 | dependencies: []
9 |
--------------------------------------------------------------------------------
/roles/installer/tasks/30_create_metal3.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Generate metal3-config.yaml
3 | template:
4 | src: metal3-config.j2
5 | dest: "{{ dir }}/metal3-config.yaml"
6 | owner: "{{ ansible_user }}"
7 | group: "{{ ansible_user }}"
8 | mode: '0644'
9 | tags: metal3config
10 |
--------------------------------------------------------------------------------
/roles/installer/tasks/40_create_manifest.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Create OpenShift Manifest
3 | shell: |
4 | /usr/local/bin/{{ installer_cmd }} --dir {{ dir }} create manifests
5 | tags: manifests
6 |
7 | - name: Ensure the manifests dir is owned by {{ ansible_user }}
8 | file:
9 | path: "{{ item }}"
10 | state: directory
11 | recurse: true
12 | owner: "{{ ansible_user }}"
13 | group: "{{ ansible_user }}"
14 | mode: '0755'
15 | with_items:
16 | - "{{ dir }}/openshift"
17 | - "{{ dir }}/manifests"
18 | tags: manifests
19 |
20 | - name: Copy the metal3-config.yaml to {{ dir }}/openshift directory
21 | copy:
22 | src: "{{ dir }}/metal3-config.yaml"
23 | dest: "{{ dir }}/openshift/98_metal3-config.yaml"
24 | owner: "{{ ansible_user }}"
25 | group: "{{ ansible_user }}"
26 | mode: '0644'
27 | remote_src: true
28 | when: release_version is ansible.builtin.version('4.3', '<=')
29 | tags: manifests
30 |
--------------------------------------------------------------------------------
/roles/installer/tasks/70_cleanup_sub_man_registration.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Unregister host via subscription-manager
3 | redhat_subscription:
4 | activationkey: "{{ activation_key }}"
5 | org_id: "{{ org_id }}"
6 | state: absent
7 | ignore_errors: true
8 | tags: unregister
9 |
--------------------------------------------------------------------------------
/roles/installer/templates/chrony.conf.j2:
--------------------------------------------------------------------------------
1 | # This file is managed by the machine config operator
2 | {% for server in clock_servers %}
3 | server {{ server }} iburst
4 | {% endfor %}
5 | stratumweight 0
6 | driftfile /var/lib/chrony/drift
7 | rtcsync
8 | makestep 10 3
9 | bindcmdaddress 127.0.0.1
10 | bindcmdaddress ::1
11 | keyfile /etc/chrony.keys
12 | commandkey 1
13 | generatecommandkey
14 | noclientlog
15 | logchange 0.5
16 | logdir /var/log/chrony
17 |
--------------------------------------------------------------------------------
/roles/installer/templates/install-config-appends.j2:
--------------------------------------------------------------------------------
1 | additionalTrustBundle: |
2 | {{ trustbundle | regex_replace('\n', '\n ') }}
3 | {% if release_version is version('4.14', '<') %}
4 | imageContentSources:
5 | {% else %}
6 | imageDigestSources:
7 | {% endif %}
8 | - mirrors:
9 | - {{ local_registry }}/{{ local_repo }}
10 | source: quay.io/openshift-release-dev/ocp-v4.0-art-dev
11 | - mirrors:
12 | - {{ local_registry }}/{{ local_repo }}
13 | source: registry.svc.ci.openshift.org/ocp/release
14 | - mirrors:
15 | - {{ local_registry }}/{{ local_repo }}
16 | source: quay.io/openshift-release-dev/ocp-release
17 |
--------------------------------------------------------------------------------
/roles/installer/templates/metal3-config.j2:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: ConfigMap
3 | metadata:
4 | name: metal3-config
5 | namespace: openshift-machine-api
6 | data:
7 | cache_url: ''
8 | deploy_kernel_url: http://{{ prov_ip|ipwrap }}:6180/images/ironic-python-agent.kernel
9 | deploy_ramdisk_url: http://{{ prov_ip|ipwrap }}:6180/images/ironic-python-agent.initramfs
10 | dhcp_range: {{ prov_dhcp_range | default('172.22.0.10,172.22.0.100') }}
11 | http_port: "6180"
12 | ironic_endpoint: http://{{ prov_ip|ipwrap }}:6385/v1/
13 | ironic_inspector_endpoint: http://{{ prov_ip|ipwrap }}:5050/v1/
14 | provisioning_interface: {{ masters_prov_nic }}
15 | provisioning_ip: {{ prov_ip }}/24
16 | {% if clusterosimage is defined and clusterosimage|length %}
17 | rhcos_image_url: {{ clusterosimage }}
18 | {% else %}
19 | rhcos_image_url: {{ rhcos_path }}{{ rhcos_uri }}
20 | {% endif %}
21 |
--------------------------------------------------------------------------------
/roles/installer/tests/inventory:
--------------------------------------------------------------------------------
1 | localhost
2 |
--------------------------------------------------------------------------------
/roles/installer/tests/test.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | remote_user: root
4 | roles:
5 | - installer
6 |
--------------------------------------------------------------------------------
/roles/jenkins_job_launcher/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | jjl_delay_seconds: 60
3 | ...
4 |
--------------------------------------------------------------------------------
/roles/junit2json/defaults/main.yml:
--------------------------------------------------------------------------------
1 | # SPDX-License-Identifier: Apache-2.0
2 | ---
3 | # defaults file for role redhatci.ocp.junit2json
4 |
5 | # safeguard global variable:
6 | global_json_reports_list: []
7 |
8 | # defaults:
9 | junit2_input_merged_report: 'merged.junit.xml'
10 | junit2_output_merged_report: 'merged.junit.json'
11 | junit2_do_merge: true
12 | junit2_out_str: true
13 |
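A minimal sketch of invoking the role while overriding these defaults (the semantics noted in the comment are an assumption based on the variable name):

```yaml
- name: Convert JUnit XML reports to a single JSON report
  ansible.builtin.include_role:
    name: redhatci.ocp.junit2json
  vars:
    junit2_do_merge: true
    junit2_output_merged_report: merged.junit.json
    junit2_out_str: false   # assumption: emit parsed objects rather than JSON strings
```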
--------------------------------------------------------------------------------
/roles/junit2json/meta/main.yml:
--------------------------------------------------------------------------------
1 | # SPDX-License-Identifier: Apache-2.0
2 | ---
3 | galaxy_info:
4 | author: Max Kovgan
5 | description: Converts XML JUnit reports, passed directly or found in a given directory, into a single or fragmented JSON report file(s)
6 | company: Red Hat, Inc.
7 | issue_tracker_url: https://github.com/redhatci/ansible-collection-redhatci-ocp/issues
8 | license: Apache-2.0
9 | min_ansible_version: "2.9"
10 | galaxy_tags: []
11 | dependencies: []
12 |
--------------------------------------------------------------------------------
/roles/junit2json/tasks/validate-dependency.yml:
--------------------------------------------------------------------------------
1 | # SPDX-License-Identifier: Apache-2.0
2 | ---
3 | # tasks file validate-dependency.yml for role redhatci.ocp.junit2json
4 |
5 | - name: Check if python dependency is installed - {{ item.package }}
6 | ansible.builtin.command:
7 | cmd: |
8 | command -v {{ item.executable }}
9 | register: _junit2json_dep_check_command
10 | changed_when: false
11 | failed_when: false
12 |
13 | - name: Respond to the case if the dependency is not installed
14 | ansible.builtin.assert:
15 | that:
16 | - _junit2json_dep_check_command.rc == 0
17 | fail_msg: |
18 | The script '{{ item.executable }}' from Python package '{{ item.package }}' is not on PATH.
19 | Install the collection's dependencies by using ansible-galaxy or install its dependencies manually by
20 | using the file 'meta/requirements.txt'.
21 |
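Given the `item.package` / `item.executable` references above, this file is presumably included in a loop over entries shaped like the following sketch (the package name is illustrative):

```yaml
- name: Validate python dependencies
  ansible.builtin.include_tasks: validate-dependency.yml
  loop:
    - package: junitparser      # illustrative Python package
      executable: junitparser   # script it is expected to put on PATH
```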
--------------------------------------------------------------------------------
/roles/junit2json/tests/inventory:
--------------------------------------------------------------------------------
1 | localhost ansible_connection=local ansible_python_interpreter="python3"
2 |
3 |
--------------------------------------------------------------------------------
/roles/k8s_best_practices_certsuite/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Run k8s_best_practices_certsuite stages
3 | block:
4 | - name: Execute k8s_best_practices_certsuite pre-run stage
5 | ansible.builtin.include_tasks: pre-run.yml
6 |
7 | - name: Execute k8s_best_practices_certsuite tests stage
8 | ansible.builtin.include_tasks: tests.yml
9 |
10 | always:
11 | - name: Execute k8s_best_practices_certsuite teardown stage
12 | ansible.builtin.include_tasks: teardown.yml
13 | ...
14 |
--------------------------------------------------------------------------------
/roles/k8s_best_practices_certsuite/tasks/prepare_json_authentication.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Copy authentication config
3 | ansible.builtin.copy:
4 | src: "{{ kbpc_partner_creds }}"
5 | dest: "{{ kbpc_certsuite_dir }}/config_files/config.json"
6 | mode: "0750"
7 |
8 | - name: Update kbpc_dockercfg_path variable
9 | ansible.builtin.set_fact:
10 | kbpc_dockercfg_path: "{{ kbpc_certsuite_dir }}/config_files/config.json"
11 | ...
12 |
--------------------------------------------------------------------------------
/roles/k8s_best_practices_certsuite/templates/feedback.js.j2:
--------------------------------------------------------------------------------
1 | var feedback={
2 | {% for key, value in kbpc_feedback.items() %}
3 | "source-telco-{{ key }}": "{{ value }}",
4 | "source-nontelco-{{ key }}": "{{ value }}",
5 | "source-faredge-{{ key }}": "{{ value }}",
6 | "source-extended-{{ key }}": "{{ value }}",
7 | "source-all-{{ key }}": "{{ value }}"{% if not loop.last %},{% endif %}
8 |
9 | {% endfor %}
10 | }
11 |
--------------------------------------------------------------------------------
/roles/kvirt_vm/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | kvirt_vm_api_version: kubevirt.io/v1
3 | kvirt_vm_namespace: default
4 | kvirt_vm_memory: 8Gi
5 | kvirt_vm_disk_mode: ReadWriteOnce
6 | kvirt_vm_disk_size: 60Gi
7 | kvirt_vm_os: rhcos
8 | kvirt_vm_cpu_cores: 8
9 | kvirt_vm_cpu_sockets: 1
10 | kvirt_vm_cpu_threads: 1
11 | kvirt_vm_network_interface_multiqueue: true
12 | kvirt_vm_running: false
13 | kvirt_vm_force: false
14 | kvirt_vm_interfaces:
15 | - masquerade: {}
16 | model: virtio
17 | name: default
18 | kvirt_vm_networks:
19 | - name: default
20 | pod: {}
21 | ...
22 |
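The tasks below iterate over `vm_configs`, reading per-VM overrides with `default()` fallbacks to these role defaults; a minimal entry might look like this sketch (only keys read by the tasks shown here are used):

```yaml
vm_configs:
  - name: hosted-worker-0   # illustrative VM name
    namespace: default      # falls back to kvirt_vm_namespace when omitted
    force: true             # delete any existing VM before creating it
```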
--------------------------------------------------------------------------------
/roles/kvirt_vm/tasks/delete-vm.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: "Delete VM {{ vm.name }}"
3 | kubernetes.core.k8s:
4 | kind: VirtualMachine
5 | api_version: "{{ vm.api_version | default(kvirt_vm_api_version) }}"
6 | name: "{{ vm.name }}"
7 | namespace: "{{ vm.namespace | default(kvirt_vm_namespace) }}"
8 | state: absent
9 |
10 | - name: Wait for VirtualMachine to be deleted
11 | kubernetes.core.k8s_info:
12 | api: "{{ vm.api_version | default(kvirt_vm_api_version) }}"
13 | kind: VirtualMachine
14 | name: "{{ vm.name }}"
15 | namespace: "{{ vm.namespace | default(kvirt_vm_namespace) }}"
16 | register: info
17 | until:
18 | - info.resources is defined
19 | - info.resources | length == 0
20 | retries: 60
21 | delay: 5
22 | ...
23 |
--------------------------------------------------------------------------------
/roles/kvirt_vm/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Run validations
3 | ansible.builtin.include_tasks: validations.yml
4 |
5 | - name: Delete VM
6 | ansible.builtin.include_tasks: delete-vm.yml
7 | when: vm.force | default(kvirt_vm_force) | bool
8 | loop: "{{ vm_configs }}"
9 | loop_control:
10 | loop_var: vm
11 | label: "{{ vm.name }}"
12 |
13 | - name: Create the VM
14 | ansible.builtin.include_tasks: create-vm.yml
15 | loop: "{{ vm_configs }}"
16 | loop_control:
17 | loop_var: vm
18 | label: "{{ vm.name }}"
19 | ...
20 |
--------------------------------------------------------------------------------
/roles/label_nodes/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Label nodes
3 | kubernetes.core.k8s:
4 | definition: "{{ lookup('template', 'node_label.yml') | from_yaml }}"
5 | loop: "{{ hosts_list }}"
6 | loop_control:
7 | loop_var: inv_host
8 | when:
9 | - host_item[inv_host]['labels'] is defined
10 | ...
11 |
--------------------------------------------------------------------------------
/roles/label_nodes/templates/node_label.yml:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Node
3 | metadata:
4 | name: {{ host_item[inv_host].name }}
5 | labels:
6 | {% for key, value in host_item[inv_host].labels.items() %}
7 | {{ key }}: "{{ value }}"
8 | {% endfor %}
9 |
--------------------------------------------------------------------------------
/roles/label_nodes/vars/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | hosts_list: []
3 | host_item: "{{ hostvars }}"
4 | ...
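The role reads `name` and `labels` for each inventory host in `hosts_list` from `hostvars`; an illustrative host variables file that the template above would pick up:

```yaml
# host_vars/worker-0.yml (hypothetical inventory host)
name: worker-0.example.lab
labels:
  node-role.kubernetes.io/worker-cnf: ""
  topology.kubernetes.io/zone: lab1
```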
--------------------------------------------------------------------------------
/roles/manage_firewalld_zone/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | mfz_state: present
3 | mfz_masquerade: false
4 | mfz_ifaces: []
5 | mfz_services: []
6 | mfz_ports: []
7 | ...
8 |
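A sketch of overriding these defaults when applying the role; note the zone name variable (`mfz_zone` here) is an assumption, since it does not appear in this defaults file:

```yaml
- name: Configure a provisioning firewalld zone
  ansible.builtin.include_role:
    name: redhatci.ocp.manage_firewalld_zone
  vars:
    mfz_zone: provisioning        # assumed variable name, not shown above
    mfz_state: present
    mfz_masquerade: true
    mfz_ifaces: [eth1]
    mfz_services: [dhcp, tftp, http]
    mfz_ports: ["6230-6240/udp"]
```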
--------------------------------------------------------------------------------
/roles/manage_firewalld_zone/meta/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | galaxy_info:
3 | author: Jorge A Gallegos
4 | description: Manages a firewalld zone
5 | company: Red Hat, Inc.
6 | license: Apache License, Version 2.0
7 | min_ansible_version: 2.9
8 | galaxy_tags: []
9 |
--------------------------------------------------------------------------------
/roles/metallb_setup/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | mlb_ocp_supported: 4.10
3 | mlb_namespace: metallb-system
4 | mlb_bfd_profile: bfd-fastest
5 | mlb_ipv4_enabled: true
6 | mlb_ipv6_enabled: false
7 | mlb_action: install
8 | mlb_wait_delay: 10
9 | mlb_wait_retries: 18
10 | ...
11 |
--------------------------------------------------------------------------------
/roles/metallb_setup/tasks/clean-resources.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: "Delete MetalLB BGP Peers"
3 | kubernetes.core.k8s:
4 | state: absent
5 | api: metallb.io/v1beta2
6 | kind: BGPPeer
7 | name: "{{ peer.name }}"
8 | namespace: "{{ mlb_namespace }}"
9 | wait: true
10 | wait_condition:
11 | status: "False"
12 | wait_sleep: 2
13 | wait_timeout: 300
14 | loop: "{{ mlb_bgp_peers }}"
15 | loop_control:
16 | loop_var: peer
17 | when:
18 | - mlb_bgp_peers is defined
19 |
20 | - name: "Delete MetalLB resources"
21 | kubernetes.core.k8s:
22 | state: absent
23 | api: "{{ resource.apiVer }}"
24 | kind: "{{ resource.kind }}"
25 | name: "{{ resource.name }}"
26 | namespace: "{{ mlb_namespace }}"
27 | wait: true
28 | wait_condition:
29 | status: "False"
30 | wait_sleep: 2
31 | wait_timeout: 300
32 | ignore_errors: true
33 | loop: "{{ mlb_resources }}"
34 | loop_control:
35 | loop_var: resource
36 | ...
37 |
--------------------------------------------------------------------------------
/roles/metallb_setup/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: "Configure MetalLB resources"
3 | when:
4 | - mlb_action == 'install'
5 | block:
6 | - name: "Validate required variables"
7 | ansible.builtin.include_tasks: pre-requisites.yml
8 |
9 | - name: "Set up MetalLB resources"
10 | ansible.builtin.include_tasks: setup-metallb.yml
11 |
12 | - name: "Cleanup MetalLB resources"
13 | ansible.builtin.include_tasks: clean-resources.yml
14 | when:
15 | - mlb_action == 'cleanup'
16 | ...
17 |
--------------------------------------------------------------------------------
/roles/metallb_setup/templates/bgppeers.yml.j2:
--------------------------------------------------------------------------------
1 | apiVersion: metallb.io/v1beta2
2 | kind: BGPPeer
3 | metadata:
4 | name: "{{ peer.name }}"
5 | namespace: "{{ mlb_namespace }}"
6 | spec:
7 | peerAddress: "{{ peer.address }}"
8 | peerASN: {{ peer.remote_asn }}
9 | myASN: {{ peer.local_asn }}
10 | bfdProfile: "{{ mlb_bfd_profile }}"
11 |
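The peer fields consumed above (`name`, `address`, `remote_asn`, `local_asn`) imply an `mlb_bgp_peers` structure like this illustrative sketch:

```yaml
mlb_bgp_peers:
  - name: leaf1           # illustrative peer
    address: 192.0.2.1
    remote_asn: 64512
    local_asn: 64513
```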
--------------------------------------------------------------------------------
/roles/metallb_setup/vars/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | mlb_resources:
3 | - kind: 'MetalLB'
4 | name: 'metallb'
5 | apiVer: 'metallb.io/v1beta1'
6 | - kind: 'BFDProfile'
7 | name: '{{ mlb_bfd_profile }}'
8 | apiVer: 'metallb.io/v1beta1'
9 | - kind: 'BGPAdvertisement'
10 | name: '{{ mlb_setup_name }}'
11 | apiVer: 'metallb.io/v1beta1'
12 | - kind: 'IPAddressPool'
13 | name: '{{ mlb_setup_name }}'
14 | apiVer: 'metallb.io/v1beta1'
15 |
--------------------------------------------------------------------------------
/roles/microshift_generate_iso/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | microshift_generate_iso_action: "install"
3 | microshift_generate_iso_action_allowed: ['install']
4 | microshift_generate_iso_folder: "{{ ansible_user_dir }}/microshift"
5 | microshift_generate_iso_folder_blueprints_dir: "{{ microshift_generate_iso_folder }}/blueprints"
6 | microshift_generate_iso_ssh_key: "{{ lookup('file', lookup('env', 'HOME') + '/.ssh/id_rsa.pub') }}"
7 | microshift_generate_iso_fast_datapath_repo_url: "https://cdn.redhat.com/content/dist/layered/rhel{{ ansible_distribution_major_version }}/{{ ansible_architecture }}/fast-datapath/os"
8 | microshift_generate_iso_localhost_folder: "/tmp"
9 | microshift_generate_iso_microshift_iso_name: "microshift.iso"
10 | microshift_generate_iso_kickstart_post: []
11 | microshift_generate_iso_additional_blueprints: {}
12 | microshift_generate_iso_customization_fragment: ""
13 | ...
14 |
--------------------------------------------------------------------------------
/roles/microshift_generate_iso/tasks/create_dirs.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Ensure working directories exist
3 | ansible.builtin.file:
4 | path: "{{ item }}"
5 | state: directory
6 | mode: '0755'
7 | loop:
8 | - "{{ microshift_generate_iso_folder }}"
9 | - "{{ microshift_generate_iso_folder_blueprints_dir }}"
10 | ...
11 |
--------------------------------------------------------------------------------
/roles/microshift_generate_iso/tasks/install.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - ansible.builtin.include_tasks: install_packages.yml # noqa name[missing]
3 | - ansible.builtin.include_tasks: create_dirs.yml # noqa name[missing]
4 | - ansible.builtin.include_tasks: register_source.yml # noqa name[missing]
5 | - ansible.builtin.include_tasks: creating_rhel_edge_image.yml # noqa name[missing]
6 | - ansible.builtin.include_tasks: creating_iso.yml # noqa name[missing]
7 | ...
8 |
--------------------------------------------------------------------------------
/roles/microshift_generate_iso/tasks/install_packages.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Setup Repositories
3 | become: true
4 | community.general.rhsm_repository:
5 | state: enabled
6 | purge: true
7 | name:
8 | - rhel-{{ ansible_distribution_major_version }}-for-x86_64-appstream-rpms
9 | - rhel-{{ ansible_distribution_major_version }}-for-x86_64-baseos-rpms
10 |
11 | - name: Install Packages needed by the composer-cli
12 | become: true
13 | ansible.builtin.package:
14 | state: latest # noqa package-latest
15 | name:
16 | - osbuild-composer
17 | - composer-cli
18 | - cockpit-composer
19 | - bash-completion
20 | - firewalld
21 | - podman
22 | - lorax
23 | - rsync
24 |
25 | - name: Enable Cockpit and Composer sockets
26 | become: true
27 | ansible.builtin.systemd:
28 | state: started
29 | enabled: true
30 | name: "{{ item }}"
31 | loop:
32 | - osbuild-composer.socket
33 | - cockpit.socket
34 | ...
35 |
--------------------------------------------------------------------------------
/roles/microshift_generate_iso/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - ansible.builtin.include_tasks: "{{ microshift_generate_iso_action }}.yml" # noqa name[missing]
3 | when: microshift_generate_iso_action in microshift_generate_iso_action_allowed
4 | ...
5 |
--------------------------------------------------------------------------------
/roles/microshift_generate_iso/templates/blueprint.toml.j2:
--------------------------------------------------------------------------------
1 | id = "{{ _bp_id }}"
2 | name = "{{ _bp_data.name | default(_bp_id) }}"
3 | type = "yum-baseurl"
4 | url = "{{ _bp_data.url }}"
5 | check_gpg = {{ _bp_data.check_gpg | default(False) | ternary("true", "false") }}
6 | check_ssl = {{ _bp_data.check_ssl | default(True) | ternary("true", "false") }}
7 | system = {{ _bp_data.system | default(False) | ternary("true", "false") }}
8 | rhsm = {{ _bp_data.rhsm | default(False) | ternary("true", "false") }}
9 | check_repogpg = {{ _bp_data.check_repogpg | default(False) | ternary("true", "false") }}
10 | gpgkeys = {{ _bp_data.gpgkeys | default([]) | to_json }}
11 |
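Each `_bp_id`/`_bp_data` pair presumably comes from `microshift_generate_iso_additional_blueprints` (an empty dict by default, above); an illustrative entry:

```yaml
microshift_generate_iso_additional_blueprints:
  fast-datapath:          # becomes _bp_id
    url: "{{ microshift_generate_iso_fast_datapath_repo_url }}"
    rhsm: true            # the remaining booleans keep the template defaults
```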
--------------------------------------------------------------------------------
/roles/microshift_generate_iso/templates/microshift-installer.toml.j2:
--------------------------------------------------------------------------------
1 | name = "microshift-installer"
2 |
3 | description = ""
4 | version = "0.0.0"
5 | modules = []
6 | groups = []
7 | packages = []
8 |
--------------------------------------------------------------------------------
/roles/microshift_generate_iso/templates/minimal_microshift.toml.j2:
--------------------------------------------------------------------------------
1 | name = "minimal-microshift"
2 |
3 | description = ""
4 | version = "0.0.1"
5 | modules = []
6 | groups = []
7 |
8 | [[packages]]
9 | name = "microshift"
10 | version = "{{ microshift_generate_iso_microshift_version | default('*') }}"
11 |
12 | [customizations.services]
13 | enabled = ["microshift"]
14 |
15 | # BEGIN customizations
16 | {{ microshift_generate_iso_customization_fragment }}
17 | # END customizations
18 |
--------------------------------------------------------------------------------
/roles/mirror_catalog/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | mc_is_type: "icsp"
3 | mc_continue_on_error: false
4 | mc_allow_unsecure_registry: true
5 | mc_manifest_only: false
6 | mc_max_components: 3
7 | ...
8 |
--------------------------------------------------------------------------------
/roles/mirror_from_directory/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | mfd_delete_workspace: true
3 | mfd_local_registry_path: ""
4 | mfd_max_nested_paths: 2
5 | ...
6 |
--------------------------------------------------------------------------------
/roles/mirror_from_directory/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: "Validate requirements"
3 | include_tasks: validation.yml
4 |
5 | - name: "Mirror operators"
6 | include_tasks: load-operators.yml
7 | ...
8 |
--------------------------------------------------------------------------------
/roles/mirror_from_directory/tasks/validation.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: "Validate requirements"
3 | ansible.builtin.assert:
4 | that:
5 | - mfd_local_registry is defined
6 | - mfd_local_registry | length
7 |
8 | - name: "Assert that the source directory exists and has tar files"
9 | ansible.builtin.find:
10 | path: "{{ mfd_operators_dir }}"
11 | recurse: true
12 | patterns: "*.tar"
13 | register: catalog_path
14 | failed_when:
15 | - catalog_path is undefined or
16 | catalog_path.matched == 0
17 | ...
18 |
--------------------------------------------------------------------------------
/roles/mirror_images/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | mi_dst_org: ""
3 | mi_random_tag: false
4 | ...
5 |
--------------------------------------------------------------------------------
/roles/mirror_images/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Validate inputs
3 | ansible.builtin.assert:
4 | that:
5 | - mi_registry is defined
6 | - mi_images is defined
7 | - mi_images | length
8 |
9 | - name: "Mirror images"
10 | ansible.builtin.include_tasks: mirror-images.yml
11 | loop: "{{ mi_images }}"
12 | loop_control:
13 | loop_var: image
14 |
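A minimal invocation sketch; registry and image references are illustrative, and the shape of each `mi_images` entry (a plain image reference) is an assumption:

```yaml
- name: Mirror support images into the local registry
  ansible.builtin.include_role:
    name: redhatci.ocp.mirror_images
  vars:
    mi_registry: registry.example.com:5000     # illustrative
    mi_images:
      - quay.io/prometheus/prometheus:latest   # illustrative
```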
--------------------------------------------------------------------------------
/roles/mirror_ocp_release/tasks/dirs.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: "Create master cache directory"
3 | ansible.builtin.file:
4 | path: "{{ mor_cache_dir }}"
5 | state: directory
6 | owner: "{{ mor_owner }}"
7 | group: "{{ mor_group }}"
8 | mode: "0755"
9 | become: true
10 |
11 | - name: "Create cache directory for release"
12 | ansible.builtin.file:
13 | path: "{{ mor_cache_dir }}/{{ mor_version }}"
14 | owner: "{{ mor_owner }}"
15 | group: "{{ mor_group }}"
16 | mode: "0755"
17 | state: directory
18 | become: true
19 | ...
20 |
--------------------------------------------------------------------------------
/roles/mirror_ocp_release/tasks/files.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: "Download and refresh channel upgrade graph"
3 | ansible.builtin.get_url:
4 | url: "https://api.openshift.com/api/upgrades_info/v1/graph?channel=fast-{{ mor_base_version }}&arch=amd64"
5 | dest: "{{ mor_cache_dir }}/graph-{{ mor_base_version }}"
6 | owner: "{{ mor_owner }}"
7 | group: "{{ mor_group }}"
8 | mode: "0644"
9 | timeout: 1200
10 | setype: "httpd_sys_content_t"
11 | headers:
12 | Accept: "application/json"
13 | become: true
14 | retries: 10
15 | delay: 20
16 | register: downloaded
17 | until: downloaded is not failed
18 | ...
19 |
--------------------------------------------------------------------------------
/roles/mirror_ocp_release/tasks/images.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: "Mirror Disk Images for the install type"
3 | ansible.builtin.include_tasks: fetch.yml
4 | vars:
5 | mor_uri: "{{ ocp_release_data['rhcos_images'][item + '_location'] }}"
6 | mor_checksum: "sha256:{{ ocp_release_data['rhcos_images'][item + '_sha256'] }}"
7 | mor_dir: "{{ mor_cache_dir }}"
8 | loop: "{{ mor_images }}"
9 | ...
10 |
--------------------------------------------------------------------------------
/roles/mirror_ocp_release/vars/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | mor_is_types:
3 | - "idms"
4 | - "icsp"
5 |
--------------------------------------------------------------------------------
/roles/monitor_agent_based_installer/README.md:
--------------------------------------------------------------------------------
1 | # monitor_agent_based_installer
2 |
3 | Tracks the progress of the agent-based installation via openshift_installer
--------------------------------------------------------------------------------
/roles/monitor_agent_based_installer/defaults/main.yml:
--------------------------------------------------------------------------------
1 | generated_dir: "{{ repo_root_path }}/generated"
2 | manifests_dir: "{{ generated_dir }}/{{ cluster_name }}"
3 |
4 | agent_based_installer_bootstrap_node: "{{ groups['masters'][0] }}"
5 | host_ip_keyword: ansible_host
6 |
7 | # Retry install complete check in case of finding issues with API VIP reachability
8 | mabi_retry_install_complete_check: false
9 |
--------------------------------------------------------------------------------
/roles/mount_discovery_iso_for_pxe/README.md:
--------------------------------------------------------------------------------
1 | # mount_discovery_iso_for_pxe
2 |
3 | Extracts the required artifacts for a PXE boot from a discovery ISO
--------------------------------------------------------------------------------
/roles/mount_discovery_iso_for_pxe/templates/grub.cfg.j2:
--------------------------------------------------------------------------------
1 | set timeout=1
2 | menuentry 'Install Red Hat Enterprise Linux CoreOS' --class fedora --class gnu-linux --class gnu --class os {
3 | linuxefi vmlinuz random.trust_cpu=on ignition.firstboot ignition.platform.id=metal 'coreos.live.rootfs_url=http://{{ HTTPD_PXE_HOST }}/{{ pxe_directory }}/rootfs.img'
4 | initrdefi initrd.img ignition.img
5 | }
6 |
--------------------------------------------------------------------------------
/roles/multibench_run/defaults/main.yml:
--------------------------------------------------------------------------------
1 | nb_iterations: "{{ multibench_sample | default('3') }}"
2 | multibench_tags:
3 | automation: ansible
4 | multibench_script_dir: "/root/crucible-examples/multibench/openshift/example-A"
5 | multibench_script: "run.sh"
6 |
--------------------------------------------------------------------------------
/roles/multibench_run/images/example-A.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/redhatci/ansible-collection-redhatci-ocp/9ad485a10e0a4db164e516643d75b5da6d718b60/roles/multibench_run/images/example-A.png
--------------------------------------------------------------------------------
/roles/multibench_run/templates/config.ini.j2:
--------------------------------------------------------------------------------
1 | ocp_host={{ ocp_host }}
2 | num_samples={{ multibench_sample | default(3) }}
3 | tags={{ multibench_tags.keys()|zip(multibench_tags.values())|map('join',':')|join(',') }}
4 |
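The `tags=` expression zips the dict's keys and values into `key:value` pairs joined by commas; with an illustrative extra tag the rendering works out as:

```yaml
# Given these vars:
multibench_tags:
  automation: ansible
  owner: ci               # illustrative extra tag
# the template line renders as:
#   tags=automation:ansible,owner:ci
```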
--------------------------------------------------------------------------------
/roles/nfs_external_storage/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | nes_namespace: openshift-nfs-storage
3 | nes_provisioner_image: "registry.k8s.io/sig-storage/nfs-subdir-external-provisioner:v4.0.2"
4 | ...
5 |
--------------------------------------------------------------------------------
/roles/node_prep/README.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/redhatci/ansible-collection-redhatci-ocp/9ad485a10e0a4db164e516643d75b5da6d718b60/roles/node_prep/README.md
--------------------------------------------------------------------------------
/roles/node_prep/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # defaults file for node_prep
3 | activation_key: ""
4 | org_id: ""
5 | network_type: "OVNKubernetes"
6 | firewall: "firewalld"
7 | ipv6_enabled: false
8 | no_proxy_list: ""
9 | http_proxy: ""
10 | https_proxy: ""
11 | ipv4_baremetal: false
12 | ipv4_provisioning: false
13 | dualstack_baremetal: false
14 | dualstack_vips: false
15 | provisioning_bridge: "provisioning"
16 | webserver_url: ""
17 | baremetal_bridge: "baremetal"
18 | disable_bmc_certificate_verification: false
19 | redfish_inspection: true
20 | enable_virtualmedia: false
21 | static_bootstrap_extnet: false
22 |
--------------------------------------------------------------------------------
/roles/node_prep/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # handlers file for node_prep
3 |
--------------------------------------------------------------------------------
/roles/node_prep/meta/main.yml:
--------------------------------------------------------------------------------
1 | galaxy_info:
2 | author: Roger Lopez
3 | description: The node_prep role assists in setup of the provision host.
4 | company: Red Hat, Inc.
5 | license: Apache License, Version 2.0
6 | min_ansible_version: 2.9
7 | galaxy_tags: []
8 | dependencies: []
9 |
--------------------------------------------------------------------------------
/roles/node_prep/tasks/20_sub_man_register.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Register host via Activation key
3 | redhat_subscription:
4 | activationkey: "{{ activation_key }}"
5 | org_id: "{{ org_id }}"
6 | state: present
7 | pool: '^(Red Hat Enterprise Linux Server, Standard (8 sockets) (Unlimited guests))$'
8 | force_register: true
9 | ignore_errors: true
10 | become: true
11 | when:
12 | - activation_key != ""
13 | - org_id != ""
14 | tags: subscription
15 |
--------------------------------------------------------------------------------
/roles/node_prep/tasks/30_req_packages.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Install packages
3 | tags: packages
4 | block:
5 | - name: Create list of packages to be installed
6 | set_fact:
7 | package_list: "{{ package_list + cache_package_list }}"
8 | when: cache_enabled|bool
9 |
10 | - name: Install required packages
11 | yum:
12 | name: "{{ package_list }}"
13 | state: present
14 | update_cache: true
15 | disable_gpg_check: true
16 | become: true
17 |
--------------------------------------------------------------------------------
/roles/node_prep/tasks/50_modify_sudo_user.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Add "{{ ansible_user }}" user to libvirt group and set up its SSH key
3 | user:
4 | name: "{{ ansible_user }}"
5 | groups: libvirt
6 | append: true
7 | state: present
8 | generate_ssh_key: true
9 | become: true
10 | tags: user
11 |
--------------------------------------------------------------------------------
/roles/node_prep/tasks/60_enabled_services.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Enable and restart Services
3 | service:
4 | name: "{{ item }}"
5 | state: restarted
6 | enabled: true
7 | become: true
8 | with_items:
9 | - libvirtd
10 | tags: services
11 |
12 | - name: Enable Services (iptables)
13 | service:
14 | name: "{{ item }}"
15 | state: restarted
16 | enabled: true
17 | become: true
18 | with_items:
19 | - "{{ firewall }}"
20 | tags: services
21 | when: firewall == "iptables"
22 |
23 | - name: Enable Services (firewalld)
24 | service:
25 | name: "{{ item }}"
26 | state: started
27 | enabled: true
28 | become: true
29 | with_items:
30 | - "{{ firewall }}"
31 | tags: services
32 | when: firewall != "iptables"
33 |
--------------------------------------------------------------------------------
/roles/node_prep/tasks/80_libvirt_pool.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Define, Start, Autostart Storage Pool
3 | become: true
4 | tags: storagepool
5 | block:
6 | - name: Define Storage Pool for default
7 | virt_pool:
8 | command: define
9 | name: default
10 | xml: '{{ lookup("template", "dir.xml.j2") }}'
11 |
12 | - name: Start Storage Pool for default
13 | virt_pool:
14 | state: active
15 | name: default
16 |
17 | - name: Autostart Storage Pool for default
18 | virt_pool:
19 | autostart: true
20 | name: default
21 |
--------------------------------------------------------------------------------
/roles/node_prep/tasks/90_create_config_install_dirs.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Setup clusterconfigs dir
3 | tags: clusterconfigs
4 | block:
5 | - name: Clear config dir (if any, in case this is a re-run)
6 | file:
7 | path: "{{ item }}"
8 | state: absent
9 | with_items:
10 | - "{{ dir }}"
11 |
12 | - name: Create config dir
13 | file:
14 | path: "{{ item }}"
15 | state: directory
16 | owner: "{{ ansible_user }}"
17 | group: "{{ ansible_user }}"
18 | mode: '0755'
19 | with_items:
20 | - "{{ dir }}"
21 |
--------------------------------------------------------------------------------
/roles/node_prep/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - include_tasks: 10_validation.yml
3 | tags:
4 | - validation
5 | - create_registry
6 |
7 | - include_tasks: 15_validation_disconnected_registry.yml
8 | when:
9 | - "'registry_host' in groups"
10 | - "groups['registry_host']"
11 | tags:
12 | - disconnected
13 | - create_registry
14 |
15 | - include_tasks: 20_sub_man_register.yml
16 | tags: subscription
17 | - include_tasks: 30_req_packages.yml
18 | tags: packages
19 | - include_tasks: 40_bridge.yml
20 | tags:
21 | - network
22 | - include_tasks: 45_networking_facts.yml
23 | tags:
24 | - network_facts
25 | - include_tasks: 50_modify_sudo_user.yml
26 | tags: user
27 | - include_tasks: 60_enabled_services.yml
28 | tags: services
29 | - include_tasks: 70_enabled_fw_services.yml
30 | tags: firewall
31 | - include_tasks: 80_libvirt_pool.yml
32 | tags: storagepool
33 | - include_tasks: 90_create_config_install_dirs.yml
34 | tags: clusterconfigs
35 |
--------------------------------------------------------------------------------
/roles/node_prep/templates/dir.xml.j2:
--------------------------------------------------------------------------------
1 | <pool type='dir'>
2 | <name>default</name>
3 | <target>
4 | <path>/var/lib/libvirt/images</path>
5 | </target>
6 | </pool>
7 |
--------------------------------------------------------------------------------
/roles/node_prep/tests/inventory:
--------------------------------------------------------------------------------
1 | localhost
2 |
--------------------------------------------------------------------------------
/roles/node_prep/tests/test.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - hosts: localhost
3 | remote_user: root
4 | roles:
5 | - node_prep
6 |
--------------------------------------------------------------------------------
/roles/ocp_add_users/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | oau_passwd_len: 15
3 | oau_secure_log: true
4 |
--------------------------------------------------------------------------------
/roles/ocp_add_users/tasks/add-roles.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Add roles to users
3 | vars:
4 | user: "{{ item.split(':')[0] }}"
5 | role: "{{ item.split(':')[1] }}"
6 | kubernetes.core.k8s:
7 | state: present
8 | definition:
9 | apiVersion: rbac.authorization.k8s.io/v1
10 | kind: ClusterRoleBinding
11 | metadata:
12 | name: "{{ user | replace('_', '-') }}-0"
13 | roleRef:
14 | apiGroup: rbac.authorization.k8s.io
15 | kind: ClusterRole
16 | name: "{{ role }}"
17 | subjects:
18 | - kind: User
19 | name: "{{ user }}"
20 | loop: "{{ oau_users }}"
21 | when: role != "none"
22 |
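Each `oau_users` entry is a `user:role` string split on `:`, and valid roles are enumerated in the vars file just below. An illustrative list:

```yaml
oau_users:
  - admin1:cluster-admin   # illustrative user/role pairs
  - viewer1:view
  - plain1:none            # "none" skips the ClusterRoleBinding for this user
```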
--------------------------------------------------------------------------------
/roles/ocp_add_users/vars/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | oau_valid_roles:
3 | - admin
4 | - basic-user
5 | - cluster-admin
6 | - cluster-status
7 | - cluster-reader
8 | - edit
9 | - self-provisioner
10 | - view
11 | - none
12 |
--------------------------------------------------------------------------------
/roles/ocp_logging/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | ol_namespace: openshift-logging
3 | ol_event_router_image: registry.redhat.io/openshift-logging/eventrouter-rhel8:v5.2.1-1
4 | ol_action: 'install'
5 | ol_settings: ""
6 | ...
7 |
--------------------------------------------------------------------------------
/roles/ocp_logging/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: "Install the cluster Logging stack"
3 | when:
4 | - ol_action == 'install'
5 | block:
6 | - name: "Pre-run validations"
7 | ansible.builtin.include_tasks: pre-run.yml
8 |
9 | - name: "Set up the cluster logging subsystem"
10 | ansible.builtin.include_tasks: setup-logging.yml
11 |
12 | - name: "Validate cluster logging subsystem"
13 | ansible.builtin.include_tasks: validate-stack.yml
14 |
15 | - name: "Cleanup Logging stack resources"
16 | when:
17 | - ol_action == 'cleanup'
18 | block:
19 | - name: "Cleanup Logging stack resources"
20 | ansible.builtin.include_tasks: clean-resources.yml
21 | ...
22 |
--------------------------------------------------------------------------------
/roles/ocp_on_libvirt/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | libvirt_image_path: /var/lib/libvirt/images
3 | enable_conserver: false
4 | bootmode: "{{ 'uefi' if enable_redfish else 'legacy' }}"
5 | enable_legacy_vga_mode: false
6 | do_dns_config: true
7 | apps_ip_address: 192.168.123.10
8 | api_ip_address: 192.168.123.5
9 | dns_vip_address: 192.168.123.6
10 |
11 | # REDFISH
12 | enable_redfish: false
13 | enable_virtualmedia: false
14 | redfish_port: 8082
15 | ocp_on_libvirt_repo_root_path: /tmp
16 | ocp_on_libvirt_cert_country: US
17 | ocp_on_libvirt_cert_state: MA
18 | ocp_on_libvirt_cert_locality: Westford
19 | ocp_on_libvirt_cert_organization: DCI
20 | ocp_on_libvirt_cert_organizational_unit: Lab
21 | ocp_on_libvirt_vbmc_ipmi_nodes: ipmi_nodes.json
22 |
23 | ...
24 |
--------------------------------------------------------------------------------
/roles/ocp_on_libvirt/tasks/dns_cleanup.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Cleanup openshift dnsmasq settings
3 | ansible.builtin.file:
4 | path: /etc/NetworkManager/dnsmasq.d/openshift.conf
5 | state: absent
6 | become: "{{ libvirt_become }}"
7 |
8 | - name: Restart NetworkManager
9 | ansible.builtin.service:
10 | name: NetworkManager
11 | state: restarted
12 | become: "{{ libvirt_become }}"
13 |
--------------------------------------------------------------------------------
/roles/ocp_on_libvirt/tasks/libvirt_host_destroy.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: "List nodes for {{ host['name'] }}"
3 | community.libvirt.virt:
4 | command: list_vms
5 | register: _ool_vms_list
6 |
7 | - name: "Set ool_node_exists"
8 | ansible.builtin.set_fact:
9 | ool_node_exists: "{{ host['name'] in _ool_vms_list.list_vms }}"
10 |
11 | - name: "Get status if exists - {{ host['name'] }}"
12 | community.libvirt.virt:
13 | name: "{{ host['name'] }}"
14 | command: status
15 | register: _ool_node_status
16 | when: ool_node_exists
17 |
18 | - name: "Destroy {{ host.name }}"
19 | community.libvirt.virt:
20 | name: "{{ host['name'] }}"
21 | command: destroy
22 | when: _ool_node_status['status'] is defined and _ool_node_status['status'] == 'running'
23 |
24 | - name: "Undefine {{ host.name }}"
25 | become: true
26 | community.libvirt.virt:
27 | name: "{{ host['name'] }}"
28 | command: undefine
29 | flags: nvram
30 | when: ool_node_exists
31 | ...
32 |
--------------------------------------------------------------------------------
/roles/ocp_on_libvirt/tasks/libvirt_host_up2.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: "Wait for VM to shut down"
3 | community.libvirt.virt:
4 | name: "{{ host['name'] }}"
5 | command: status
6 | register: vmstatus
7 | until: vmstatus.status == 'shutdown'
8 | retries: 150
9 | delay: 10
10 | when: ool_node_not_exists
11 |
12 | - name: "Remove cdrom (cloud-init)"
13 | ansible.builtin.command: virsh change-media {{ host['name'] }} hda --eject --config
14 | become: "{{ libvirt_become }}"
15 | when: ool_node_not_exists
16 |
17 | - name: Sync memory data to disk
18 | ansible.builtin.command: sync
19 |
20 | - name: "Start VM again"
21 | community.libvirt.virt:
22 | name: "{{ host['name'] }}"
23 | state: "running"
24 | when: ool_node_not_exists
25 | ...
26 |
--------------------------------------------------------------------------------
/roles/ocp_on_libvirt/tasks/libvirt_network_destroy.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: "Does node exist"
3 | community.libvirt.virt_net:
4 | name: "{{ network['name'] }}"
5 | command: status
6 | ignore_errors: true
7 | register: node_exists
8 |
9 | - name: "Destroy network"
10 | community.libvirt.virt_net:
11 | name: "{{ network['name'] }}"
12 | command: destroy
13 | when: not node_exists.failed
14 |
15 | - name: "Undefine network"
16 | community.libvirt.virt_net:
17 | name: "{{ network['name'] }}"
18 | command: undefine
19 | when: not node_exists.failed
20 |
--------------------------------------------------------------------------------
/roles/ocp_on_libvirt/tasks/libvirt_network_up.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: "Define network"
3 | community.libvirt.virt_net:
4 | name: "{{ network['name'] }}"
5 | command: define
6 | xml: "{{ lookup('template', 'libvirt_network.xml.j2') }}"
7 |
8 | - name: "Start network"
9 | community.libvirt.virt_net:
10 | state: active
11 | name: "{{ network['name'] }}"
12 |
13 | - name: "Enable Autostart network"
14 | community.libvirt.virt_net:
15 | autostart: true
16 | name: "{{ network['name'] }}"
17 |
--------------------------------------------------------------------------------
/roles/ocp_on_libvirt/templates/cloud-config/meta-data:
--------------------------------------------------------------------------------
1 | instance-id: {{ host['name'] }}
2 | {# use namespace() so the flag set inside the loop persists across iterations #}
3 | {% set ns = namespace(static_network=0) %}
4 | {% for network in host['networks'] %}
5 | {% if ns.static_network == 0 and network['address'] is defined %}
6 | network-interfaces: |
7 | {% endif %}
8 | {% if network['address'] is defined %}
9 | {% set ns.static_network = 1 %}
10 | auto eth{{ loop.index0 }}
11 | iface eth{{ loop.index0 }} inet static
12 | address {{ network['address'] }}
13 | network {{ network['network'] }}
14 | netmask {{ network['netmask'] }}
15 | broadcast {{ network['broadcast'] }}
16 | gateway {{ network['gateway'] }}
17 | {% endif %}
18 | {% endfor %}
19 | local-hostname: {{ host['name'] }}.{{ host['domain'] }}
19 |
--------------------------------------------------------------------------------
/roles/ocp_on_libvirt/vars/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | ssh_key: id_rsa
3 | libvirt_become: true
4 | net: baremetal
5 | ...
6 |
--------------------------------------------------------------------------------
/roles/ocp_remove_nodes/meta/argument_specs.yml:
--------------------------------------------------------------------------------
1 | ---
2 | argument_specs:
3 | main:
4 | short_description: Entrypoint for ocp_remove_nodes role
5 | description:
6 | - Remove worker nodes from an OpenShift cluster.
7 | - /!\ IMPORTANT /!\ This role permanently removes the nodes from an OCP cluster; use it with care.
8 | options:
9 | orn_nodes:
10 | type: "list"
11 | required: true
12 | description:
13 | - A list of OCP node names to remove from the cluster.
14 | ...
15 |
--------------------------------------------------------------------------------
/roles/odf_setup/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Validate requirements
3 | ansible.builtin.include_tasks: validation.yml
4 |
5 | - name: Set Facts
6 | ansible.builtin.include_tasks: set-facts.yml
7 |
8 | - name: Setup Local Storage operator
9 | ansible.builtin.include_tasks: local-storage-operator.yml
10 | when:
11 | - ocs_install_type == 'internal'
12 |
13 | - name: Setup Openshift Storage Operator
14 | ansible.builtin.include_tasks: openshift-storage-operator.yml
15 |
16 | - name: Perform OCS Tests
17 | ansible.builtin.include_tasks: tests.yml
18 | ...
19 |
--------------------------------------------------------------------------------
/roles/odf_setup/tasks/set-facts.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Set facts for OCP integration with Internal OCS
3 | ansible.builtin.set_fact:
4 | ocs_storagecluster_name: ocs-storagecluster
5 | ocs_sc_rbd_name: ocs-storagecluster-ceph-rbd
6 | ocs_sc_cephfs_name: ocs-storagecluster-cephfs
7 | when:
8 | - ocs_install_type == 'internal'
9 |
10 | - name: Set fact for External StorageCluster name
11 | ansible.builtin.set_fact:
12 | ocs_storagecluster_name: ocs-external-storagecluster
13 | ocs_sc_rbd_name: ocs-external-storagecluster-ceph-rbd
14 | ocs_sc_cephfs_name: ocs-external-storagecluster-cephfs
15 | when:
16 | - ocs_install_type == 'external'
17 |
18 | - name: Set fact for default Storage Class
19 | ansible.builtin.set_fact:
20 | ocs_default_storage_class: "{{ ocs_sc_rbd_name }}"
21 | when:
22 | - ocs_default_storage_class is undefined
23 |
--------------------------------------------------------------------------------
/roles/odf_setup/templates/local-storage-block.yml.j2:
--------------------------------------------------------------------------------
1 | apiVersion: local.storage.openshift.io/v1
2 | kind: LocalVolume
3 | metadata:
4 | namespace: "{{ local_storage_namespace }}"
5 | name: local-block
6 | spec:
7 | tolerations:
8 | - key: "node.ocs.openshift.io/storage"
9 | value: "true"
10 | effect: NoSchedule
11 | nodeSelector:
12 | nodeSelectorTerms:
13 | - matchExpressions:
14 | - key: cluster.ocs.openshift.io/openshift-storage
15 | operator: In
16 | values:
17 | - ""
18 | storageClassDevices:
19 | - storageClassName: {{ local_storage_class }}
20 | {% if local_volume_mode is defined and local_volume_mode == 'filesystem' %}
21 | volumeMode: Filesystem
22 | fsType: xfs
23 | {% elif local_volume_mode is defined and local_volume_mode == 'block' %}
24 | volumeMode: Block
25 | {% endif %}
26 | devicePaths:
27 | {% for path in disk_id.stdout_lines %}
28 | - {{ path }}
29 | {% endfor %}
30 |
--------------------------------------------------------------------------------
/roles/olm_operator/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | channel: ""
3 | install_approval: "Manual"
4 | olm_operator_skippable: false
5 | operator_group_name: "{{ operator }}"
6 | source_ns: openshift-marketplace
7 | source: redhat-operators
8 | starting_csv: ""
9 | olm_operator_validate_install: true
10 | ...
11 |
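A sketch of installing a single operator with these defaults; the `operator` variable is implied by `operator_group_name` above, and the operator/channel values are illustrative:

```yaml
- name: Install an operator through OLM
  ansible.builtin.include_role:
    name: redhatci.ocp.olm_operator
  vars:
    operator: sriov-network-operator   # illustrative
    channel: stable                    # overrides the empty default
    install_approval: Automatic
```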
--------------------------------------------------------------------------------
/roles/opcap_tool/defaults/main.yml:
--------------------------------------------------------------------------------
1 | opcap_repo: "https://github.com/opdev/opcap"
2 | opcap_version: "0.2.1"
3 | opcap_download_url: "https://github.com/opdev/opcap/releases/download"
4 | opcap_audit_plan: "OperatorInstall,OperandInstall"
5 |
--------------------------------------------------------------------------------
/roles/opcap_tool/tasks/build.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Ensure bin directory exists
3 | ansible.builtin.file:
4 | path: "{{ opcap_dir.path }}/opcap/bin"
5 | state: directory
6 | mode: "0755"
7 |
8 | - name: Download opcap release
9 | vars:
10 | opcap_archi: "{{ 'linux-amd64' if ansible_architecture == 'x86_64' else 'linux-arm64' }}"
11 | ansible.builtin.get_url:
12 | url: "{{ opcap_download_url }}/v{{ opcap_version }}/opcap-{{ opcap_archi }}"
13 | dest: "{{ opcap_dir.path }}/opcap/bin/opcap"
14 | mode: "0755"
15 | register: opcap_download
16 | until: opcap_download is succeeded
17 | retries: 2
18 | delay: 30
19 |
20 | - name: Check opcap bin
21 | ansible.builtin.command:
22 | cmd: >
23 | ./bin/opcap version
24 | chdir: "{{ opcap_dir.path }}/opcap"
25 | ...
26 |
--------------------------------------------------------------------------------
/roles/openshift_cnf/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | catalog_url: "https://catalog.redhat.com/api/containers/v1"
3 | page_size: 200
4 | ...
5 |
--------------------------------------------------------------------------------
/roles/openshift_cnf/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # TODO: this role should be merged with create_certification_project
3 | - name: Create certification projects in a loop
4 | ansible.builtin.include_role:
5 | name: create_certification_project
6 | vars:
7 | product_type: "cnf"
8 | loop: "{{ cnf_to_certify }}"
9 | loop_control:
10 | loop_var: cert_item
11 | ...
12 |
--------------------------------------------------------------------------------
/roles/operator_sdk/tasks/teardown.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Delete Image Source from cluster
3 | kubernetes.core.k8s:
4 | state: absent
5 | definition: "{{ lookup('file', scorecard_is_file) }}"
6 |
7 | - name: Wait for MCP status
8 | ansible.builtin.include_role:
9 | name: redhatci.ocp.check_resource
10 | vars:
11 | resource_to_check: "MachineConfigPool"
12 | check_wait_retries: 60
13 | check_wait_delay: 20
14 |
15 | - name: Remove tmp directory
16 | ansible.builtin.file:
17 | path: "{{ scorecard_tmp_dir.path }}"
18 | state: absent
19 | when: scorecard_tmp_dir is defined
20 |
21 | - name: Remove scorecard Image Source file
22 | ansible.builtin.file:
23 | path: "{{ scorecard_is_file }}"
24 | state: absent
25 | when: scorecard_is_file is defined
26 |
27 | - name: Remove operator_sdk_img built from the source
28 | ansible.builtin.command: podman rmi -f {{ operator_sdk_img }}
29 | ...
30 |
--------------------------------------------------------------------------------
/roles/operator_sdk/templates/scorecard-debug-pod-logs.j2:
--------------------------------------------------------------------------------
1 | Events from scorecard ns =================================================
2 | {% if scorecard_events.resources %}
3 | {{ scorecard_events | json_query('resources[*]') | join('\n') }}
4 | {% else %}
5 | {% endif %}
6 | Fetch logs from all pods =================================================
7 | {% if scorecard_pods.resources %}
8 | {{ scorecard_pods | json_query('resources[*]') | join('\n') }}
9 | {% else %}
10 | {% endif %}
11 |
--------------------------------------------------------------------------------
/roles/operator_sdk/templates/scorecard-errors-basic-check-spec-test.j2:
--------------------------------------------------------------------------------
1 | Scorecard pods ===========================================================
2 | {% if scorecard_pods.resources %}
3 | {{ scorecard_pods | json_query('resources[*]') | join('\n') }}
4 | {% else %}
5 | {% endif %}
6 | Scorecard events =========================================================
7 | {% if scorecard_events.resources %}
8 | {{ scorecard_events | json_query('resources[*]') | join('\n') }}
9 | {% else %}
10 | {% endif %}
11 |
--------------------------------------------------------------------------------
/roles/populate_mirror_registry/README.md:
--------------------------------------------------------------------------------
1 | # populate_mirror_registry
2 |
3 | Copies the images required for installation to a local registry
--------------------------------------------------------------------------------
/roles/populate_mirror_registry/tasks/cleanup.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # As "opm" is no longer used, let's remove the tmp directory with the binaries.
3 | # "oc" and "kubectl" will remain in /usr/local/bin
4 | - name: Remove tmp directory for all binaries
5 | file:
6 | path: "{{ binaries_tool_path }}"
7 | state: absent
8 |
--------------------------------------------------------------------------------
/roles/populate_mirror_registry/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # tasks file for populate_mirror_registry
3 | - import_tasks: var_check.yml
4 | tags:
5 | - populate_registry
6 |
7 | - import_tasks: prerequisites.yml
8 | tags:
9 | - populate_registry
10 |
11 | - import_tasks: populate_registry.yml
12 | tags:
13 | - populate_registry
14 |
15 | - import_tasks: cleanup.yml
16 | tags:
17 | - populate_registry
18 |
--------------------------------------------------------------------------------
/roles/populate_mirror_registry/tasks/var_check.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Check openshift_full_version is set
3 | fail:
4 | msg: openshift_full_version must be set and not empty
5 | when: (openshift_full_version is not defined) or (openshift_full_version == "")
6 |
7 | - name: Check openshift_full_version has at least two parts
8 | block:
9 | - name: Split openshift_full_version
10 | set_fact:
11 | openshift_version_parts: "{{ openshift_full_version.split('.') }}"
12 | - name: Fail on incorrect format for openshift_full_version
13 | fail:
14 | msg: openshift_full_version does not have at least two parts
15 | when: openshift_version_parts | length < 2
16 |
--------------------------------------------------------------------------------
/roles/populate_mirror_registry/templates/filter.sh.j2:
--------------------------------------------------------------------------------
1 | #! /usr/bin/env bash
2 |
3 | {{ downloads_path }}/{{ openshift_full_version }}/opm render {{ olm_index_item.remote }} | jq -s > {{ temp_dir.path }}/configs/raw-index.json
4 |
5 | for val in {{ mirror_packages | join(" ") }}; do
6 | jq ".[] | select(.name == \"${val}\" or .package == \"${val}\")" {{ temp_dir.path }}/configs/raw-index.json >> {{ temp_dir.path }}/configs/index.json
7 | done
8 |
9 | rm {{ temp_dir.path }}/configs/raw-index.json
10 |
--------------------------------------------------------------------------------
/roles/preflight/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | preflight_image: "quay.io/opdev/preflight:1.13.1"
3 | preflight_repo_https: "https://github.com/redhat-openshift-ecosystem/openshift-preflight"
4 | preflight_namespace: preflight-testing
5 | preflight_sa: default
6 | preflight_custom_ca: ""
7 | preflight_podman_ca: ""
8 | catalog_url: "https://catalog.redhat.com/api/containers/v1"
9 | preflight_test_certified_image: false
10 | preflight_run_health_check: true
11 | max_images_per_batch: 1
12 | ...
13 |
--------------------------------------------------------------------------------
/roles/preflight/files/scorecard-images.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # 1/ scorecard-storage: https://github.com/operator-framework/operator-sdk/pull/6425/files
3 | # 2/ scorecard-untar: https://github.com/operator-framework/operator-sdk/pull/6425/files
4 | images:
5 | - quay.io/operator-framework/scorecard-storage@sha256:a3bfda71281393c7794cabdd39c563fb050d3020fd0b642ea164646bdd39a0e2
6 | - quay.io/operator-framework/scorecard-untar@sha256:2e728c5e67a7f4dec0df157a322dd5671212e8ae60f69137463bd4fdfbff8747
7 | ...
8 |
--------------------------------------------------------------------------------
/roles/preflight/files/scorecard-old-images.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # TODO: Open an issue against Operator-SDK because the 1.30.0 release still requires docker.io/busybox:1.33.0
3 | # to start scorecard pod
4 | images:
5 | - registry.access.redhat.com/ubi8@sha256:910f6bc0b5ae9b555eb91b88d28d568099b060088616eba2867b07ab6ea457c7
6 | - mirror.gcr.io/busybox@sha256:eccadc4fb09194c8163cfb7edcd9727933104e86da2ba8ad076732e5e3702a6a
7 | - quay.io/openshift-release-dev/ocp-v4.0-art-dev@sha256:bf657092557cf90be41f7d5a382c9734759afe0feab480732d70ad960b2a407d
8 | ...
9 |
--------------------------------------------------------------------------------
/roles/preflight/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: "Remove the local preflight image"
3 | ansible.builtin.command: podman rmi -f {{ preflight_image }}
4 | ...
5 |
--------------------------------------------------------------------------------
/roles/preflight/tasks/prepare_runtime_assets.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Run preflight runtime-assets
3 | ansible.builtin.command:
4 | cmd: >
5 | podman run
6 | --rm
7 | --pull=always
8 | -e PFLT_LOGLEVEL=error
9 | {{ preflight_image }}
10 | runtime-assets
11 | chdir: "{{ preflight_tmp_dir.path }}"
12 | register: preflight_assets
13 | retries: 2
14 | delay: 10
15 | until: not preflight_assets.failed
16 |
17 | - name: Get scorecard config images
18 | ansible.builtin.set_fact:
19 | preflight_assets_images: "{{ preflight_assets.stdout | from_json | json_query('images') | list | unique }}"
20 | ...
21 |
--------------------------------------------------------------------------------
/roles/preflight/tasks/test_async_logs_check_operator.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # global Preflight timeout = 180 seconds
3 | - name: "Retrieve logs for operator {{ operator.name }}"
4 | vars:
5 | attempts:
6 | - one
7 | - two
8 | - three
9 | ansible.builtin.include_tasks: test_all_logs.yml
10 | loop: "{{ attempts }}"
11 | ...
12 |
--------------------------------------------------------------------------------
/roles/preflight/tasks/test_preflight_check_container.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: "Preflight check container for {{ operator.name }}"
3 | ansible.builtin.include_tasks: test_preflight_check_container_one_image.yml
4 | loop: "{{ operator.operator_images }}"
5 | loop_control:
6 | loop_var: current_operator_image
7 | ...
8 |
--------------------------------------------------------------------------------
/roles/preflight/templates/mcp_logs.j2:
--------------------------------------------------------------------------------
1 | Main logs:
2 | {{ mcp_logs.stdout }}
3 | Worker logs:
4 | {{ mcp_worker_logs | json_query('results[*].stdout') | join('\n') }}
5 |
--------------------------------------------------------------------------------
/roles/prereq_facts_check/README.md:
--------------------------------------------------------------------------------
1 | # Prereqs facts check
2 |
3 | Checks that required facts are set correctly
4 |
5 | ## Role Variables
6 |
7 | - `pull_secret_check`: Whether to check that the `pull_secret` fact is valid
8 | - `ssh_public_check`: Whether to check that the `ssh_public_key` fact is valid
9 | - `mirror_certificate_check`: Whether to check that the `mirror_certificate` fact is valid
10 |
11 | ## Example Playbook
12 |
13 | ```yaml
14 | - name: Check facts
15 | hosts: localhost
16 | roles:
17 | - prereq_facts_check
18 | ```
19 |
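20 | Each check can be toggled individually. A minimal sketch, assuming the facts
21 | are set earlier in the play and no mirror registry is used:
22 | 
23 | ```yaml
24 | - name: Check facts without a mirror certificate
25 |   hosts: localhost
26 |   vars:
27 |     mirror_certificate_check: false
28 |   roles:
29 |     - prereq_facts_check
30 | ```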
--------------------------------------------------------------------------------
/roles/prereq_facts_check/defaults/main.yml:
--------------------------------------------------------------------------------
1 | pull_secret_check: true
2 | ssh_public_check: true
3 | mirror_certificate_check: true
4 |
--------------------------------------------------------------------------------
/roles/prereq_facts_check/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Check for pull_secret
3 | assert:
4 | that:
5 | - pull_secret is defined
6 | - pull_secret.auths is defined
7 | - pull_secret | trim != ''
8 | quiet: true
9 | msg: "The required 'pull_secret' is not defined or is not valid"
10 | when: pull_secret_check | bool
11 |
12 | - name: Check for ssh_public_key
13 | assert:
14 | that:
15 | - ssh_public_key is defined
16 | - ssh_public_key is string
17 | - ssh_public_key | trim != ''
18 | quiet: true
19 | msg: "The required 'ssh_public_key' is not defined or is not valid"
20 | when: ssh_public_check | bool
21 |
22 | - name: Check for mirror_certificate
23 | assert:
24 | that:
25 | - mirror_certificate is defined
26 | - mirror_certificate is string
27 | - mirror_certificate | trim != ''
28 | quiet: true
29 | msg: "The required 'mirror_certificate' is not defined or is not valid"
30 | when: mirror_certificate_check | bool
31 |
--------------------------------------------------------------------------------
/roles/process_kvm_nodes/README.md:
--------------------------------------------------------------------------------
1 | # process_kvm_nodes
2 |
3 | Creates and distributes KVM node specifications to VM hosts
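4 | 
5 | ## Example Playbook
6 | 
7 | A minimal sketch, assuming an inventory that defines the `nodes` and `vm_hosts`
8 | groups plus `cluster_name`; the `vm_spec_*` overrides are optional:
9 | 
10 | ```yaml
11 | - name: Process KVM node specs
12 |   hosts: localhost
13 |   vars:
14 |     vm_spec_master_memory: 32768
15 |   roles:
16 |     - process_kvm_nodes
17 | ```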
--------------------------------------------------------------------------------
/roles/process_kvm_nodes/defaults/main.yml:
--------------------------------------------------------------------------------
1 | vm_spec_defaults:
2 | master:
3 | memory: 16384
4 | vcpu: 6
5 | disk_size: 120
6 | worker:
7 | memory: 30000
8 | vcpu: 8
9 | disk_size: 120
10 |
11 | vm_group_params:
12 | master:
13 | memory: "{{ vm_spec_master_memory | default(vm_spec_defaults.master.memory) }}"
14 | vcpu: "{{ vm_spec_master_vcpu | default(vm_spec_defaults.master.vcpu) }}"
15 | disk_size: "{{ vm_spec_master_disk_size | default(vm_spec_defaults.master.disk_size) }}"
16 | worker:
17 | memory: "{{ vm_spec_worker_memory | default(vm_spec_defaults.worker.memory) }}"
18 | vcpu: "{{ vm_spec_worker_vcpu | default(vm_spec_defaults.worker.vcpu) }}"
19 | disk_size: "{{ vm_spec_worker_disk_size | default(vm_spec_defaults.worker.disk_size) }}"
20 |
21 | vm_node_prefix: "{{ cluster_name }}_"
22 |
23 | nodes_to_process: "{{ groups['nodes'] }}"
24 |
--------------------------------------------------------------------------------
/roles/process_kvm_nodes/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Set default for kvm_nodes
2 | set_fact:
3 | processed_kvm_nodes: {}
4 |
5 | - name: Set kvm_nodes
6 | include_tasks: create_node.yml
7 | loop: "{{ nodes_to_process }}"
8 | loop_control:
9 | loop_var: kvm_node_hostname
10 | when: hostvars[kvm_node_hostname]['vendor'] | lower == 'kvm'
11 |
12 | - name: Distribute kvm_nodes for vm_host to that host
13 | set_fact:
14 | kvm_nodes: "{{ processed_kvm_nodes[item] | default([]) }}"
15 | delegate_to: "{{ item }}"
16 | delegate_facts: true
17 | loop: "{{ groups['vm_hosts'] | default([]) }}"
18 |
--------------------------------------------------------------------------------
/roles/process_nmstate/README.md:
--------------------------------------------------------------------------------
1 | # process_nmstate
2 |
3 | Renders nmstate from crucible network_config
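4 | 
5 | ## Example Playbook
6 | 
7 | A minimal sketch, assuming `network_config` is defined for each host; the
8 | rendered output is stored in the `rendered_nmstate_yml` fact:
9 | 
10 | ```yaml
11 | - name: Render nmstate from network_config
12 |   hosts: vm_hosts
13 |   vars:
14 |     network_config:
15 |       raw:
16 |         interfaces: []
17 |   roles:
18 |     - process_nmstate
19 | ```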
--------------------------------------------------------------------------------
/roles/process_nmstate/defaults/main.yml:
--------------------------------------------------------------------------------
1 | # target_name is the hostname for which the template will be rendered. This variable exists
2 | # because this role is used by two other roles (setup_vm_host_network & generate_discovery_iso).
3 | # This default is used by setup_vm_host_network, while generate_discovery_iso sets target_name
4 | # to the host for which the nmstate is being rendered.
5 | target_name: "{{ inventory_hostname }}"
6 |
--------------------------------------------------------------------------------
/roles/process_nmstate/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: "Set rendered_nmstate_yml from raw value for {{ target_name }}"
2 | set_fact:
3 | rendered_nmstate_yml: "{{ network_config.raw | to_yaml | string }}"
4 | when: network_config.raw is defined
5 |
6 | - name: "Template rendered_nmstate_yml for {{ target_name }}"
7 | set_fact:
8 | rendered_nmstate_yml: "{{ lookup('template', network_config.template | default('nmstate.yml.j2')) }}"
9 | when: network_config.raw is not defined
10 |
11 | - name: Display rendered nmstate
12 | ansible.builtin.debug:
13 | msg: "{{ rendered_nmstate_yml }}"
14 | verbosity: 1
15 |
--------------------------------------------------------------------------------
/roles/prune_catalog/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | pc_opm_args: ""
3 | pc_expire: false
4 | pc_expire_time: 5h
5 | pc_maintainer: "redhatci.ocp"
6 | pc_ignore_pull_errors: false
7 | pc_allow_insecure_registry: true
8 | ...
9 |
--------------------------------------------------------------------------------
/roles/prune_catalog/tasks/extract-operators.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: "Extract the listed operators"
3 | ansible.builtin.shell:
4 | chdir: "{{ pc_tmp_dir }}"
5 | cmd: >
6 | set -x;
7 | jq 'select( .package == "{{ operator_name }}" or .name == "{{ operator_name }}")'
8 | {{ pc_tmp_dir }}/index-packages
9 | >> {{ pc_tmp_dir }}/configs/index.json
10 | with_items: "{{ pc_operators }}"
11 | register: extract_result
12 | changed_when: extract_result.rc != 0
13 | loop_control:
14 | loop_var: operator_name
15 | ...
16 |
--------------------------------------------------------------------------------
/roles/pyxis/README.md:
--------------------------------------------------------------------------------
1 | # Pyxis API to submit Preflight certification results for operators
2 |
3 | ## Fully automated preflight operator certification flow
4 |
5 | - DCI runs preflight tests and generates a file with test results for each operator.
6 | - The result file results.json is parsed and submitted to the Pyxis API.
7 | - The submission is triggered automatically when `operator.pyxis_operator_identifier` is defined; see the sketch below.
8 |
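9 | A minimal sketch of an operator entry that would trigger the submission; the
10 | `operators` list name and all values below are illustrative placeholders:
11 | 
12 | ```yaml
13 | operators:
14 |   - name: example-operator
15 |     version: v0.0.1
16 |     bundle_image: registry.example.com/example/bundle:v0.0.1
17 |     pyxis_operator_identifier: <identifier-from-pyxis>
18 | ```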
--------------------------------------------------------------------------------
/roles/pyxis/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | catalog_url: "https://catalog.redhat.com/api/containers/v1"
3 | ...
4 |
--------------------------------------------------------------------------------
/roles/pyxis/templates/artifact_info.json.j2:
--------------------------------------------------------------------------------
1 | {
2 | "_links": {
3 | "cert_project": {
4 | "href": "{{ cert_project_id }}"
5 | },
6 | "container_image": {
7 | "href": "{{ operator.bundle_image }}"
8 | }
9 | },
10 | "cert_project": "{{ cert_project_id }}",
11 | "certification_hash": "{{ preflight_output.certification_hash }}",
12 | "content": "{{ preflight_artifact | b64encode }}",
13 | "file_size": {{ artifact_info.stat.size | int }},
14 | "content_type": "text/plain",
15 | "filename": "preflight.log",
16 | "operator_package_name": "{{ operator.name }}",
17 | "version": "{{ operator.version }}"
18 | }
--------------------------------------------------------------------------------
/roles/pyxis/templates/test_results.json.j2:
--------------------------------------------------------------------------------
1 | {
2 | "_links": {
3 | "cert_project": {
4 | "href": "{{ cert_project_id }}"
5 | },
6 | "container_image": {
7 | "href": "{{ operator.bundle_image }}"
8 | }
9 | },
10 | "cert_project": "{{ cert_project_id}}",
11 | "certification_hash": "{{ preflight_output.certification_hash }}",
12 | "image": "{{ operator.bundle_image }}",
13 | "operator_package_name": "{{ operator.name }}",
14 | "passed": {{ preflight_output.passed }},
15 | "results": {{ preflight_output.results }},
16 | "test_library": {{ preflight_output.test_library | combine({'commit': preflight_latest.json.object.sha, 'version': preflight_release}) }},
17 | "tested_on": {
18 | "name": "OCP",
19 | "version": "{{ ocp_version_full }}"
20 | },
21 | "version": "{{ operator.version }}"
22 | }
23 |
--------------------------------------------------------------------------------
/roles/redhat_tests/tasks/csi-tests.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: "test_ redhat_tests: Run the openshift CSI tests" # noqa: name[casing]
3 | ansible.builtin.shell:
4 | cmd: >
5 | podman run --pull=always --rm
6 | -v {{ ts_configs_dir }}:/tests:Z
7 | -v {{ ts_log_dir }}:/logs:Z
8 | -v {{ ts_csi_tests_dir }}/{{ ts_csi_test_manifest }}:/manifest.yaml:z
9 | -e TEST_CSI_DRIVER_FILES=/manifest.yaml
10 | -e KUBECONFIG=/tests/kubeconfig
11 | {{ ts_e2e_image }}:{{ ts_ocp_version_maj }}.{{ ts_ocp_version_min }}
12 | /bin/bash -c
13 | "openshift-tests run openshift/csi --junit-dir /logs"
14 | > {{ ts_log_dir }}/csi-report.log
15 | ignore_errors: true
16 | ...
17 |
--------------------------------------------------------------------------------
/roles/remove_ztp_gitops_resources/defaults/main.yaml:
--------------------------------------------------------------------------------
1 | ---
2 |
3 | rzgr_gitops_applications:
4 | - clusters
5 | - policies
6 | rzgr_gitops_appprojects:
7 | - ztp-app-project
8 | - policy-app-project
9 | rzgr_policies_namespace: policies-sub
10 | rzgr_extra_namespaces:
11 | - ztp-common
12 | - ztp-group
13 | - ztp-site
14 | rzgr_cluster_role_bindings:
15 | - gitops-policy
16 | - gitops-cluster
17 | rzgr_private_repo_secret: "private-repo"
18 | rzgr_argo_cd_known_host_cm: "argocd-ssh-known-hosts-cm"
19 |
--------------------------------------------------------------------------------
/roles/resources_to_components/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | rtc_supported_resources:
3 | - "Pod"
4 | - "Deployment"
5 | - "ReplicaSet"
6 | - "StatefulSet"
7 | - "ClusterServiceVersion"
8 |
--------------------------------------------------------------------------------
/roles/resources_to_components/tasks/create-component.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Create resource component
3 | ansible.legacy.dci_component:
4 | display_name: "{{ comp_display_name }}"
5 | version: "{{ comp_version }}"
6 | team_id: "{{ job_info['job']['team_id'] }}"
7 | topic_id: "{{ job_info['job']['topic_id'] }}"
8 | type: "{{ comp_type }}"
9 | state: present
10 | register: resource_component
11 |
12 | - name: Attach resource component to the job
13 | ansible.legacy.dci_job_component:
14 |     component_id: "{{ resource_component.component.id }}"
15 |     job_id: "{{ job_id }}"
16 | register: job_component_result
17 | until: job_component_result is not failed
18 | retries: 5
19 | delay: 20
20 | when:
21 | - "'component' in resource_component"
22 | - "'id' in resource_component.component"
23 |
24 | ...
25 |
--------------------------------------------------------------------------------
/roles/resources_to_components/tasks/inspect-resources.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # Only create the component if the resource is among the supported resources.
3 | - name: Retrieve resource information and create the components
4 | ansible.builtin.include_tasks: resource-info-to-components.yml
5 | when:
6 | - resources.resource in rtc_supported_resources
7 | ...
8 |
--------------------------------------------------------------------------------
/roles/resources_to_components/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # If an item is not correctly built, it is skipped.
3 | - name: Inspect resources from a given namespace
4 | ansible.builtin.include_tasks: inspect-resources.yml
5 | when:
6 | - resources.resource is defined
7 | - resources.resource | length
8 | - resources.namespace is defined
9 | - resources.namespace | length
10 | loop: "{{ rtc_resources_to_components }}"
11 | loop_control:
12 | loop_var: resources
13 | ...
14 |
--------------------------------------------------------------------------------
/roles/rhoai/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | rhoai_action: install
3 | rhoai_operator_map: {}
4 | rhoai_source_catalog: redhat-operators
5 | rhoai_source_namespace: openshift-marketplace
6 | rhoai_create_dsc: true
7 | rhoai_dsc_name: default-dsc
8 | rhoai_wait_for_dsc: true
9 | rhoai_part_of: rhoai
10 | ...
11 |
--------------------------------------------------------------------------------
/roles/rhoai/tasks/install.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Install operators
3 | ansible.builtin.include_tasks: install_operator.yml
4 | vars:
5 | rhoai_op_name: "{{ item }}"
6 | rhoai_op_package: "{{ __rhoai_operator_map[item].package }}"
7 | rhoai_op_channel: "{{ __rhoai_operator_map[item].channel }}"
8 | rhoai_op_namespace: "{{ __rhoai_operator_map[item].namespace }}"
9 |   loop: # operators must be installed in this order
10 | - servicemesh
11 | - serverless
12 | - rhods
13 |
14 | - name: Create DataScienceCluster
15 | ansible.builtin.include_tasks: create-dsc.yml
16 | ...
17 |
--------------------------------------------------------------------------------
/roles/rhoai/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: "Merge default and provided operator map"
3 | ansible.builtin.set_fact:
4 | __rhoai_operator_map: "{{ rhoai_default_operator_map | combine(rhoai_operator_map, recursive=true) }}"
5 |
6 | - name: Validate operator map is complete
7 | ansible.builtin.assert:
8 | fail_msg: >
9 | Configuration for {{ item.key }} must contain package,
10 | channel, and namespace
11 | that:
12 | - "'package' in item.value"
13 | - "'channel' in item.value"
14 | - "'namespace' in item.value"
15 | loop: "{{ __rhoai_operator_map | dict2items }}"
16 |
17 | - name: "Execute action {{ rhoai_action }}"
18 | ansible.builtin.include_tasks: "{{ rhoai_action }}.yml"
19 | ...
20 |
--------------------------------------------------------------------------------
/roles/rhoai/vars/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | rhoai_default_operator_map:
3 | servicemesh:
4 | package: servicemeshoperator
5 | channel: stable
6 | namespace: openshift-operators
7 | serverless:
8 | package: serverless-operator
9 | channel: stable
10 | namespace: openshift-serverless
11 | rhods:
12 | package: rhods-operator
13 | channel: fast
14 | namespace: redhat-ods-operator
15 |
16 | rhoai_default_dsc_spec_components:
17 | codeflare: Removed
18 | kserve: Managed
19 | ray: Removed
20 | kueue: Removed
21 | workbenches: Managed
22 | dashboard: Managed
23 | modelmeshserving: Managed
24 | datasciencepipelines: Managed
25 | ...
26 |
--------------------------------------------------------------------------------
/roles/setup_gitea/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | sg_namespace: gitea
3 | sg_action: install
4 | sg_gitea_image: mirror.gcr.io/gitea/gitea:latest-rootless
5 | sg_repo_branch: main
6 | sg_url: http://localhost:3000
7 | ...
8 |
--------------------------------------------------------------------------------
/roles/setup_gitea/tasks/cleanup.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: "Delete Gitea Namespace"
3 | kubernetes.core.k8s:
4 | state: absent
5 | api: "v1"
6 | kind: "Namespace"
7 | name: "{{ sg_namespace }}"
8 |
9 | - name: "Wait for the Gitea Namespace to be terminated"
10 | kubernetes.core.k8s_info:
11 | api: "v1"
12 | kind: "Namespace"
13 | name: "{{ sg_namespace }}"
14 | register: _sg_gitea_namespace
15 | retries: 10
16 | delay: 5
17 | until: _sg_gitea_namespace.resources | length == 0
18 |
19 | ...
20 |
--------------------------------------------------------------------------------
/roles/setup_gitea/templates/namespace.j2:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Namespace
3 | metadata:
4 | name: "{{ sg_namespace }}"
5 |
--------------------------------------------------------------------------------
/roles/setup_gitea/templates/role_binding_sa_to_scc_anyuid.j2:
--------------------------------------------------------------------------------
1 | kind: RoleBinding
2 | apiVersion: rbac.authorization.k8s.io/v1
3 | metadata:
4 | name: sa-to-scc-anyuid
5 | namespace: "{{ sg_namespace }}"
6 | subjects:
7 | - kind: ServiceAccount
8 | name: anyuid
9 | roleRef:
10 | kind: Role
11 | name: scc-anyuid
12 | apiGroup: rbac.authorization.k8s.io
13 |
--------------------------------------------------------------------------------
/roles/setup_gitea/templates/role_scc_anyuid.j2:
--------------------------------------------------------------------------------
1 | apiVersion: rbac.authorization.k8s.io/v1
2 | kind: Role
3 | metadata:
4 | name: scc-anyuid
5 | namespace: "{{ sg_namespace }}"
6 | rules:
7 | - apiGroups:
8 | - security.openshift.io
9 | resourceNames:
10 | - anyuid
11 | resources:
12 | - securitycontextconstraints
13 | verbs:
14 | - use
15 |
--------------------------------------------------------------------------------
/roles/setup_gitea/templates/route_gitea.j2:
--------------------------------------------------------------------------------
1 | apiVersion: route.openshift.io/v1
2 | kind: Route
3 | metadata:
4 | name: gitea
5 | namespace: "{{ sg_namespace }}"
6 | spec:
7 | port:
8 | targetPort: http
9 | to:
10 | kind: Service
11 | name: gitea
12 | weight: 100
13 | wildcardPolicy: None
14 |
--------------------------------------------------------------------------------
/roles/setup_gitea/templates/sa_anyuid.j2:
--------------------------------------------------------------------------------
1 | kind: ServiceAccount
2 | apiVersion: v1
3 | metadata:
4 | name: anyuid
5 | namespace: "{{ sg_namespace }}"
6 |
--------------------------------------------------------------------------------
/roles/setup_gitea/templates/secret_gitea_app_ini.j2:
--------------------------------------------------------------------------------
1 | kind: Secret
2 | apiVersion: v1
3 | metadata:
4 | name: gitea-app-ini
5 | namespace: "{{ sg_namespace }}"
6 | data:
7 | app.ini: {{ lookup('template', 'gitea_app_ini.j2') | b64encode }}
8 |
--------------------------------------------------------------------------------
/roles/setup_gitea/templates/service_gitea.j2:
--------------------------------------------------------------------------------
1 | apiVersion: v1
2 | kind: Service
3 | metadata:
4 | name: "gitea"
5 | namespace: "{{ sg_namespace }}"
6 | spec:
7 | type: "ClusterIP"
8 | ports:
9 | - name: http
10 | port: 3000
11 | targetPort: 3000
12 | protocol: TCP
13 | selector:
14 | app: gitea
15 |
--------------------------------------------------------------------------------
/roles/setup_http_store/defaults/main.yml:
--------------------------------------------------------------------------------
1 | http_store_container_name: http_store
2 | http_store_pod_name: http_store_pod
3 | http_dir: /opt/http_store
4 | http_data_dir: "{{ http_dir }}/data"
5 | http_port: 80
6 | # Note: if you change this, you might have to change the env vars and volumes for the podman task
7 | container_image: quay.io/fedora/httpd-24:latest
8 | file_owner: "{{ ansible_env.USER }}"
9 | file_group: "{{ file_owner }}"
10 | http_store_ephemeral: false
11 | ...
12 |
--------------------------------------------------------------------------------
/roles/setup_minio/.linted:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/redhatci/ansible-collection-redhatci-ocp/9ad485a10e0a4db164e516643d75b5da6d718b60/roles/setup_minio/.linted
--------------------------------------------------------------------------------
/roles/setup_minio/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | sm_claim_size: 10Gi
3 | sm_namespace: minio
4 | sm_access_key_id: minioadmin
5 | sm_access_key_secret: minioadmin
6 | sm_bucket_name: minio
7 | sm_action: install
8 | sm_minio_image: quay.io/minio/minio
9 | sm_minio_client: quay.io/minio/mc
10 | sm_service_type: NodePort
11 | ...
12 |
--------------------------------------------------------------------------------
/roles/setup_minio/tasks/cleanup.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: "Delete Minio Namespace"
3 | kubernetes.core.k8s:
4 | state: absent
5 | api: "v1"
6 | kind: "Namespace"
7 | name: "{{ sm_namespace }}"
8 | ...
9 |
--------------------------------------------------------------------------------
/roles/setup_minio/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Validate that a storageclass is provided
3 | ansible.builtin.assert:
4 | that:
5 | - sm_storage_class is defined
6 | fail_msg: "A storage classname is required"
7 |
8 | - name: Create Minio deployment
9 | ansible.builtin.include_tasks: install.yml
10 | when:
11 | - sm_action == 'install'
12 |
13 | - name: Cleanup Minio deployment
14 | ansible.builtin.include_tasks: cleanup.yml
15 | when:
16 | - sm_action == 'cleanup'
17 | ...
18 |
--------------------------------------------------------------------------------
/roles/setup_minio/vars/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | deployment_name: minio
3 | service_name: minio-service
4 | pvc_name: minio-pvc-claim
5 | ...
6 |
--------------------------------------------------------------------------------
/roles/setup_mirror_registry/README.md:
--------------------------------------------------------------------------------
1 | # setup_mirror_registry
2 |
3 | Deploys a local container registry
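4 | 
5 | ## Example Playbook
6 | 
7 | A minimal sketch; the role's variable checks require the registry HTTP secret
8 | and credentials, and the values below are placeholders:
9 | 
10 | ```yaml
11 | - name: Deploy a local mirror registry
12 |   hosts: registry_host
13 |   vars:
14 |     REGISTRY_HTTP_SECRET: changeme
15 |     disconnected_registry_user: registry-user
16 |     disconnected_registry_password: registry-password
17 |   roles:
18 |     - setup_mirror_registry
19 | ```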
--------------------------------------------------------------------------------
/roles/setup_mirror_registry/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # tasks file for setup_mirror_registry
3 | - import_tasks: var_check.yml
4 | tags: create_registry
5 |
6 | - import_tasks: prerequisites.yml
7 | tags:
8 | - create_registry
9 |
10 | - import_tasks: set_mirror_cert.yml
11 |
12 | - import_tasks: setup_registry.yml
13 | tags: create_registry
14 |
15 | - import_tasks: retrieve_config.yml
16 | tags:
17 | - copy_config
18 |
--------------------------------------------------------------------------------
/roles/setup_mirror_registry/tasks/prerequisites.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Make sure needed packages are installed
3 | package:
4 | name: "{{ required_packages }}"
5 | state: present
6 | become: true
7 | tags:
8 | - create_registry
9 |
10 | - name: Check cert exists
11 | block:
12 | - name: Get cert stat
13 | stat:
14 | path: "{{ registry_dir_cert }}/{{ cert_file_prefix }}.crt"
15 | register: cert_file
16 | - name: "Fail: Certificate not found"
17 | fail:
18 | msg: "Cert file {{ registry_dir_cert }}/{{ cert_file_prefix }}.crt missing"
19 | when: not cert_file.stat.exists
20 |
21 | - name: Create config_file_path dir
22 | file:
23 | path: "{{ config_file_path }}"
24 | owner: "{{ file_owner }}"
25 | group: "{{ file_group }}"
26 | mode: "0775"
27 | state: directory
28 |
29 | - name: Copy pull_secret
30 | copy:
31 | src: "{{ local_pull_secret_path }}"
32 | dest: "{{ config_file_path }}/{{ pull_secret_file_name }}"
33 | mode: "0644"
34 |
--------------------------------------------------------------------------------
/roles/setup_mirror_registry/tasks/set_mirror_cert.yml:
--------------------------------------------------------------------------------
1 | - name: Get cert contents
2 | set_fact:
3 | mirror_certificate: "{{ lookup('file', fetched_dest + '/' + cert_file_prefix + '.crt') }}"
4 |
5 | - name: Populate mirror_certificate on cert targets
6 | set_fact:
7 | mirror_certificate: "{{ mirror_certificate }}"
8 | delegate_to: "{{ item }}"
9 | delegate_facts: true
10 | loop: "{{ cert_targets }}"
11 |
--------------------------------------------------------------------------------
/roles/setup_mirror_registry/tasks/var_check.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Check REGISTRY_HTTP_SECRET is set
3 | fail:
4 | msg: REGISTRY_HTTP_SECRET must be set and not empty
5 | when: (REGISTRY_HTTP_SECRET is not defined) or (REGISTRY_HTTP_SECRET == "")
6 |
7 | - name: Check disconnected_registry_user is set
8 | fail:
9 | msg: disconnected_registry_user must be set and not empty
10 | when: (disconnected_registry_user is not defined) or (disconnected_registry_user == "")
11 |
12 | - name: Check disconnected_registry_password is set
13 | fail:
14 | msg: disconnected_registry_password must be set and not empty
15 | when: (disconnected_registry_password is not defined) or (disconnected_registry_password == "")
16 |
--------------------------------------------------------------------------------
/roles/setup_netobserv_stack/tasks/cleanup.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Delete the Flow collector
3 | kubernetes.core.k8s:
4 | api_version: flows.netobserv.io/v1beta1
5 | kind: FlowCollector
6 | name: "cluster"
7 | state: absent
8 | wait: true
9 |
10 | - name: Delete the namespace netobserv
11 | kubernetes.core.k8s:
12 | state: absent
13 | api: "v1"
14 | kind: "Namespace"
15 | name: "netobserv"
16 | wait: true
17 | ...
18 |
--------------------------------------------------------------------------------
/roles/setup_netobserv_stack/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Install the NetObservability stack
3 | when:
4 | - setup_netobserv_stack_action == "install"
5 | block:
6 | - name: Pre-run validations
7 | ansible.builtin.include_tasks: validation.yml
8 |
9 | - name: Set up the NetObservability stack
10 | ansible.builtin.include_tasks: setup.yml
11 |
12 | - name: Validate the NetObservability resources
13 | ansible.builtin.include_tasks: verify.yml
14 |
15 | - name: Cleanup NetObservability stack resources
16 | when:
17 | - setup_netobserv_stack_action == "cleanup"
18 | block:
19 |     - name: Cleanup NetObservability resources
20 | ansible.builtin.include_tasks: cleanup.yml
21 | ...
22 |
--------------------------------------------------------------------------------
/roles/setup_netobserv_stack/vars/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | pvcs:
3 | - { kind: 'PersistentVolumeClaim', name: 'storage-netobserv-loki-compactor-0', apiVer: 'v1' }
4 | - { kind: 'PersistentVolumeClaim', name: 'storage-netobserv-loki-index-gateway-0', apiVer: 'v1' }
5 | - { kind: 'PersistentVolumeClaim', name: 'storage-netobserv-loki-index-gateway-1', apiVer: 'v1' }
6 | - { kind: 'PersistentVolumeClaim', name: 'storage-netobserv-loki-ingester-0', apiVer: 'v1' }
7 | - { kind: 'PersistentVolumeClaim', name: 'storage-netobserv-loki-ingester-1', apiVer: 'v1' }
8 | - { kind: 'PersistentVolumeClaim', name: 'wal-netobserv-loki-ingester-0', apiVer: 'v1' }
9 | - { kind: 'PersistentVolumeClaim', name: 'wal-netobserv-loki-ingester-1', apiVer: 'v1' }
10 |
11 | # Minimal OCP version supported by the role
12 | ocp_supported: 4.12
13 | ...
14 |
--------------------------------------------------------------------------------
/roles/setup_ntp/README.md:
--------------------------------------------------------------------------------
1 | # setup_ntp
2 |
3 | Deploys chrony
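4 | 
5 | ## Example Playbook
6 | 
7 | A minimal sketch; `ntp_server` is the address chronyd binds to (used by the
8 | bundled chrony.conf template) and `ntp_server_allow` optionally opens the
9 | server to a client subnet; both values below are placeholders:
10 | 
11 | ```yaml
12 | - name: Deploy chrony
13 |   hosts: ntp_host
14 |   vars:
15 |     ntp_server: 192.168.1.1
16 |     ntp_server_allow: 192.168.1.0/24
17 |   roles:
18 |     - setup_ntp
19 | ```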
--------------------------------------------------------------------------------
/roles/setup_ntp/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | ntp_pool_servers:
3 | - 0.us.pool.ntp.org
4 | - 1.us.pool.ntp.org
5 | - 2.us.pool.ntp.org
6 | - 3.us.pool.ntp.org
7 |
8 | enable_logging: false
9 |
10 | ntp_server_allows: "{% if ntp_server_allow is defined %}{{ [ntp_server_allow] }}{% else %}{{ [] }}{% endif %}"
11 |
--------------------------------------------------------------------------------
/roles/setup_ntp/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Restart chronyd
3 | ansible.builtin.service:
4 | name: chronyd
5 | state: restarted
6 | become: true
7 |
8 | - name: Start chronyd
9 | ansible.builtin.service:
10 | name: chronyd
11 | state: started
12 | enabled: true
13 | become: true
14 |
--------------------------------------------------------------------------------
/roles/setup_ntp/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Setup Chrony
3 | become: true
4 | block:
5 | - name: Install Chrony
6 | ansible.builtin.package:
7 | name: chrony
8 | state: present
9 |
10 | - name: Configure chrony
11 | ansible.builtin.template:
12 | src: chrony.conf.j2
13 | dest: /etc/chrony.conf
14 | owner: root
15 | group: root
16 | mode: "0644"
17 | notify: Restart chronyd
18 |
19 | - name: Start chrony
20 | ansible.builtin.service:
21 | name: chronyd
22 | state: started
23 | enabled: true
24 |
25 | - name: Allow incoming ntp traffic
26 | ansible.posix.firewalld:
27 | zone: public
28 | service: ntp
29 | permanent: true
30 | state: enabled
31 | immediate: true
32 |
--------------------------------------------------------------------------------
/roles/setup_ntp/templates/chrony.conf.j2:
--------------------------------------------------------------------------------
1 | # {{ ansible_managed }}
2 | driftfile /var/lib/chrony/drift
3 | bindcmdaddress {{ ntp_server }}
4 | bindcmdaddress 127.0.0.1
5 | bindcmdaddress ::1
6 | keyfile /etc/chrony.keys
7 | local stratum 10
8 | rtcsync
9 | makestep 1.0 3
10 | manual
11 | {% if enable_logging %}
12 | logdir /var/log/chrony
13 | log measurements statistics tracking
14 | {% endif %}
15 |
16 | allow 127.0.0.1
17 | {% for allow_server in ntp_server_allows %}
18 | allow {{ allow_server }}
19 | {% endfor %}
20 |
21 | server 127.0.0.1
22 | {% for item in ntp_pool_servers %}
23 | server {{ item }}
24 | {% endfor %}
25 |
--------------------------------------------------------------------------------
/roles/setup_radvd/defaults/main.yaml:
--------------------------------------------------------------------------------
1 | # The interface name (usually a bridge) on which radvd will listen
2 | setup_radvd_baremetal_bridge: "baremetal"
3 |
4 | # The minimum time allowed between sending unsolicited multicast router advertisements from the interface, in seconds.
5 | setup_radvd_min_interval: 30
6 |
7 | # The maximum time allowed between sending unsolicited multicast router advertisements from the interface, in seconds.
8 | setup_radvd_max_interval: 100
9 |
10 | # The lifetime associated with the default router in units of seconds.
11 | # A lifetime of 0 indicates that the router is not a default router and should not appear on the default router list.
12 | setup_radvd_default_lifetime: 9000
13 |
--------------------------------------------------------------------------------
/roles/setup_radvd/files/sysctl.d/ipv6.conf:
--------------------------------------------------------------------------------
1 | net.ipv4.conf.all.rp_filter=0
2 | net.ipv6.conf.all.forwarding=1
3 | net.ipv6.conf.all.accept_ra=2
4 | net.ipv6.conf.lo.disable_ipv6=0
5 |
--------------------------------------------------------------------------------
/roles/setup_radvd/handlers/main.yaml:
--------------------------------------------------------------------------------
1 | - name: Restart radvd
2 | ansible.builtin.service:
3 | name: radvd
4 | state: restarted
5 | listen: restart_service
6 |
--------------------------------------------------------------------------------
/roles/setup_radvd/tasks/main.yaml:
--------------------------------------------------------------------------------
1 | - name: Perform validations
2 | ansible.builtin.include_tasks: pre-requisites.yaml
3 |
4 | - name: Install radvd
5 | ansible.builtin.package:
6 | name: radvd
7 | state: present
8 |
9 | - name: Create sysctl file for ipv6 settings
10 | ansible.builtin.copy:
11 | dest: /etc/sysctl.d/ipv6.conf
12 | src: sysctl.d/ipv6.conf
13 | mode: "0644"
14 | owner: root
15 | group: root
16 | notify: restart_service
17 |
18 | - name: Create radvd.conf file
19 | ansible.builtin.template:
20 | src: radvd.conf.j2
21 | dest: "/etc/radvd.conf"
22 | mode: "0644"
23 | notify: restart_service
24 |
25 | - name: Start radv daemon
26 | ansible.builtin.service:
27 | name: radvd
28 | state: started
29 | enabled: true
30 |
--------------------------------------------------------------------------------
/roles/setup_selfsigned_cert/README.md:
--------------------------------------------------------------------------------
1 | # setup_selfsigned_cert
2 |
3 | Generates self-signed SSL certs
--------------------------------------------------------------------------------
/roles/setup_sushy_tools/README.md:
--------------------------------------------------------------------------------
1 | # setup_sushy_tools
2 |
3 | Deploys virtual Redfish for KVM
--------------------------------------------------------------------------------
/roles/setup_sushy_tools/templates/sushy-tools.service.j2:
--------------------------------------------------------------------------------
1 | [Unit]
2 | Description=Sushy Tools (Redfish Emulator for Libvirt)
3 | After=network.target syslog.target
4 |
5 | [Service]
6 | Type=simple
7 | TimeoutStartSec=5m
8 | WorkingDirectory={{ sushy_dir }}
9 | ExecStart={{ sushy_dir }}/bin/python3 {{ sushy_dir }}/bin/sushy-emulator --config {{ sushy_dir }}/sushy-emulator.conf
10 | Restart=always
11 |
12 | [Install]
13 | WantedBy=multi-user.target
14 |
--------------------------------------------------------------------------------
/roles/setup_tftp/README.md:
--------------------------------------------------------------------------------
1 | # setup_tftp
2 |
3 | Deploys a TFTP server
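4 | 
5 | ## Example Playbook
6 | 
7 | A minimal sketch, assuming a `tftp_host` inventory group; the role installs
8 | tftp-server, starts tftp.socket, and opens the firewalld tftp service:
9 | 
10 | ```yaml
11 | - name: Deploy a TFTP server
12 |   hosts: tftp_host
13 |   roles:
14 |     - setup_tftp
15 | ```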
--------------------------------------------------------------------------------
/roles/setup_tftp/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Restart TFTP
3 | become: true
4 | ansible.builtin.service:
5 | name: tftp.socket
6 | state: restarted
7 |
8 | - name: Start TFTP
9 | become: true
10 | ansible.builtin.service:
11 | name: tftp.socket
12 | state: started
13 | enabled: true
14 |
15 |
--------------------------------------------------------------------------------
/roles/setup_tftp/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Setup TFTP
3 | become: true
4 | block:
5 | - name: Install TFTP
6 | ansible.builtin.package:
7 | name: tftp-server
8 | state: present
9 |
10 | - name: Start TFTP
11 | ansible.builtin.service:
12 | name: tftp.socket
13 | state: started
14 | enabled: true
15 |
16 | - name: Allow incoming tftp traffic
17 | ansible.posix.firewalld:
18 | zone: public
19 | service: tftp
20 | permanent: true
21 | state: enabled
22 | immediate: true
23 |
--------------------------------------------------------------------------------
/roles/setup_vm_host_network/README.md:
--------------------------------------------------------------------------------
1 | # setup_vm_host_network
2 |
3 | Configures the network for vm hosts
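4 | 
5 | ## Example Playbook
6 | 
7 | A minimal sketch, assuming the values come from inventory; the role derives
8 | the bridge IP and name from `machine_network_cidr` and `cluster_name`, then
9 | renders and applies the nmstate config:
10 | 
11 | ```yaml
12 | - name: Configure VM host networking
13 |   hosts: vm_hosts
14 |   vars:
15 |     cluster_name: mycluster
16 |     machine_network_cidr: 192.168.90.0/24
17 |     vm_bridge_interface: eno1
18 |   roles:
19 |     - setup_vm_host_network
20 | ```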
--------------------------------------------------------------------------------
/roles/setup_vm_host_network/defaults/main.yml:
--------------------------------------------------------------------------------
1 | vm_bridge_ip: "{{ machine_network_cidr | ansible.utils.ipaddr('next_usable') }}"
2 | vm_bridge_prefix: "{{ machine_network_cidr | ansible.utils.ipaddr('prefix') }}"
3 | vm_bridge_name: "{{ cluster_name }}-br"
4 | vm_bridge_port_name: "{{ vm_bridge_interface }}"
5 | vm_vlan_name: "{{ cluster_name }}.{{ vm_vlan_tag }}"
6 | vm_nmstate_config_path: "/tmp/{{ cluster_name }}_nmstate.yml"
7 |
--------------------------------------------------------------------------------
/roles/setup_vm_host_network/tasks/main.yml:
--------------------------------------------------------------------------------
1 | - name: Create network_config
2 | ansible.builtin.include_tasks: make_network_config.yml
3 | when: network_config is not defined
4 |
5 | - name: Process network_config
6 | ansible.builtin.import_role:
7 | name: redhatci.ocp.process_nmstate
8 |
9 | - name: Apply nmstate config
10 | ansible.builtin.import_role:
11 | name: redhatci.ocp.apply_nmstate
12 |
--------------------------------------------------------------------------------
/roles/sideload_kernel/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | sideload_kernel_namespace: default
3 | sideload_kernel_force: false
4 | sideload_kernel_job_timeout: 15
5 | sideload_kernel_base_image: ubi9
6 | k8s_auth: {}
7 |
--------------------------------------------------------------------------------
/roles/sno_installer/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # defaults file for sno_installer
3 | cache_enabled: true
4 | si_cache_dir: "/opt/cache"
5 | webserver_caching_image: "quay.io/fedora/httpd-24:latest"
6 | webserver_caching_port_container: 8080
7 | webserver_caching_port: "{{ webserver_caching_port_container }}"
8 | url_passed: false
9 | tftp_dir: "/var/lib/tftpboot"
10 | dnsmasq_enabled: true
11 |
12 | si_cache_server: "{{ cache_provisioner | ternary(groups['provisioner'][0], groups['registry_host'][0]) }}"
13 | si_cache_server_major_version: "{{ ansible_distribution_major_version }}"
14 | si_cache_server_user_id: "{{ ansible_user_id }}"
15 | si_cache_server_user_gid: "{{ ansible_user_gid }}"
16 | si_cache_server_user_dir: "{{ ansible_user_dir }}"
17 |
--------------------------------------------------------------------------------
/roles/sno_installer/tasks/40_create_manifest.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Create OpenShift Manifest
3 | ansible.builtin.command: |
4 | {{ ocp_binary_path }}/openshift-install --dir {{ dir }} create manifests
5 | tags: manifests
6 |
7 | - name: Ensure the manifests dir is owned by {{ ansible_user }}
8 | ansible.builtin.file:
9 | path: "{{ item }}"
10 | state: directory
11 | recurse: true
12 | owner: "{{ ansible_user }}"
13 | group: "{{ ansible_user }}"
14 | mode: '0755'
15 | with_items:
16 | - "{{ dir }}/openshift"
17 | - "{{ dir }}/manifests"
18 | tags: manifests
19 |
--------------------------------------------------------------------------------
/roles/sno_installer/tasks/56_create_grubpxe.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: "Generate grub.cfg file for SNO node"
3 | ansible.builtin.template:
4 | src: grub.cfg.j2
5 | dest: "{{ tftp_dir }}/grub.cfg-01-{{ hostvars[groups['masters'][0]]['baremetal_mac'] | replace(':', '-') }}"
6 | owner: "root"
7 | group: "root"
8 | mode: "0644"
9 | setype: tftpdir_rw_t
10 | become: true
11 | delegate_to: "{{ tftp_server }}"
12 | tags:
13 | - tftp
14 | - rhcos_grub
15 | ...
16 |
--------------------------------------------------------------------------------
/roles/sno_installer/templates/chrony.conf.j2:
--------------------------------------------------------------------------------
1 | # This file is managed by the machine config operator
2 | {% for server in clock_servers %}
3 | server {{ server }} iburst
4 | {% endfor %}
5 | stratumweight 0
6 | driftfile /var/lib/chrony/drift
7 | rtcsync
8 | makestep 10 3
9 | bindcmdaddress 127.0.0.1
10 | bindcmdaddress ::1
11 | keyfile /etc/chrony.keys
12 | commandkey 1
13 | generatecommandkey
14 | noclientlog
15 | logchange 0.5
16 | logdir /var/log/chrony
17 |
--------------------------------------------------------------------------------
/roles/sno_installer/templates/etc-chrony.conf.j2:
--------------------------------------------------------------------------------
1 | apiVersion: machineconfiguration.openshift.io/v1
2 | kind: MachineConfig
3 | metadata:
4 | labels:
5 | machineconfiguration.openshift.io/role: {{ item }}
6 | name: 98-{{ item }}-etc-chrony-conf
7 | spec:
8 | config:
9 | ignition:
10 | config: {}
11 | security:
12 | tls: {}
13 | timeouts: {}
14 | version: 3.1.0
15 | networkd: {}
16 | passwd: {}
17 | storage:
18 | files:
19 | - contents:
20 | source: data:text/plain;charset=utf-8;base64,{{ chronyconfig }}
21 | group:
22 | name: root
23 | mode: 420
24 | overwrite: true
25 | path: /etc/chrony.conf
26 | user:
27 | name: root
28 | osImageURL: ""
29 |
--------------------------------------------------------------------------------
/roles/sno_installer/templates/grub.cfg.j2:
--------------------------------------------------------------------------------
1 | set default="1"
2 |
3 | function load_video {
4 | insmod efi_gop
5 | insmod efi_uga
6 | insmod video_bochs
7 | insmod video_cirrus
8 | insmod all_video
9 | }
10 |
11 | load_video
12 | set gfxpayload=keep
13 | insmod gzio
14 | insmod part_gpt
15 | insmod ext2
16 |
17 | set timeout=5
18 | ### END /etc/grub.d/00_header ###
19 |
20 | ### BEGIN /etc/grub.d/10_linux ###
21 | menuentry 'RHEL CoreOS (Live)' --class fedora --class gnu-linux --class gnu --class os {
22 | linux {{ coreos_pxe_kernel_path }} random.trust_cpu=on ignition.config.url={{ coreos_sno_ignition_url }} coreos.live.rootfs_url={{ coreos_pxe_rootfs_url }} ignition.firstboot ignition.platform.id=metal
23 | initrd {{ coreos_pxe_initramfs_path }}
24 | }
25 |
--------------------------------------------------------------------------------
/roles/sno_installer/vars/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # vars file for sno_installer
3 | pullsecret_file: "{{ si_cache_dir }}/pull-secret.txt"
4 | default_libvirt_pool_dir: "/var/lib/libvirt/images"
5 | force_mirroring: false
6 | ocp_binary_path: "/usr/local/bin"
7 |
8 | snp_cache_dir: "{{ si_cache_dir }}"
9 |
--------------------------------------------------------------------------------
/roles/sno_node_prep/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # defaults file for sno_node_prep
3 | network_type: "OVNKubernetes"
4 | firewall: "firewalld"
5 | ipv4_enabled: true
6 | ipv6_enabled: false
7 | no_proxy_list: ""
8 | http_proxy: ""
9 | https_proxy: ""
10 | ipv4_baremetal: false
11 | ipv4_provisioning: false
12 | ipv6_baremetal: false
13 | dualstack_baremetal: false
14 | tftp_dir: "/var/lib/tftpboot"
15 | dnsmasq_enabled: true
16 | webserver_url: ""
17 | registry_host_exists: false
18 |
--------------------------------------------------------------------------------
/roles/sno_node_prep/tasks/30_req_packages.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: "Install required packages"
3 | ansible.builtin.yum:
4 | name: "{{ default_pkg_list }}"
5 | state: present
6 | update_cache: true
7 | disable_gpg_check: true
8 | become: true
9 | tags: packages
10 |
11 | - name: "Install specific packages for SNO virtual"
12 | ansible.builtin.yum:
13 | name: "{{ snovm_pkg_list }}"
14 | state: present
15 | disable_gpg_check: true
16 | become: true
17 | when:
18 | - (sno_install_type is undefined) or (sno_install_type == "virtual")
19 | tags: packages
20 | ...
21 |
--------------------------------------------------------------------------------
/roles/sno_node_prep/tasks/50_modify_sudo_user.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Add ansible user to libvirt and qemu groups and set up SSH key
3 | ansible.builtin.user:
4 | name: "{{ ansible_user }}"
5 | groups: libvirt,qemu
6 | append: true
7 | state: present
8 | generate_ssh_key: true
9 | become: true
10 | tags:
11 | - user
12 |
13 | - name: Grant qemu user execute access to the ansible user dir
14 | ansible.posix.acl:
15 | path: "{{ ansible_user_dir }}"
16 | entity: qemu
17 | etype: user
18 | permissions: x
19 | state: present
20 | tags:
21 | - user
22 |
--------------------------------------------------------------------------------
/roles/sno_node_prep/tasks/60_enabled_services.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: "Enable and restart Services"
3 | ansible.builtin.service:
4 | name: "{{ item }}"
5 | state: restarted
6 | enabled: true
7 | become: true
8 | with_items:
9 | - libvirtd
10 | when:
11 | - sno_install_type | default("virtual") == "virtual"
12 | tags: services
13 |
14 | - name: "Enable Services (iptables)"
15 | ansible.builtin.service:
16 | name: "{{ item }}"
17 | state: restarted
18 | enabled: true
19 | become: true
20 | with_items:
21 | - "{{ firewall }}"
22 | when: firewall == "iptables"
23 | tags: services
24 |
25 | - name: "Enable Services (firewalld)"
26 | ansible.builtin.service:
27 | name: "{{ item }}"
28 | state: started
29 | enabled: true
30 | become: true
31 | with_items:
32 | - "{{ firewall }}"
33 | when: firewall != "iptables"
34 | tags: services
35 | ...
36 |
--------------------------------------------------------------------------------
/roles/sno_node_prep/tasks/70_cleanup_libvirt_network.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Check whether network exists
3 | community.libvirt.virt_net:
4 | name: "{{ network['name'] }}"
5 | command: status
6 | register: network_exists
7 | become: true
8 | ignore_errors: true
9 | tags:
10 | - cleanup
11 |
12 | - name: Stop SNO network
13 | community.libvirt.virt_net:
14 | command: destroy
15 | name: "{{ network['name'] }}"
16 | become: true
17 | when:
18 | - not network_exists.failed
19 | tags:
20 | - cleanup
21 |
22 | - name: Undefine SNO network
23 | community.libvirt.virt_net:
24 | command: undefine
25 | name: "{{ network['name'] }}"
26 | become: true
27 | when:
28 | - not network_exists.failed
29 | tags:
30 | - cleanup
31 | ...
32 |
--------------------------------------------------------------------------------
/roles/sno_node_prep/tasks/72_cleanup_files.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Remove Kubeconfig from Ansible User .bashrc
3 | ansible.builtin.lineinfile:
4 | path: "{{ ansible_user_dir }}/.bashrc"
5 | state: absent
6 | regex: "^export KUBECONFIG={{ dir }}/auth/kubeconfig"
7 | tags:
8 | - cleanup
9 |
--------------------------------------------------------------------------------
/roles/sno_node_prep/tasks/80_libvirt_storage.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Define, Start, Autostart Storage Pool
3 | become: true
4 | tags:
5 | - storagepool
6 | block:
7 | - name: Define Storage Pool for default
8 | community.libvirt.virt_pool:
9 | command: define
10 | name: "{{ vm_pool_name }}"
11 | xml: '{{ lookup("template", "dir.xml.j2") }}'
12 |
13 | - name: Start Storage Pool for default
14 | community.libvirt.virt_pool:
15 | state: active
16 | name: "{{ vm_pool_name }}"
17 |
18 | - name: Autostart Storage Pool for default
19 | community.libvirt.virt_pool:
20 | autostart: true
21 | name: "{{ vm_pool_name }}"
22 |
--------------------------------------------------------------------------------
/roles/sno_node_prep/tasks/85_libvirt_network.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Define, Start, Libvirt Network
3 | become: true
4 | tags: virtnetwork
5 | block:
6 | - name: Define network
7 | community.libvirt.virt_net:
8 | command: define
9 | name: "{{ network['name'] }}"
10 | xml: '{{ lookup("template", "network.xml.j2") }}'
11 |
12 | - name: Start network
13 | community.libvirt.virt_net:
14 | state: active
15 | name: "{{ network['name'] }}"
16 |
17 | - name: Autostart network
18 | community.libvirt.virt_net:
19 | autostart: true
20 | name: "{{ network['name'] }}"
21 |
--------------------------------------------------------------------------------
/roles/sno_node_prep/tasks/88_etc_hosts.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Add DNS /etc/hosts entries (Fedora)
3 | ansible.builtin.lineinfile:
4 | path: /etc/hosts
5 | line: "{{ sno_extnet_ip }} api.{{ cluster }}.{{ domain }}"
6 | become: true
7 | tags:
8 | - dnsredirect
9 |
--------------------------------------------------------------------------------
/roles/sno_node_prep/tasks/90_create_config_install_dirs.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Clear config dir (if any, in case this is a re-run)
3 | ansible.builtin.file:
4 | path: "{{ item }}"
5 | state: absent
6 | with_items:
7 | - "{{ dir }}"
8 | tags:
9 | - cleanup
10 |
11 | - name: Create config dir
12 | ansible.builtin.file:
13 | path: "{{ item }}"
14 | state: directory
15 | owner: "{{ ansible_user }}"
16 | group: "{{ ansible_user }}"
17 | mode: '0755'
18 | with_items:
19 | - "{{ dir }}"
20 | tags:
21 | - clusterconfigs
22 |
--------------------------------------------------------------------------------
/roles/sno_node_prep/tasks/95_check_pull_secret.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Check that pull-secret is available
3 | ansible.builtin.copy:
4 | src: "{{ sno_pullsecret_file }}"
5 | dest: "{{ pullsecret_file }}"
6 | mode: "0644"
7 |
--------------------------------------------------------------------------------
/roles/sno_node_prep/templates/dir.xml.j2:
--------------------------------------------------------------------------------
1 | <pool type="dir">
2 |   <name>default</name>
3 |   <target>
4 |     <path>{{ default_libvirt_pool_dir }}</path>
5 |   </target>
6 | </pool>
7 |
--------------------------------------------------------------------------------
/roles/sno_node_prep/vars/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # vars file for sno_node_prep
3 | default_pkg_list:
4 | - "{{ firewall }}"
5 | - jq
6 | - python3-devel
7 | - python3-libvirt
8 | - python3-lxml
9 | - python3-yaml
10 | - python3-netaddr
11 | - python3-jmespath
12 | - libsemanage-python3
13 | - policycoreutils-python3
14 | - podman
15 | - tar
16 | - ipmitool
17 |
18 | snovm_pkg_list:
19 | - libvirt
20 | - qemu-kvm
21 | - virt-install
22 |
23 | snobm_pkg_list:
24 | - grub2-efi-x64
25 | - shim-x64
26 | - dnsmasq
27 |
28 | pullsecret_file: "{{ snp_cache_dir }}/pull-secret.txt"
29 |
--------------------------------------------------------------------------------
/roles/sos_report/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | sos_report_dir: "/tmp"
3 | sos_report_image: "registry.redhat.io/rhel9/support-tools"
4 | sos_report_oc_path: "/usr/local/bin/oc"
5 | sos_report_options: "-k crio.all=on -k crio.logs=on -k podman.all=on -k podman.logs=on"
6 |
--------------------------------------------------------------------------------
/roles/sos_report/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Validation for sos report
3 | ansible.builtin.assert:
4 | that:
5 | - sos_report_nodes is defined
6 | - sos_report_nodes | length
7 |
8 | - name: Generate SOS reports
9 | ansible.builtin.include_tasks: sos-reports.yml
10 |
--------------------------------------------------------------------------------
/roles/sriov_config/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | sriov_config_retries_per_node: 60
3 | sriov_config_delay_per_node: 20
4 | sriov_config_wait_node_policy: true
5 | sriov_config_wait_network: true
6 |
--------------------------------------------------------------------------------
/roles/sriov_config/tasks/check_sriov_network.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Wait until NetworkAttachmentDefinition is created from SriovNetwork {{ sriov_conf.network.name }}
3 | kubernetes.core.k8s_info:
4 | api_version: k8s.cni.cncf.io/v1
5 | name: "{{ sriov_conf.network.name }}"
6 | namespace: "{{ sriov_conf.network.network_namespace | default('default') }}"
7 | kind: NetworkAttachmentDefinition
8 | register: net_attach_def_check
9 | retries: 6
10 | delay: 10
11 | until: net_attach_def_check.resources|length == 1
12 | no_log: true
13 |
--------------------------------------------------------------------------------
/roles/sriov_config/tasks/create_networks.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Create SriovNetwork
3 | kubernetes.core.k8s:
4 | definition: "{{ lookup('template', 'templates/sriov-network.yml.j2') }}"
5 | loop: "{{ sriov_network_configs }}"
6 | loop_control:
7 | loop_var: sriov
8 | label: "{{ sriov.resource }}"
9 | when: sriov.network is defined
10 |
11 | - name: Check for SRIOV Network
12 | ansible.builtin.include_tasks: check_sriov_network.yml
13 | loop: "{{ sriov_network_configs }}"
14 | loop_control:
15 | loop_var: sriov_conf
16 | label: "{{ sriov_conf.resource }}"
17 | when:
18 | - sriov_conf.network is defined
19 | - sriov_config_wait_network | bool
20 |
--------------------------------------------------------------------------------
/roles/sriov_config/tasks/create_node_policies.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # OCPBUGS-32139 workaround suggests to retry the creation of the SRIOV policy if failing.
3 | # Trying this during 1 minute in case any of them fails.
4 | - name: Create SriovNetworkNodePolicy
5 | kubernetes.core.k8s:
6 | definition: "{{ lookup('template', 'templates/sriov-network-node-policy.yml.j2') }}"
7 | loop: "{{ sriov_network_configs }}"
8 | loop_control:
9 | loop_var: sriov
10 | label: "{{ sriov.resource }}"
11 | retries: 6
12 | delay: 10
13 | register: _sc_node_policy_retry
14 | until: _sc_node_policy_retry.error is not defined
15 | when: sriov.node_policy is defined
16 |
17 | - name: Check for SRIOV Node Policy
18 | ansible.builtin.include_tasks: check_sriov_node_policy.yml
19 | loop: "{{ sriov_network_configs }}"
20 | loop_control:
21 | loop_var: sriov_conf
22 | label: "{{ sriov_conf.resource }}"
23 | when:
24 | - sriov_conf.node_policy is defined
25 | - sriov_config_wait_node_policy | bool
26 |
--------------------------------------------------------------------------------
/roles/sriov_config/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Validate SR-IOV requirements
3 | ansible.builtin.include_tasks: validation.yml
4 |
5 | - name: Create SriovNetworkNodePolicies
6 | ansible.builtin.include_tasks: create_node_policies.yml
7 | when: sriov_network_configs | json_query('[*].node_policy') | select('defined') | list | length
8 |
9 | - name: Create SriovNetworks
10 | ansible.builtin.include_tasks: create_networks.yml
11 | when: sriov_network_configs | json_query('[*].network') | select('defined') | list | length
12 |
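The `json_query`-based guards above only include each task file when at least one list entry defines the corresponding key. A small sketch of how the guard evaluates (the `json_query` filter needs the jmespath Python package; names here are illustrative):

    - name: Show how the node_policy guard evaluates
      ansible.builtin.debug:
        msg: "{{ demo_configs | json_query('[*].node_policy') | select('defined') | list | length }}"
      vars:
        demo_configs:
          - resource: nic1
            node_policy: {}
          - resource: nic2   # no node_policy; the JMESPath projection drops it
      # prints 1 (truthy), so create_node_policies.yml would be included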
--------------------------------------------------------------------------------
/roles/storage_tester/tasks/teardown.yml:
--------------------------------------------------------------------------------
1 | - name: "Delete PVC used for upgrade tests"
2 | kubernetes.core.k8s:
3 | api_version: v1
4 | kind: PersistentVolumeClaim
5 | name: "{{ pvc_to_be_deleted }}"
6 | namespace: storage-tester
7 | state: absent
8 | wait: true
9 | loop:
10 | - storage-upgrade-tester-rwo
11 | - storage-upgrade-tester-rwx
12 | - storage-upgrade-tester-rox
13 | loop_control:
14 | loop_var: pvc_to_be_deleted
15 |
16 | - name: "Delete storage-tester Namespace"
17 | kubernetes.core.k8s:
18 | api_version: v1
19 | kind: Namespace
20 | name: storage-tester
21 | state: absent
22 | wait: true
23 |
--------------------------------------------------------------------------------
/roles/storage_tester/templates/tester-init-pv-job.yaml.j2:
--------------------------------------------------------------------------------
1 | apiVersion: batch/v1
2 | kind: Job
3 | metadata:
4 | name: init-pv
5 | namespace: storage-tester
6 | spec:
7 | parallelism: 1
8 | completions: 1
9 | activeDeadlineSeconds: 360
10 | backoffLimit: 6
11 | template:
12 | metadata:
13 | name: init-pv
14 | spec:
15 | containers:
16 | - name: init-pv
17 | {% if dci_disconnected | default(false) %}
18 | image: "{{ dci_local_registry }}/rhel8/support-tools"
19 | {% else %}
20 | image: "registry.redhat.io/rhel8/support-tools"
21 | {% endif %}
22 | command: ["bin/sh"]
23 | args: ["-c", "echo \"Read from shared volume!\" >> /data-tester/yes.txt"]
24 | volumeMounts:
25 | - name: volume-to-be-initialzed
26 | mountPath: /data-tester
27 | volumes:
28 | - name: volume-to-be-initialzed
29 | persistentVolumeClaim:
30 | claimName: storage-upgrade-init-rox
31 | restartPolicy: OnFailure
--------------------------------------------------------------------------------
/roles/upi_installer/README.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/redhatci/ansible-collection-redhatci-ocp/9ad485a10e0a4db164e516643d75b5da6d718b60/roles/upi_installer/README.md
--------------------------------------------------------------------------------
/roles/upi_installer/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | serverport: 8000
3 | timeout: 600
4 | webserver_url: ""
5 | cache_dir: "/opt/cache"
6 | upi_pullsecret: "{{ cache_dir }}/pull-secret.txt"
7 | provision_cache_store: "{{ cache_dir }}"
8 | force_mirroring: false
9 | ocp_binary_path: "/usr/local/bin"
10 | ...
11 |
--------------------------------------------------------------------------------
/roles/upi_installer/handlers/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: "Delete tmp file"
3 | ansible.builtin.file:
4 | path: "{{ upi_creds_file.path }}"
5 | state: absent
6 | when:
7 | - upi_creds_file is defined
8 | ...
9 |
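The handler references an `upi_creds_file` result that must be registered elsewhere in the role, most plausibly from `ansible.builtin.tempfile`, which returns a `path`. A hedged sketch of the task that would pair with this handler:

    - name: Create temporary credentials file
      ansible.builtin.tempfile:
        state: file
        suffix: creds
      register: upi_creds_file
      notify: "Delete tmp file"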
--------------------------------------------------------------------------------
/roles/upi_installer/tasks/05_create_config_install_dirs.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Create clusterconfigs dir
3 | ansible.builtin.file:
4 | path: "{{ dir }}"
5 | state: directory
6 | owner: "{{ ansible_user }}"
7 | group: "{{ ansible_user }}"
8 | mode: '0755'
9 | tags: clusterconfigs
10 |
--------------------------------------------------------------------------------
/roles/upi_installer/tasks/25_read_ssh_key.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Verify that SSH key for ansible_user exists
3 | ansible.builtin.stat:
4 | path: "{{ ansible_user_dir }}/.ssh/id_rsa.pub"
5 | register: sshkeypath
6 | tags: sshconfig
7 |
8 | - name: Get the contents of the ssh key for ansible_user
9 | ansible.builtin.slurp:
10 | src: "{{ ansible_user_dir }}/.ssh/id_rsa.pub"
11 | register: sshkey
12 | when: sshkeypath.stat.exists | bool
13 | tags: sshconfig
14 |
15 | - name: Set Fact for the ssh key of ansible_user
16 | ansible.builtin.set_fact:
17 | key: "{{ sshkey['content'] | b64decode }}"
18 | when: sshkeypath.stat.exists | bool
19 | tags: sshconfig
20 |
--------------------------------------------------------------------------------
/roles/validate_dns_records/README.md:
--------------------------------------------------------------------------------
1 | # validate_dns_records
2 |
3 | Checks for the required DNS entries for the ingress and API VIPs
--------------------------------------------------------------------------------
/roles/validate_dns_records/defaults/main.yml:
--------------------------------------------------------------------------------
1 | required_domains:
2 | "api": "api.{{ domain }}"
3 | "api-int": "api-int.{{ domain }}"
4 | "apps": "*.apps.{{ domain }}"
5 |
6 | expected_answers:
7 | "api": "{{ api_vip }}"
8 | "api-int": "{{ api_vip }}"
9 | "apps": "{{ ingress_vip }}"
10 |
11 | required_binary: dig
12 | required_binary_provided_in_package: bind-utils
13 | domain: "{{ cluster_name }}.{{ base_dns_domain }}"
14 |
--------------------------------------------------------------------------------
/roles/validate_dns_records/tasks/check.yml:
--------------------------------------------------------------------------------
1 | - name: Check required domain {{ item.value }} exists
2 | ansible.builtin.command:
3 | cmd: "{{ required_binary }} {{ item.value }} A {{ item.value }} AAAA +short"
4 | register: res
5 | changed_when: false
6 |
7 | - name: Check stdout for expected IP address
8 | ansible.builtin.set_fact:
9 | failed_domains: "{{ (failed_domains | default({})) | combine(
10 | {item.value: {
11 | 'stdout': res.stdout,
12 | 'stderr': res.stderr,
13 | 'expected': expected_answers[item.key],
14 | }}
15 | ) }}"
16 | when: expected_answers[item.key] not in res.stdout
17 |
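check.yml references `item.key` and `item.value` without defining a loop of its own, so it is presumably included once per entry of `required_domains`. A sketch of that inclusion, assuming the defaults above:

    - name: Validate each required DNS record
      ansible.builtin.include_tasks: check.yml
      loop: "{{ required_domains | dict2items }}"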
--------------------------------------------------------------------------------
/roles/validate_http_store/README.md:
--------------------------------------------------------------------------------
1 | # validate_http_store
2 |
3 | Checks via a round trip that the HTTP store is functional
--------------------------------------------------------------------------------
/roles/validate_http_store/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | http_store_dir: "{{ iso_download_dest_path | default('/opt/http_store/data') }}"
3 | http_port: 80
4 | http_host: "{{ discovery_iso_server | default('http://' + hostvars['http_store']['ansible_host']) }}:{{ http_port }}"
5 | ...
6 |
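The role's task files are not part of this dump, but the defaults imply the round trip: render a test file into `http_store_dir`, then fetch it back through `http_host`. A minimal sketch of that idea (task names and the destination filename are illustrative):

    - name: Place a test file in the HTTP store
      ansible.builtin.template:
        src: test_file.j2
        dest: "{{ http_store_dir }}/http_store_test"
        mode: "0644"

    - name: Fetch the test file back through the web server
      ansible.builtin.uri:
        url: "{{ http_host }}/http_store_test"
        return_content: true
      register: _round_trip
      failed_when: _round_trip.status != 200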
--------------------------------------------------------------------------------
/roles/validate_http_store/templates/test_file.j2:
--------------------------------------------------------------------------------
1 | {{ 99999999 | random | to_uuid }}
2 |
--------------------------------------------------------------------------------
/roles/vbmc/README.md:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/redhatci/ansible-collection-redhatci-ocp/9ad485a10e0a4db164e516643d75b5da6d718b60/roles/vbmc/README.md
--------------------------------------------------------------------------------
/roles/vbmc/README.rst:
--------------------------------------------------------------------------------
1 | Virtualbmc role
2 | ---------------
3 |
4 | This role installs the python-virtualbmc package and configures it for the VMs hosted
5 | on the hypervisor. It should be run on the hypervisor host.
6 |
7 | Usage examples
8 | ==============
9 |
10 | 1. Run default vbmc configuration::
11 |
12 | - name: Configure vbmc
13 | hosts: localhost
14 | any_errors_fatal: true
15 | tasks:
16 | - ansible.builtin.include_role:
17 | name: redhatci.ocp.vbmc
18 | vars:
19 | vbmc_nodes: "{{ groups.get('master', []) }}"
20 |
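tasks/main.yml (shown below) keys off `hook_action`, so by analogy a removal run would look like the following sketch (remove.yml additionally expects a `vbmc_node` variable; the node name here is illustrative):

    - name: Remove vbmc
      hosts: localhost
      any_errors_fatal: true
      tasks:
        - ansible.builtin.include_role:
            name: redhatci.ocp.vbmc
          vars:
            hook_action: remove
            vbmc_node: master-0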
--------------------------------------------------------------------------------
/roles/vbmc/defaults/main.yml:
--------------------------------------------------------------------------------
1 | vbmc_user: "root"
2 | vbmc_pass: "password"
3 | vbmc_home: "/root"
4 | # choice of hypervisor or undercloud
5 | vbmc_host: "hypervisor"
6 | vbmc_start_port: 6230
7 | action: install
8 | vbmc_virtualenv: "/root/.virtualenvs/vbmc"
9 | vbmc_config_dir: "{{ vbmc_home }}/.vbmc"
10 | vbmc_systemd_unit: "/etc/systemd/system/virtualbmc.service"
11 | vbmc_version: '1.4.0'
12 | vbmc_bin: "{{ vbmc_virtualenv }}/bin/vbmc"
13 | vbmcd_bin: "{{ vbmc_virtualenv }}/bin/vbmcd"
14 | zone: "libvirt"
15 | vbmc_ipmi_nodes: ipmi_nodes.json
16 |
--------------------------------------------------------------------------------
/roles/vbmc/tasks/check.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Get vbmc node state
3 | ansible.builtin.shell: |
4 | set -eo pipefail
5 | vbmc list | awk '/{{ item }}/ {print $4,$8}'
6 | register: vbmc_status
7 | with_items: "{{ vbmc_nodes }}"
8 |
9 | - name: Check and run vbmc node if it is down
10 | ansible.builtin.command: "vbmc start {{ item.item }}"
11 | with_items: "{{ vbmc_status.results }}"
12 | when: item.stdout.split()[0] == 'down'
13 |
14 | - name: Create dictionary with vbmc port
15 | ansible.builtin.set_fact:
16 | vbmc_ports: "{{ vbmc_ports | default({}) | combine({item.item: item.stdout.split()[1]}) }}"
17 | with_items: "{{ vbmc_status.results }}"
18 |
19 | - name: Include firewalld rules check
20 | ansible.builtin.include_tasks: firewalld.yml
21 |
--------------------------------------------------------------------------------
/roles/vbmc/tasks/firewalld.yml:
--------------------------------------------------------------------------------
1 | - name: Install firewalld
2 | become: true
3 | ansible.builtin.package:
4 | name:
5 | - firewalld
6 | state: present
7 |
8 | - name: Set firewall zone to public
9 | ansible.builtin.set_fact:
10 | zone: public
11 | when: ansible_distribution_version|int < 8
12 |
13 | - name: Allow access to vbmc ports from the baremetal network
14 | ignore_errors: true
15 | become: true
16 | ansible.posix.firewalld:
17 | zone: "{{ zone }}"
18 | port: "{{ item.value }}/udp"
19 | permanent: true
20 | state: enabled
21 | immediate: true
22 | with_dict: "{{ vbmc_ports }}"
23 |
--------------------------------------------------------------------------------
/roles/vbmc/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Install hook
3 | when: hook_action == 'install'
4 | become: true
5 | block:
6 | - name: Install
7 | ansible.builtin.include_tasks: install.yml
8 | - name: Configure
9 | ansible.builtin.include_tasks: configure.yml
10 | - name: Register vars for instackenv template
11 | ansible.builtin.set_fact:
12 | vbmc_ports: "{{ vbmc_ports }}"
13 | vbmc_management_address: "{{ vbmc_management_address }}"
14 |
15 | - name: Remove/cleanup hook
16 | become: true
17 | block:
18 | - name: Check
19 | ansible.builtin.include_tasks: check.yml
20 | when: hook_action == 'check'
21 | - name: Cleanup
22 | ansible.builtin.include_tasks: cleanup.yml
23 | when: hook_action == 'cleanup'
24 | - name: Remove
25 | ansible.builtin.include_tasks: remove.yml
26 | when: hook_action == 'remove'
27 |
--------------------------------------------------------------------------------
/roles/vbmc/tasks/remove.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Check for any vbmc processes
3 | ansible.builtin.command: pgrep vbmc
4 | register: vbmc_proc
5 | failed_when: false
6 | changed_when: false
7 |
8 | - name: Remove existing Virtual BMCs
9 | ansible.builtin.shell: |
10 | {{ vbmc_bin }} stop {{ hostvars[vbmc_node].original_name | default(vbmc_node) }}
11 | {{ vbmc_bin }} delete {{ hostvars[vbmc_node].original_name | default(vbmc_node) }}
12 | failed_when: false
13 | when:
14 | - vbmc_proc.rc == 0
15 | - vbmc_node is defined
16 |
--------------------------------------------------------------------------------
/roles/vbmc/tasks/start_node.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Start each node using vbmc
3 | block:
4 | - name: Trying to start the node
5 | ansible.builtin.command: "{{ vbmc_bin }} start {{ node.key }}"
6 | register: command_result
7 | until: command_result.rc == 0
8 | retries: 5
9 | delay: 5
10 | rescue:
11 | - name: Check log message
12 | ansible.builtin.debug:
13 | msg: "{{ command_result.stderr }}"
14 | - name: Fail if the BMC instance is not running yet # noqa: no-jinja-when
15 | ansible.builtin.fail:
16 | msg: "BMC instance {{ node.key }} failed to start"
17 | when: not "BMC instance {{ node.key }} already running" in command_result.stderr
18 |
--------------------------------------------------------------------------------
/roles/vbmc/templates/nodes_dict.json.j2:
--------------------------------------------------------------------------------
1 | {
2 | {% for host_string in nodes_info.results | map(attribute='stdout') | list %}
3 | {% set host_data = host_string | from_yaml %}
4 | "{{ host_data.name }}": {
5 | "ipmi_user": "{{ vbmc_user }}",
6 | "ipmi_pass": "{{ vbmc_pass }}",
7 | "ipmi_address": "{{ vbmc_management_address }}",
8 | "ipmi_port": "{{ vbmc_ports[host_data.name] }}",
9 | "mac_address": "{{ host_data.mac }}"
10 | {% if host_data.hint_serial | length %}
11 | ,"root_device_hint": "serialNumber",
12 | "root_device_hint_value": "{{ host_data.hint_serial }}"
13 | {% endif %}
14 | }{% if not loop.last %},{% endif %}
15 | {% endfor %}
16 | }
17 |
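For a single host, the rendered JSON would look roughly like this (all values illustrative):

    {
      "master-0": {
        "ipmi_user": "root",
        "ipmi_pass": "password",
        "ipmi_address": "192.0.2.10",
        "ipmi_port": "6230",
        "mac_address": "52:54:00:aa:bb:cc",
        "root_device_hint": "serialNumber",
        "root_device_hint_value": "S4XYNE0M"
      }
    }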
--------------------------------------------------------------------------------
/roles/vendors/dell/README.md:
--------------------------------------------------------------------------------
1 | # dell
2 |
3 | Boots a Dell iDRAC machine to ISO or disk via Redfish
4 |
--------------------------------------------------------------------------------
/roles/vendors/dell/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | boot_iso_url: "{{ discovery_iso_server }}/{{ discovery_iso_name }}"
3 | bmc_address: "{{ hostvars[target_host]['bmc_address'] }}"
4 | bmc_user: "{{ hostvars[target_host]['bmc_user'] }}"
5 | bmc_password: "{{ hostvars[target_host]['bmc_password'] }}"
6 |
--------------------------------------------------------------------------------
/roles/vendors/dell/tasks/disk.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Discover the iDRAC version for Dell hardware
3 | containers.podman.podman_container:
4 | name: "{{ bmc_address }}-rac-version"
5 | network: host
6 | image: quay.io/dphillip/racadm-image
7 | state: started
8 | detach: false
9 | rm: true
10 | command:
11 | [
12 | "-v",
13 | "-r", "{{ bmc_address }}",
14 | "-u", "{{ bmc_user }}",
15 | "-p", "{{ bmc_password }}",
16 | "-i",
17 | "{{ boot_iso_url }}",
18 | ]
19 | register: drac_version
20 |
21 | - name: "Using iDrac ISO method | Found iDrac {{ drac_version.stdout }}"
22 | ansible.builtin.fail:
23 | msg: "Not implemented"
24 | when: drac_version.stdout | int <= 13
25 |
26 | - name: "Using RedFish ISO method | Found iDrac {{ drac_version.stdout }}"
27 | ansible.builtin.include_tasks: ./disk_redfish.yml
28 | when: drac_version.stdout | int > 13
29 |
--------------------------------------------------------------------------------
/roles/vendors/dell/tasks/exists.yml:
--------------------------------------------------------------------------------
1 | - name: Debug
2 | ansible.builtin.debug:
3 | msg: "Dell vendor role exists"
4 | verbosity: 1
5 |
--------------------------------------------------------------------------------
/roles/vendors/dell/tasks/iso.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Discover the iDRAC version for Dell hardware
3 | containers.podman.podman_container:
4 | name: "{{ bmc_address }}-rac-version"
5 | network: host
6 | image: quay.io/dphillip/racadm-image
7 | state: started
8 | detach: false
9 | rm: true
10 | command:
11 | [
12 | "-v",
13 | "-r", "{{ bmc_address }}",
14 | "-u", "{{ bmc_user }}",
15 | "-p", "{{ bmc_password }}",
16 | "-i",
17 | "{{ boot_iso_url }}",
18 | ]
19 | register: drac_version
20 |
21 | - name: Using iDRAC ISO method for 13G and below
22 | ansible.builtin.include_tasks: ./iso_idrac.yml
23 | when: drac_version.stdout | int <= 13
24 |
25 | - name: Using Redfish ISO method for 14G and above
26 | ansible.builtin.include_tasks: ./iso_redfish.yml
27 | when: drac_version.stdout | int > 13
28 |
--------------------------------------------------------------------------------
/roles/vendors/dell/tasks/iso_idrac.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Mount Live ISO, Boot into Live ISO (Dell 13G iDRAC8 and below)
3 | block:
4 | - name: Racadm container to mount and boot to discovery ISO
5 | containers.podman.podman_container:
6 | name: "{{ bmc_address }}-rac-image"
7 | network: host
8 | image: quay.io/dphillip/racadm-image
9 | state: started
10 | rm: true
11 | command: [
12 | "-r", "{{ bmc_address }}",
13 | "-u", "{{ bmc_user }}",
14 | "-p", "{{ bmc_password }}",
15 | "-i", "{{ boot_iso_url }}"
16 | ]
17 |
--------------------------------------------------------------------------------
/roles/vendors/hpe/README.md:
--------------------------------------------------------------------------------
1 | # hpe
2 |
3 | Boots an HPE iLO machine to ISO or disk via Redfish
--------------------------------------------------------------------------------
/roles/vendors/hpe/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | boot_iso_url: "{{ discovery_iso_server }}/{{ discovery_iso_name }}"
3 | bmc_address: "{{ hostvars[target_host]['bmc_address'] }}"
4 | bmc_user: "{{ hostvars[target_host]['bmc_user'] }}"
5 | bmc_password: "{{ hostvars[target_host]['bmc_password'] }}"
6 |
--------------------------------------------------------------------------------
/roles/vendors/hpe/tasks/exists.yml:
--------------------------------------------------------------------------------
1 | - name: Debug
2 | ansible.builtin.debug:
3 | msg: "HPE vendor role exists"
4 | verbosity: 1
5 |
--------------------------------------------------------------------------------
/roles/vendors/kvm/README.md:
--------------------------------------------------------------------------------
1 | # kvm
2 |
3 | Boots a VM to ISO or disk via Redfish (sushy-tools)
--------------------------------------------------------------------------------
/roles/vendors/kvm/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | boot_iso_url: "{{ discovery_iso_server }}/{{ discovery_iso_name }}"
3 | secure_sushy_tools: "{{ secure | default(true) }}"
4 | bmc_address: "{{ hostvars[target_host]['bmc_address'] }}"
5 | base_bmc_address: "{{ secure_sushy_tools | bool | ternary('https', 'http') }}://{{ bmc_address }}"
6 | bmc_user: "{{ hostvars[target_host]['bmc_user'] | default(omit) }}"
7 | bmc_password: "{{ hostvars[target_host]['bmc_password'] | default(omit) }}"
8 | vm_node_prefix: "{{ cluster_name }}_"
9 | vm_name: "{{ target_host.startswith(vm_node_prefix) | ternary(target_host, (vm_node_prefix + target_host)) }}"
10 |
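The `vm_name` ternary only prepends the cluster prefix when it is not already present, so with `cluster_name: mycluster` both `master-0` and `mycluster_master-0` resolve to `mycluster_master-0`. A quick sketch:

    - name: Show the resolved VM name
      ansible.builtin.debug:
        msg: "{{ target_host.startswith(vm_node_prefix) | ternary(target_host, vm_node_prefix + target_host) }}"
      vars:
        vm_node_prefix: "mycluster_"
        target_host: "master-0"   # prints "mycluster_master-0"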
--------------------------------------------------------------------------------
/roles/vendors/kvm/tasks/eject.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: KVM Eject Virtual Media (if any) {{ target_host }}
3 | ansible.builtin.uri:
4 | url: "{{ system_manager_url }}/VirtualMedia/Cd/Actions/VirtualMedia.EjectMedia"
5 | user: "{{ bmc_user }}"
6 | password: "{{ bmc_password }}"
7 | method: POST
8 | body_format: json
9 | body: {}
10 | status_code: [200, 204]
11 | validate_certs: false
12 | return_content: true
13 | register: redfish_reply
14 | ignore_errors: true
15 |
16 | - name: Debug
17 | ansible.builtin.debug:
18 | var: redfish_reply
19 | verbosity: 1
20 | ...
21 |
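`system_manager_url` is not defined in the defaults above; with a Redfish emulator it would normally point at the Manager resource, along the lines of this sketch (the manager ID is an assumption and depends on the emulator):

    system_manager_url: "{{ base_bmc_address }}/redfish/v1/Managers/<manager-id>"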
--------------------------------------------------------------------------------
/roles/vendors/kvm/tasks/exists.yml:
--------------------------------------------------------------------------------
1 | - name: Debug
2 | ansible.builtin.debug:
3 | msg: "KVM vendor role exists"
4 | verbosity: 1
5 |
--------------------------------------------------------------------------------
/roles/vendors/lenovo/README.md:
--------------------------------------------------------------------------------
1 | # lenovo
2 |
3 | Boots a Lenovo machine to ISO or disk via Redfish
--------------------------------------------------------------------------------
/roles/vendors/lenovo/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | boot_iso_url: "{{ discovery_iso_server }}/{{ discovery_iso_name }}"
3 | bmc_address: "{{ hostvars[target_host]['bmc_address'] }}"
4 | bmc_user: "{{ hostvars[target_host]['bmc_user'] }}"
5 | bmc_password: "{{ hostvars[target_host]['bmc_password'] }}"
6 |
--------------------------------------------------------------------------------
/roles/vendors/lenovo/tasks/eject.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Lenovo Eject Virtual Media {{ target_host }}
3 | ansible.builtin.uri:
4 | url: "https://{{ bmc_address }}/redfish/v1/Managers/1/VirtualMedia/EXT1"
5 | user: "{{ bmc_user }}"
6 | password: "{{ bmc_password }}"
7 | method: PATCH
8 | body_format: json
9 | body: {"Image": null, "Inserted": false}
10 | status_code: [200, 204]
11 | force_basic_auth: true
12 | validate_certs: false
13 | return_content: true
14 | register: redfish_reply
15 |
16 | - name: Debug
17 | ansible.builtin.debug:
18 | var: redfish_reply
19 | verbosity: 1
20 | ...
21 |
--------------------------------------------------------------------------------
/roles/vendors/lenovo/tasks/exists.yml:
--------------------------------------------------------------------------------
1 | - name: Debug
2 | ansible.builtin.debug:
3 | msg: "Lenovo vendor role exists"
4 | verbosity: 1
5 |
--------------------------------------------------------------------------------
/roles/vendors/pxe/README.md:
--------------------------------------------------------------------------------
1 | # pxe
2 |
3 | Boots a machine to PXE or disk via IPMI
--------------------------------------------------------------------------------
/roles/vendors/pxe/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | boot_iso_url: "{{ discovery_iso_server }}/{{ discovery_iso_name }}"
3 | bmc_address: "{{ hostvars[target_host]['bmc_address'] }}"
4 | bmc_user: "{{ hostvars[target_host]['bmc_user'] }}"
5 | bmc_password: "{{ hostvars[target_host]['bmc_password'] }}"
6 | bmc_port: "{{ hostvars[target_host]['bmc_port'] | default(623) }}"
7 |
--------------------------------------------------------------------------------
/roles/vendors/pxe/tasks/disk.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: PXE server Power off the server
3 | community.general.ipmi_power:
4 | name: "{{ bmc_address }}"
5 | user: "{{ bmc_user }}"
6 | password: "{{ bmc_password }}"
7 | port: "{{ bmc_port }}"
8 | state: "off"
9 |
10 | - name: Set server to boot from disk
11 | community.general.ipmi_boot:
12 | name: "{{ bmc_address }}"
13 | user: "{{ bmc_user }}"
14 | password: "{{ bmc_password }}"
15 | port: "{{ bmc_port }}"
16 | bootdev: hd
17 | persistent: false
18 | uefiboot: true
19 |
20 | - name: PXE server Power on
21 | community.general.ipmi_power:
22 | name: "{{ bmc_address }}"
23 | user: "{{ bmc_user }}"
24 | password: "{{ bmc_password }}"
25 | port: "{{ bmc_port }}"
26 | state: boot
27 |
--------------------------------------------------------------------------------
/roles/vendors/pxe/tasks/exists.yml:
--------------------------------------------------------------------------------
1 | - name: Debug
2 | ansible.builtin.debug:
3 | msg: "PXE vendor role exists"
4 | verbosity: 1
5 |
--------------------------------------------------------------------------------
/roles/vendors/pxe/tasks/iso.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: PXE server Power off the server
3 | community.general.ipmi_power:
4 | name: "{{ bmc_address }}"
5 | user: "{{ bmc_user }}"
6 | password: "{{ bmc_password }}"
7 | port: "{{ bmc_port }}"
8 | state: "off"
9 |
10 | - name: Set server to boot from network
11 | community.general.ipmi_boot:
12 | name: "{{ bmc_address }}"
13 | user: "{{ bmc_user }}"
14 | password: "{{ bmc_password }}"
15 | port: "{{ bmc_port }}"
16 | bootdev: network
17 | persistent: false
18 | uefiboot: true
19 |
20 | - name: PXE server Power on
21 | community.general.ipmi_power:
22 | name: "{{ bmc_address }}"
23 | user: "{{ bmc_user }}"
24 | password: "{{ bmc_password }}"
25 | port: "{{ bmc_port }}"
26 | state: boot
27 |
--------------------------------------------------------------------------------
/roles/vendors/supermicro/README.md:
--------------------------------------------------------------------------------
1 | # supermicro
2 |
3 | Boots a Supermicro machine to ISO or disk via Redfish
--------------------------------------------------------------------------------
/roles/vendors/supermicro/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | boot_iso_url: "{{ discovery_iso_server }}/{{ discovery_iso_name }}"
3 | bmc_address: "{{ hostvars[target_host]['bmc_address'] }}"
4 | bmc_user: "{{ hostvars[target_host]['bmc_user'] }}"
5 | bmc_password: "{{ hostvars[target_host]['bmc_password'] }}"
6 |
--------------------------------------------------------------------------------
/roles/vendors/supermicro/tasks/exists.yml:
--------------------------------------------------------------------------------
1 | - name: Debug
2 | ansible.builtin.debug:
3 | msg: "SuperMicro vendor role exists"
4 | verbosity: 1
5 |
--------------------------------------------------------------------------------
/roles/vendors/zt/README.md:
--------------------------------------------------------------------------------
1 | # zt
2 |
3 | Boots a ZT machine to ISO or disk via Redfish
--------------------------------------------------------------------------------
/roles/vendors/zt/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | boot_iso_url: "{{ discovery_iso_server }}/{{ discovery_iso_name }}"
3 | bmc_address: "{{ hostvars[target_host]['bmc_address'] }}"
4 | bmc_user: "{{ hostvars[target_host]['bmc_user'] }}"
5 | bmc_password: "{{ hostvars[target_host]['bmc_password'] }}"
6 | bmc_resource_id: "{{ hostvars[target_host]['bmc_resource_id'] }}"
7 |
--------------------------------------------------------------------------------
/roles/vendors/zt/tasks/exists.yml:
--------------------------------------------------------------------------------
1 | - name: Debug
2 | ansible.builtin.debug:
3 | msg: "ZT vendor role exists"
4 | verbosity: 1
5 |
--------------------------------------------------------------------------------
/roles/vendors/zt/tasks/set_resource_id.yml:
--------------------------------------------------------------------------------
1 | - name: Fetch resource_id
2 | ansible.builtin.import_tasks:
3 | file: "./get_resource_id.yml"
4 | when: hostvars[target_host]['bmc_resource_id'] is not defined
5 | # The long form is used because otherwise the condition would fail at the
6 | # templating stage, instead of in the test, when bmc_resource_id is not defined
7 |
8 | - name: Set bmc_resource_id as resource_id
9 | ansible.builtin.set_fact:
10 | resource_id: "{{ bmc_resource_id }}"
11 | when: hostvars[target_host]['bmc_resource_id'] is defined
12 | # The long form is used because otherwise the condition would fail at the
13 | # templating stage, instead of in the test, when bmc_resource_id is not defined
14 |
--------------------------------------------------------------------------------
/roles/verify_tests/defaults/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | skip_absent_testfiles: false
3 | ...
4 |
--------------------------------------------------------------------------------
/roles/verify_tests/tasks/main.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Install junitparser, needed to parse the tests
3 | ansible.builtin.pip:
4 | name:
5 | - junitparser
6 | become: true
7 |
8 | - name: Loop over mandatory test results
9 | ansible.builtin.include_tasks: read_junit_files.yml
10 | loop: "{{ tests_to_verify }}"
11 | loop_control:
12 | loop_var: t
13 | ...
14 |
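`tests_to_verify` is supplied by the caller; read_junit_files.yml (next file) treats each `t.filename` as a glob resolved under `job_logs.path`. A minimal sketch:

    tests_to_verify:
      - filename: "junit_*.xml"   # glob, relative to job_logs.path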
--------------------------------------------------------------------------------
/roles/verify_tests/tasks/read_junit_files.yml:
--------------------------------------------------------------------------------
1 | ---
2 | - name: Loop through the given JUnit files
3 | ansible.builtin.include_tasks: parse_junit_file.yml
4 | with_fileglob:
5 | - "{{ job_logs.path }}/{{ t.filename }}"
6 | register: junit_file
7 |
8 | - name: "Fail if file does not match any junit file"
9 | ansible.builtin.fail:
10 | msg: "{{ t.filename }} does not match any junit file"
11 | when:
12 | - junit_file.skipped is defined
13 | # Do not fail when the option to skip missing files is on
14 | - not skip_absent_testfiles | bool
15 |
--------------------------------------------------------------------------------
/roles/ztp/setup_cluster_image_set/defaults/main.yml:
--------------------------------------------------------------------------------
1 | scis_branch: main
2 |
--------------------------------------------------------------------------------
/tests/config.yml:
--------------------------------------------------------------------------------
1 | ---
2 | # ansible-test configuration file (stable-2.12+)
3 | # See template for more information:
4 | # https://github.com/ansible/ansible/blob/devel/test/lib/ansible_test/config/config.yml
5 |
6 | modules:
7 | # Only perform tests on python versions >= 3.6
8 | python_requires: '>=3.6'
9 |
--------------------------------------------------------------------------------
/tests/integration/targets/copy_and_render/files/file1.txt:
--------------------------------------------------------------------------------
1 | plain file
2 |
--------------------------------------------------------------------------------
/tests/integration/targets/copy_and_render/files/template.j2:
--------------------------------------------------------------------------------
1 | HELLO: {{ 'Hi' }}
2 |
--------------------------------------------------------------------------------
/tests/integration/targets/copy_and_render/runme.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | dir=$(dirname "$0")
4 |
5 | exec ansible-playbook -v -i "$dir/../../inventory" -e "car_source_dir=$dir/files" "$dir/copy_and_render.yml"
6 |
7 | # runme.sh ends here
8 |
--------------------------------------------------------------------------------
/tests/unit/data/test_ocp_compatibility_data.json:
--------------------------------------------------------------------------------
1 | [
2 | {
3 | "name": "events.v1beta1.events.k8s.io",
4 | "removedInRelease": "1.25",
5 | "serviceAccounts": ["system:serviceaccount:default:eventtest-operator-service-account"]
6 | },
7 | {
8 | "name": "flowschemas.v1beta1.flowcontrol.apiserver.k8s.io",
9 | "removedInRelease": "1.26",
10 | "serviceAccounts": ["system:serviceaccount:openshift-cluster-version:default"]
11 | },
12 | {
13 | "name": "podsecuritypolicies.v1beta1.policy",
14 | "removedInRelease": "1.25",
15 | "serviceAccounts": ["system:kube-controller-manager"]
16 | },
17 | {
18 | "name": "prioritylevelconfigurations.v1beta1.flowcontrol.apiserver.k8s.io",
19 | "removedInRelease": "1.26",
20 | "serviceAccounts": ["system:serviceaccount:openshift-cluster-version:default"]
21 | }
22 | ]
23 |
--------------------------------------------------------------------------------
/tests/unit/requirements.txt:
--------------------------------------------------------------------------------
1 | junit_xml
2 | junitparser
3 | jmespath
4 | python-dateutil
5 | semver
6 |
--------------------------------------------------------------------------------