├── playbooks ├── satellite │ ├── files │ │ └── limits.conf │ ├── capsules-populate.yaml │ ├── client-scripts.yaml │ ├── sosreport_gatherer.yaml │ ├── satellite-populate.yaml │ ├── capsules_specific.yaml │ ├── satellite_specific.yaml │ ├── satellite-capsule_tune.yaml │ ├── ebtables-flush.yaml │ ├── foremanctl_specific.yaml │ ├── roles │ │ ├── client-scripts │ │ │ ├── templates │ │ │ │ ├── podman-login.sh.j2 │ │ │ │ └── podman-pull-rhosp.sh.j2 │ │ │ ├── files │ │ │ │ ├── host-registration_prepare.yaml │ │ │ │ └── host-registration.yaml │ │ │ └── tasks │ │ │ │ └── main.yaml │ │ ├── enable-remote-exec-by-ip │ │ │ └── tasks │ │ │ │ └── main.yaml │ │ ├── workaround-local-sat-repo │ │ │ ├── defaults │ │ │ │ └── main.yaml │ │ │ └── tasks │ │ │ │ └── main.yaml │ │ ├── satellite-populate │ │ │ ├── tasks │ │ │ │ ├── restart_and_refresh.yaml │ │ │ │ ├── try_enable_repo.yaml │ │ │ │ ├── enable_repo.yaml │ │ │ │ └── main.yaml │ │ │ └── defaults │ │ │ │ └── main.yaml │ │ ├── pbench_client │ │ │ └── tasks │ │ │ │ └── main.yaml │ │ ├── puppet-autosign │ │ │ └── tasks │ │ │ │ └── main.yaml │ │ ├── haproxy │ │ │ ├── vars │ │ │ │ └── main.yaml │ │ │ └── tasks │ │ │ │ └── main.yaml │ │ ├── upgrade-restart │ │ │ └── tasks │ │ │ │ └── main.yaml │ │ └── apply_custom-hiera │ │ │ └── tasks │ │ │ └── main.yaml │ ├── capsule_lbs.yaml │ ├── satellite-capsule_common.yaml │ ├── capsules.yaml │ ├── capsules-check.yaml │ ├── hostgroup-create.yaml │ ├── metrics.yaml │ ├── installation.yaml │ ├── satellite-remove-hosts.yaml │ └── subnet-create.yaml ├── common │ ├── roles │ │ ├── remove-home-extend-root │ │ │ ├── defaults │ │ │ │ └── main.yaml │ │ │ └── tasks │ │ │ │ ├── main.yaml │ │ │ │ ├── remove_home_partition.yaml │ │ │ │ └── extend_root_partition.yaml │ │ ├── add-epel │ │ │ └── tasks │ │ │ │ └── main.yaml │ │ ├── enlarge-inotify-limits │ │ │ ├── files │ │ │ │ └── 40-max-user-watches.conf │ │ │ └── tasks │ │ │ │ └── main.yaml │ │ ├── common │ │ │ └── defaults │ │ │ │ └── main.yaml │ │ ├── enlarge-arp-table │ │ │ ├── tasks │ │ │ │ └── main.yaml │ │ │ └── vars │ │ │ │ └── main.yaml │ │ ├── static_repo │ │ │ └── tasks │ │ │ │ └── main.yaml │ │ ├── epel-not-present │ │ │ └── tasks │ │ │ │ └── main.yaml │ │ ├── upgrade-restart │ │ │ └── tasks │ │ │ │ └── main.yaml │ │ ├── plain-network │ │ │ ├── tasks │ │ │ │ └── main.yaml │ │ │ └── vars │ │ │ │ └── main.yaml │ │ ├── common-eth1 │ │ │ └── tasks │ │ │ │ └── main.yaml │ │ ├── scalelab-generic-cleanup │ │ │ └── tasks │ │ │ │ └── main.yaml │ │ └── rhsm │ │ │ └── tasks │ │ │ └── main.yaml │ ├── common.yaml │ ├── prepare_host.yaml │ └── free-sdb.yaml ├── tests │ ├── FAM │ │ ├── repositories.yaml │ │ ├── content_views.yaml │ │ ├── activation_keys.yaml │ │ ├── content_view_publish.yaml │ │ ├── lifecycle_environments.yaml │ │ ├── manifest_download.yaml │ │ ├── product_create.yaml │ │ ├── flatpak_remote_scan.yaml │ │ ├── manifest_test.yaml │ │ ├── cv_publish.yaml │ │ ├── lce_create.yaml │ │ ├── ak_content_override.yaml │ │ ├── ak_create.yaml │ │ ├── flatpak_remote_repo_mirror.yaml │ │ ├── cv_filter_create.yaml │ │ ├── flatpak_remote_create.yaml │ │ ├── manifest-excercise.yaml │ │ ├── cv_create.yaml │ │ ├── repo_create.yaml │ │ ├── manifest_import.yaml │ │ ├── cv_filter_rule_create.yaml │ │ ├── wait_for_task.yaml │ │ ├── reposet_enable.yaml │ │ ├── repo_sync.yaml │ │ ├── job_invocation_create.yaml │ │ ├── cv_version_promote.yaml │ │ └── includes │ │ │ └── manifest_test.yaml │ ├── files │ │ ├── requirements.yaml │ │ └── init.pp │ ├── roles │ │ ├── wait-for-task-script │ │ │ ├── tasks │ 
│ │ │ └── main.yaml │ │ │ └── files │ │ │ │ └── wait-for-task.sh │ │ └── grafana-graph-download │ │ │ └── tasks │ │ │ └── main.yaml │ ├── openSCAP-host-prep.yaml │ ├── includes │ │ ├── update_used_containers.yaml │ │ ├── show_grepper.yaml │ │ ├── prepare_clients_ini.yaml │ │ ├── generate-applicability.yaml │ │ ├── puppet-big-test-register.yaml │ │ ├── puppet-big-test-deploy.yaml │ │ └── manifest-excercise.yaml │ ├── candlepin_identity_regenerate.yaml │ ├── downloadtest-cleanup.yaml │ ├── lce_create_test.yaml │ ├── continuous-rex.yaml │ ├── hammer-list.yaml │ ├── containers.yaml │ ├── api-create-organization.yaml │ ├── openSCAP-role.yaml │ ├── openSCAP-sat-prep.yaml │ ├── downloadtest-syncrepo.yaml │ ├── api-get-task-duration.yaml │ ├── openSCAP-test.yaml │ ├── webui-pages.yaml │ ├── puppet-big-test.yaml │ ├── continuous-reg.yaml │ ├── downloadtest.yaml │ └── puppet-single-setup.yaml └── katello │ ├── installation.yaml │ ├── roles │ ├── configure_firewall │ │ └── tasks │ │ │ └── main.yaml │ ├── add_rhsm_repos │ │ └── tasks │ │ │ └── main.yaml │ ├── setup │ │ ├── defaults │ │ │ └── main.yaml │ │ └── tasks │ │ │ └── main.yaml │ ├── add_host_to_hostfile │ │ └── tasks │ │ │ └── main.yaml │ └── add_katello_repos │ │ └── tasks │ │ └── main.yaml │ ├── katello_mirror_installation.yaml │ ├── katello_nightly_installation.yaml │ ├── katello_stable_installation_3.14.yaml │ └── katello_stable_installation.yaml ├── experiment ├── README.md ├── test.sh ├── backup.sh ├── util-backup.sh ├── util-restore.sh ├── rex-mqtt.sh ├── webui.sh ├── sync-iso.sh ├── sync-docker.sh ├── sync-yum.sh ├── sync-ansible-collections.sh ├── sync-mixed-one-cv.sh ├── sync-mixed-two-cv.sh ├── only_reg-number.sh ├── rex_stateless.sh ├── rex-constrained.sh ├── reg-average.py ├── sync.sh └── rex.sh ├── docs ├── extra │ ├── links │ └── capsule_vm.xml ├── sample_ec2_deployment ├── matplotlib_installation_instructions ├── satperf.log.example ├── TODO.md └── get_EC2_instances_IP.sh ├── cleanup ├── ansible.cfg ├── .gitignore ├── scripts ├── populate_container_registry-Containerfile ├── cv_publish_scale.sh ├── 2023-build_and_push_repos │ ├── README.md │ └── script.sh ├── sync_content.sh ├── sync_capsules.sh ├── build-lots-of-packages-updateinfo.sh ├── populate_container_registry.sh ├── grep_production_log.py ├── download_repo.sh ├── create_lots_of_subnets.sh ├── build-lots-of-packages.py └── run-puppet-workload-recorder.sh ├── requirements.yml ├── rel-eng ├── build.sh └── satellite-performance.spec ├── infra └── kibana │ └── saved_objects │ ├── README.md │ ├── dashboard_e5764f00-5e85-11ec-8cd3-5d989c3b9841.json │ ├── visualization_73397dc1-6b75-4066-bb1b-bdb1e53d1970.json │ ├── visualization_739cfbd4-6cff-4394-bc3f-6e181d498f31.json │ ├── visualization_7a172852-8056-4706-b542-c6b7e2955e56.json │ └── visualization_7f8d3f60-5e84-11ec-8cd3-5d989c3b9841.json └── conf ├── hosts.ini └── satperf.yaml /playbooks/satellite/files/limits.conf: -------------------------------------------------------------------------------- 1 | [Service] 2 | LimitNOFILE=1000000 3 | -------------------------------------------------------------------------------- /experiment/README.md: -------------------------------------------------------------------------------- 1 | Experiments 2 | =========== 3 | 4 | Scripts to run long-running experiments 5 | -------------------------------------------------------------------------------- /playbooks/common/roles/remove-home-extend-root/defaults/main.yaml: 
-------------------------------------------------------------------------------- 1 | --- 2 | extend_root_partition: False 3 | remove_home_partition: False 4 | ... 5 | -------------------------------------------------------------------------------- /playbooks/satellite/capsules-populate.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: capsules 3 | gather_facts: false 4 | roles: 5 | - capsule-populate 6 | ... 7 | -------------------------------------------------------------------------------- /playbooks/satellite/client-scripts.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: container_hosts 3 | gather_facts: false 4 | roles: 5 | - role: client-scripts 6 | ... 7 | -------------------------------------------------------------------------------- /playbooks/satellite/sosreport_gatherer.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: satellite6,capsules 3 | gather_facts: no 4 | roles: 5 | - name: sosreport_gatherer 6 | ... 7 | -------------------------------------------------------------------------------- /playbooks/satellite/satellite-populate.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: satellite6 3 | gather_facts: False 4 | roles: 5 | - puppet-autosign 6 | - satellite-populate 7 | ... 8 | -------------------------------------------------------------------------------- /playbooks/common/roles/add-epel/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Install EPEL yum repo package 3 | yum: 4 | name: "{{ epel_repo_installer }}" 5 | state: present 6 | -------------------------------------------------------------------------------- /playbooks/satellite/capsules_specific.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "Specific configuration steps for capsule(s) servers" 3 | hosts: capsules 4 | 5 | roles: 6 | - role: capsule 7 | ... 8 | -------------------------------------------------------------------------------- /playbooks/satellite/satellite_specific.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Specific configuration steps for Satellite servers 3 | hosts: satellite6 4 | 5 | roles: 6 | - role: setup 7 | ... 8 | -------------------------------------------------------------------------------- /docs/extra/links: -------------------------------------------------------------------------------- 1 | 2 | sat_repo pulp endpoint 3 | http://sat-r220-02.lab.eng.rdu2.redhat.com/pulp/repos/Sat6-CI/QA/Satellite_RHEL7/custom/Red_Hat_Satellite_6_2_Composes/RHEL7_Satellite_x86_64_os/ 4 | -------------------------------------------------------------------------------- /playbooks/satellite/satellite-capsule_tune.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Tuning configuration steps for Satellite/capsules servers 3 | hosts: satellite6,capsules 4 | 5 | roles: 6 | - role: tune 7 | ... 
8 | -------------------------------------------------------------------------------- /playbooks/satellite/ebtables-flush.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: all 3 | gather_facts: False 4 | tasks: 5 | - name: "Flush ebtables to fix routing" 6 | command: 7 | ebtables -t nat --flush 8 | ... 9 | -------------------------------------------------------------------------------- /playbooks/satellite/foremanctl_specific.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Specific configuration steps for Satellite servers deployed with `foremanctl` 3 | hosts: satellite6 4 | 5 | roles: 6 | - role: foremanctl 7 | ... 8 | -------------------------------------------------------------------------------- /cleanup: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | rm -rf *~ log/ *.pyc */*.pyc */*.pyc __pycache__ lextab.py yacctab.py 4 | find . -name \*.retry -delete 5 | find . -name \*.swp -delete 6 | 7 | mkdir -p OLD-LOGS 8 | mv run-20* logs-20* *.log OLD-LOGS/ 9 | -------------------------------------------------------------------------------- /experiment/test.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | source experiment/run-library.sh 4 | 5 | 6 | section 'Checking environment' 7 | generic_environment_check false 8 | # unset skip_measurement 9 | # set +e 10 | 11 | 12 | junit_upload 13 | -------------------------------------------------------------------------------- /playbooks/tests/FAM/repositories.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | gather_facts: false 4 | 5 | roles: 6 | - role: theforeman.foreman.repositories 7 | vars: 8 | foreman_products: "{{ products }}" 9 | ... 10 | -------------------------------------------------------------------------------- /playbooks/tests/files/requirements.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | collections: 3 | - name: ansible.posix 4 | version: 1.5.4 5 | - name: ansible.utils 6 | version: 5.0.0 7 | - name: community.general 8 | version: 9.1.0 9 | ... 10 | -------------------------------------------------------------------------------- /playbooks/tests/roles/wait-for-task-script/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "Deploy wait-for-task.sh" 3 | copy: 4 | remote_src: no 5 | src: files/wait-for-task.sh 6 | dest: /root/wait-for-task.sh 7 | mode: 0700 8 | ... 9 | -------------------------------------------------------------------------------- /playbooks/tests/FAM/content_views.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | gather_facts: false 4 | 5 | roles: 6 | - role: theforeman.foreman.content_views 7 | vars: 8 | foreman_content_views: "{{ content_views }}" 9 | ... 
10 | -------------------------------------------------------------------------------- /docs/sample_ec2_deployment: -------------------------------------------------------------------------------- 1 | aws ec2 run-instances --image-id ami-f2b17f92 --key-name id_rsa_perf --instance-type m3.large --placement AvailabilityZone=us-west-2c --security-group-ids sg-b0e2b1d6 --subnet-id subnet-808a2fd8 --associate-public-ip-address 2 | -------------------------------------------------------------------------------- /playbooks/common/roles/enlarge-inotify-limits/files/40-max-user-watches.conf: -------------------------------------------------------------------------------- 1 | # See the story on https://jhutar.blogspot.cz/2017/12/error-too-many-open-files-when-inside.html 2 | 3 | fs.inotify.max_user_instances=8192 4 | fs.inotify.max_user_watches=1048576 5 | -------------------------------------------------------------------------------- /playbooks/satellite/roles/client-scripts/templates/podman-login.sh.j2: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -eu 4 | 5 | 6 | REGISTRY={{ tests_registration_target }} 7 | 8 | podman login --tls-verify=false --username {{ sat_user }} --password {{ sat_pass }} $REGISTRY 9 | -------------------------------------------------------------------------------- /playbooks/tests/FAM/activation_keys.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | gather_facts: false 4 | 5 | roles: 6 | - role: theforeman.foreman.activation_keys 7 | vars: 8 | foreman_activation_keys: "{{ activation_keys }}" 9 | ... 10 | -------------------------------------------------------------------------------- /ansible.cfg: -------------------------------------------------------------------------------- 1 | [defaults] 2 | forks = 10 3 | host_key_checking = False 4 | inventory = conf/hosts.ini 5 | interpreter_python = auto_silent 6 | 7 | [ssh_connection] 8 | ssh_args = -o ControlMaster=auto -o ControlPersist=300s 9 | control_path_dir = /tmp/cp 10 | -------------------------------------------------------------------------------- /playbooks/tests/FAM/content_view_publish.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | gather_facts: false 4 | 5 | roles: 6 | - role: theforeman.foreman.content_view_publish 7 | vars: 8 | foreman_content_views: "{{ content_views }}" 9 | ... 10 | -------------------------------------------------------------------------------- /playbooks/satellite/roles/enable-remote-exec-by-ip/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "Enable remote execution by IP instead of hostname" 3 | shell: 4 | hammer -u "{{sat_user}}" -p "{{sat_pass}}" settings set --name remote_execution_connect_by_ip --value true 5 | ... 6 | -------------------------------------------------------------------------------- /playbooks/tests/FAM/lifecycle_environments.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | gather_facts: false 4 | 5 | roles: 6 | - role: theforeman.foreman.lifecycle_environments 7 | vars: 8 | foreman_lifecycle_environments: "{{ lifecycle_environments }}" 9 | ... 
10 | -------------------------------------------------------------------------------- /docs/matplotlib_installation_instructions: -------------------------------------------------------------------------------- 1 | 2 | ### INSTALLATION PREREQUISITES 3 | 4 | you must have following yum packages before you install matplotlib from `pip install -r requirements.txt` 5 | 6 | ``` 7 | gcc-gfortran gcc-c++ libpng-devel freetype-devel python-devel openssl-devel 8 | ``` 9 | -------------------------------------------------------------------------------- /playbooks/common/roles/common/defaults/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | system_selinux_policy: targeted 3 | system_selinux_state: enforcing 4 | timesync_ntp_servers: 5 | - hostname: "{{ ntp_server | default('time.nist.gov') }}" 6 | iburst: yes 7 | prefer: yes 8 | trust: yes 9 | ... 10 | -------------------------------------------------------------------------------- /playbooks/tests/openSCAP-host-prep.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: container_hosts 3 | gather_facts: no 4 | tasks: 5 | - name: "Create /etc/cron.d folder" 6 | shell: 7 | ansible all --private-key /root/id_rsa_key -i clients.ini -m file -a "name=/etc/cron.d state=directory" 8 | 9 | -------------------------------------------------------------------------------- /playbooks/satellite/capsule_lbs.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "Configure capsule(s) load-balancer" 3 | hosts: capsule_lbs 4 | gather_facts: false 5 | 6 | roles: 7 | - role: linux-system-roles.firewall 8 | - role: ../common/roles/enlarge-arp-table 9 | - role: haproxy 10 | ... 11 | -------------------------------------------------------------------------------- /playbooks/tests/files/init.pp: -------------------------------------------------------------------------------- 1 | class {{ content_puppet_module_name }} { 2 | file { "{{ content_puppet_module_file }}": 3 | ensure => file, 4 | mode => "755", 5 | owner => root, 6 | group => root, 7 | content => "{{ content_puppet_module_file_content }}", 8 | } 9 | } 10 | -------------------------------------------------------------------------------- /playbooks/common/common.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: satellite6:capsules:capsule_lbs:container_hosts 3 | gather_facts: false 4 | 5 | roles: 6 | - role: common 7 | - role: linux-system-roles.timesync 8 | - role: linux-system-roles.rhc 9 | - role: upgrade-restart 10 | - role: epel-not-present 11 | ... 12 | -------------------------------------------------------------------------------- /playbooks/satellite/roles/workaround-local-sat-repo/defaults/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | workaround_local_sat_repo_repos: 3 | - Sat6-CI_Red_Hat_Satellite_6_10_Composes_Satellite_6_10_RHEL7 4 | - Sat6-CI_Red_Hat_Satellite_6_10_Composes_Satellite_Tools_6_10_RHEL7 5 | - Sat6-CI_Red_Hat_Satellite_6_10_Composes_Satellite_Maintenance_Next_RHEL7 6 | ... 
7 | -------------------------------------------------------------------------------- /playbooks/common/roles/enlarge-inotify-limits/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "Configure larger inotify limits" 3 | copy: 4 | src: 40-max-user-watches.conf 5 | dest: /etc/sysctl.d/40-max-user-watches.conf 6 | - name: "Reload sysctl config" 7 | command: 8 | sysctl -p /etc/sysctl.d/40-max-user-watches.conf 9 | ... 10 | -------------------------------------------------------------------------------- /playbooks/common/prepare_host.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: all 3 | gather_facts: False 4 | roles: 5 | - ../common/roles/scalelab-nic-cleanup 6 | - ../common/roles/common 7 | - ../common/roles/remove-home-extend-root 8 | - ../common/roles/epel-not-present 9 | - ../common/roles/rhsm 10 | - linux-system-roles.timesync 11 | ... 12 | -------------------------------------------------------------------------------- /playbooks/common/roles/enlarge-arp-table/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "Configure larger ARP table" 3 | ansible.posix.sysctl: 4 | sysctl_file: "/etc/sysctl.d/50-enlarge-arp-table.conf" 5 | name: "{{ item.key }}" 6 | value: "{{ item.value }}" 7 | reload: True 8 | loop: "{{ common_sysctl_config_default | dict2items }}" 9 | ... 10 | -------------------------------------------------------------------------------- /experiment/backup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | source experiment/run-library.sh 4 | 5 | 6 | section "BackupTest" 7 | ap 00-backup.log playbooks/tests/sat-backup.yaml 8 | e BackupOffline $logs/00-backup.log 9 | e RestoreOffline $logs/00-backup.log 10 | e BackupOnline $logs/00-backup.log 11 | e RestoreOnline $logs/00-backup.log 12 | 13 | 14 | junit_upload 15 | -------------------------------------------------------------------------------- /playbooks/satellite/roles/satellite-populate/tasks/restart_and_refresh.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "Restart Satellite" 3 | command: 4 | katello-service restart 5 | - name: "Refresh manifest" 6 | command: 7 | hammer --username '{{ sat_user }}' --password '{{ sat_pass }}' subscription refresh-manifest --organization '{{ organization }}' 8 | ... 9 | -------------------------------------------------------------------------------- /playbooks/satellite/satellite-capsule_common.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "Common configuration steps for Satellite and capsule(s) servers" 3 | hosts: satellite6:capsules 4 | gather_facts: false 5 | 6 | roles: 7 | - role: linux-system-roles.firewall 8 | - role: linux-system-roles.storage 9 | - role: ../common/roles/enlarge-arp-table 10 | ... 
11 | -------------------------------------------------------------------------------- /playbooks/katello/installation.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: katello 3 | gather_facts: False 4 | roles: 5 | - ../common/roles/add-epel 6 | - ../common/roles/rhsm 7 | - linux-system-roles.timesync 8 | - ../common/roles/remove-home-extend-root 9 | - add_rhsm_repos 10 | - add_katello_repos 11 | - configure_firewall 12 | - add_host_to_hostfile 13 | - setup 14 | -------------------------------------------------------------------------------- /playbooks/common/roles/static_repo/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "Add repository" 3 | yum_repository: 4 | name: "{{ item.name }}" 5 | description: "Repo {{ item.name }}" 6 | baseurl: "{{ item.baseurl }}" 7 | gpgcheck: "{{ item.gpgcheck|default('yes') }}" 8 | file: "{{ item.name|regex_replace('[^a-zA-Z0-9_-]', '_') }}" 9 | loop: "{{ repos|default([]) }}" 10 | ... 11 | -------------------------------------------------------------------------------- /playbooks/satellite/capsules.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "Configure capsule(s)" 3 | hosts: capsules 4 | gather_facts: false 5 | 6 | roles: 7 | - role: linux-system-roles.firewall 8 | - role: linux-system-roles.storage 9 | - role: ../common/roles/enlarge-arp-table 10 | - role: repo_setup 11 | - role: capsule 12 | - role: capsule-location 13 | - role: puppet-autosign 14 | ... 15 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | ansible-container-host-mgr 2 | ansible-kvm-host-mgr 3 | conf/contperf* 4 | satellite-monitoring 5 | latest-junit.xml 6 | OLD 7 | 8 | *.pyc 9 | *~ 10 | 11 | *.retry 12 | 13 | *.swp 14 | 15 | conf/*local.yaml 16 | conf/id_rsa* 17 | conf/*.zip 18 | manifest.zip 19 | log/ 20 | logs-*/ 21 | 22 | run-*/ 23 | 24 | lextab.py 25 | yacctab.py 26 | 27 | __pycache__ 28 | venv/ 29 | env/ 30 | 31 | .vscode/ 32 | -------------------------------------------------------------------------------- /playbooks/common/roles/remove-home-extend-root/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - ansible.builtin.dnf: 3 | name: lvm2 4 | state: present 5 | - setup: 6 | gather_subset: 7 | - '!all' 8 | - hardware 9 | - include_tasks: remove_home_partition.yaml 10 | when: 'remove_home_partition|bool == True' 11 | - include_tasks: extend_root_partition.yaml 12 | when: 'extend_root_partition|bool == True' 13 | ... 14 | -------------------------------------------------------------------------------- /playbooks/satellite/roles/pbench_client/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "Setup pbench repo" 3 | get_url: 4 | url="{{ pbench_repo_file }}" 5 | dest=/etc/yum.repos.d/pbench.repo 6 | force=yes 7 | - name: "Install pbench-agent" 8 | yum: 9 | name=pbench-agent 10 | state=present 11 | ## TODO: I assume we can delete this? 12 | #- shell: 13 | # source /opt/pbench-agent/config 14 | ... 
15 | -------------------------------------------------------------------------------- /experiment/util-backup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | source experiment/run-library.sh 4 | 5 | 6 | section "Backup" 7 | a 00-backup.log satellite6 -m "shell" -a "rm -rf /root/backup /tmp/backup; mkdir /tmp/backup; satellite-maintain backup offline --skip-pulp-content --assumeyes /tmp/backup; mv /tmp/backup /root/" 8 | a 00-hammer-ping.log satellite6 -m "shell" -a "hammer -u {{ sat_user }} -p {{ sat_pass }} ping" 9 | 10 | 11 | junit_upload 12 | -------------------------------------------------------------------------------- /playbooks/satellite/roles/satellite-populate/tasks/try_enable_repo.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - include_tasks: enable_repo.yaml 3 | 4 | #### If it fails, restart Satellite, refresh manifest and try again 5 | ###- include_tasks: restart_and_refresh.yaml 6 | ### when: enable_repo.rc is not defined or enable_repo.rc != 0 7 | ###- include_tasks: enable_repo.yaml 8 | ### when: enable_repo.rc is not defined or enable_repo.rc != 0 9 | ... 10 | -------------------------------------------------------------------------------- /playbooks/katello/roles/configure_firewall/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Check if firewalld present 3 | command: 4 | rpm -q firewalld 5 | register: firewalld_installed 6 | 7 | - name: "Open network ports in the firewall " 8 | firewalld: 9 | port: "{{ item }}" 10 | state: enabled 11 | permanent: true 12 | immediate: true 13 | when: firewalld_installed.rc == 0 14 | with_items: "{{ katello_ports }}" 15 | -------------------------------------------------------------------------------- /playbooks/katello/roles/add_rhsm_repos/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "Add RHEL {{ ansible_distribution_major_version }} Optional repo" 3 | command: 4 | subscription-manager repos --enable 'rhel-{{ ansible_distribution_major_version }}-server-optional-rpms' 5 | 6 | - name: "Add RHEL {{ ansible_distribution_major_version }} Extras repo" 7 | command: 8 | subscription-manager repos --enable 'rhel-{{ ansible_distribution_major_version }}-server-extras-rpms' 9 | -------------------------------------------------------------------------------- /playbooks/tests/includes/update_used_containers.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # Update count of used containers 3 | # 4 | # Expects these variables to be set: 5 | # used_count ... to which number we should set the counter 6 | # update_used ... bool determining if we should do this at all 7 | 8 | - name: "Increment number of already used containers" 9 | shell: # FIXME: Use lineinfile or so 10 | echo "{{ used_count|int }}" >/root/container-used-count 11 | when: "update_used|bool" 12 | ... 
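The include above (playbooks/tests/includes/update_used_containers.yaml) stores the counter by shelling out to `echo`, and its FIXME asks for a `lineinfile`-style replacement. A minimal sketch of one way to do that — an editorial suggestion, not a file from the repo — using `ansible.builtin.copy` with inline `content` so the counter file is rewritten through a module and the task reports changed/ok status properly:

```yaml
# Hedged alternative to the shell/echo task in update_used_containers.yaml (not part of the repo)
- name: "Increment number of already used containers"
  ansible.builtin.copy:
    dest: /root/container-used-count
    content: "{{ used_count | int }}\n"
    mode: "0644"
  when: update_used | bool
```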
13 | -------------------------------------------------------------------------------- /playbooks/satellite/roles/puppet-autosign/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # Add '*' autosign entry to Puppet CA of integrated capsule 3 | - name: "If file exists, it is enabled" 4 | ansible.builtin.stat: 5 | path: /etc/puppetlabs/puppet/autosign.conf 6 | register: stat_result 7 | 8 | - name: "Add '*' autosign entry" 9 | ansible.builtin.lineinfile: 10 | path: "/etc/puppetlabs/puppet/autosign.conf" 11 | line: "*" 12 | when: 13 | - stat_result.stat.exists 14 | ... 15 | -------------------------------------------------------------------------------- /scripts/populate_container_registry-Containerfile: -------------------------------------------------------------------------------- 1 | FROM docker.io/fedora:latest 2 | MAINTAINER Jan Hutar 3 | 4 | RUN date -Ins >/something 5 | RUN date -Ins >>/something 6 | RUN date -Ins >>/something 7 | RUN date -Ins >>/something 8 | RUN date -Ins >>/something 9 | RUN date -Ins >>/something 10 | RUN date -Ins >>/something 11 | RUN date -Ins >>/something 12 | RUN date -Ins >>/something 13 | RUN date -Ins >>/something 14 | 15 | ENV LANG en_US.utf8 16 | USER root 17 | CMD [ "/sbin/init" ] 18 | -------------------------------------------------------------------------------- /playbooks/common/roles/remove-home-extend-root/tasks/remove_home_partition.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "Get rid of /home" 3 | mount: 4 | name: /home 5 | state: unmounted 6 | - name: "Get rid of /home from /etc/fstab" 7 | mount: 8 | name: /home 9 | state: absent 10 | - name: "Remove /home LV" 11 | lvol: 12 | vg: "{{ item.value.vg }}" 13 | lv: "{{ item.key }}" 14 | state: absent 15 | force: yes 16 | with_dict: "{{ ansible_lvm.lvs }}" 17 | when: item.key in "lv_home" 18 | ... 19 | -------------------------------------------------------------------------------- /playbooks/satellite/roles/haproxy/vars/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | capsule_lbs_sysctl_config_default: 3 | # https://www.haproxy.com/documentation/hapee/latest/getting-started/system-tuning/ 4 | 5 | net.ipv4.tcp_rmem: '4096 16060 262144' 6 | net.ipv4.tcp_wmem: '4096 16060 262144' 7 | net.ipv4.tcp_tw_reuse: 1 8 | net.ipv4.ip_local_port_range: '1024 65023' 9 | net.ipv4.tcp_max_syn_backlog: 60000 10 | net.ipv4.tcp_fin_timeout: 30 11 | net.ipv4.tcp_synack_retries: 3 12 | net.ipv4.ip_nonlocal_bind: 1 13 | net.core.somaxconn: 60000 14 | ... 
15 | -------------------------------------------------------------------------------- /playbooks/tests/candlepin_identity_regenerate.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: all 3 | gather_facts: False 4 | serial: 5 | - 5 6 | - 10 7 | - 25 8 | - 50 9 | - 100 10 | - 150 11 | - 200 12 | tasks: 13 | - name: Run the Identity regeneration 14 | shell: 15 | subscription-manager identity --regenerate 16 | register: regen 17 | - name: Get the timings 18 | debug: 19 | msg="Identity regen took {{ regen.start }} to {{ regen.end }}" 20 | - pause: 21 | seconds: 10 22 | 23 | 24 | -------------------------------------------------------------------------------- /scripts/cv_publish_scale.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | echo "----------------------------------" 3 | echo "[$(date -R)] content view publish starts" 4 | for cvnum in `seq 1 $NUMCV`; do 5 | #time hammer -u "${ADMIN_USER}" -p "${ADMIN_PASSWORD}" content-view publish --name=cv$cvnum --organization="${ORG}" 2>&1 & 6 | hammer -u "${ADMIN_USER}" -p "${ADMIN_PASSWORD}" content-view publish --name=cv$cvnum --organization="${ORG}" --async 7 | done 8 | echo "[$(date -R)] Waiting for CV publishing to complete" 9 | wait 10 | echo "[$(date -R)] CV Publish finished" 11 | echo 12 | -------------------------------------------------------------------------------- /playbooks/tests/FAM/manifest_download.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | gather_facts: false 4 | vars: 5 | foreman_manifest_path: "{{ manifest_path | default('~/.ansible/tmp/manifest.zip') }}" 6 | 7 | tasks: 8 | - name: Download the manifest 9 | theforeman.foreman.redhat_manifest: 10 | validate_certs: "{{ foreman_rhsm_validate_certs }}" 11 | uuid: "{{ foreman_manifest_uuid }}" 12 | username: "{{ foreman_rhsm_username }}" 13 | password: "{{ foreman_rhsm_password }}" 14 | path: "{{ foreman_manifest_path }}" 15 | ... 16 | -------------------------------------------------------------------------------- /experiment/util-restore.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | source experiment/run-library.sh 4 | 5 | branch="${PARAM_branch:-satcpt}" 6 | inventory="${PARAM_inventory:-conf/contperf/inventory.${branch}.ini}" 7 | 8 | opts="--forks 100 -i $inventory" 9 | opts_adhoc="$opts" 10 | 11 | 12 | section "Restore" 13 | a 00-backup.log satellite6 -m "shell" -a "rm -rf /tmp/backup; cp -r /root/backup /tmp/; satellite-maintain restore --assumeyes /tmp/backup/*; rm -rf /tmp/backup" 14 | a 00-hammer-ping.log satellite6 -m "shell" -a "hammer -u {{ sat_user }} -p {{ sat_pass }} ping" 15 | 16 | 17 | junit_upload 18 | -------------------------------------------------------------------------------- /scripts/2023-build_and_push_repos/README.md: -------------------------------------------------------------------------------- 1 | Build and push repositories 2 | =========================== 3 | 4 | For one experiment we needed lots of publicly accessible fake repositories. 5 | 6 | We used the rpmfluff library to generate RPMs (see `script.py`), ran it in a 7 | loop, and uploaded the resulting repositories to an IBM Cloud object storage bucket 8 | (see `script.sh`). For 10k repos it ran for multiple days, so we logged in again on 9 | every iteration even when that was unnecessary. 
10 | 11 | As a possible improvement, some parallelisation would be nice, but this was just 12 | a one-shot thing. 13 | -------------------------------------------------------------------------------- /playbooks/tests/FAM/product_create.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | gather_facts: false 4 | 5 | module_defaults: 6 | group/theforeman.foreman.foreman: 7 | server_url: "{{ foreman_server_url | default(omit) }}" 8 | username: "{{ foreman_username | default(omit) }}" 9 | password: "{{ foreman_password | default(omit) }}" 10 | validate_certs: "{{ foreman_validate_certs | default(omit) }}" 11 | organization: "{{ foreman_organization }}" 12 | 13 | tasks: 14 | - name: ProductCreate 15 | theforeman.foreman.product: 16 | name: "{{ product }}" 17 | ... 18 | -------------------------------------------------------------------------------- /playbooks/tests/downloadtest-cleanup.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: container_hosts 3 | gather_facts: no 4 | tasks: 5 | - name: "Collect directory names" 6 | find: 7 | paths: /test-folder/ 8 | patterns: "*.com" 9 | file_type: directory 10 | register: dirs_to_delete 11 | - name: "Delete directories" 12 | file: 13 | path: "{{ item.path }}" 14 | state: absent 15 | with_items: "{{ dirs_to_delete.files }}" 16 | register: clean_download_folder 17 | until: clean_download_folder is not failed 18 | retries: 5 19 | delay: 20 20 | -------------------------------------------------------------------------------- /playbooks/common/roles/enlarge-arp-table/vars/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | common_sysctl_config_default: 3 | # Configure larger ARP table 4 | # See https://ubuntuforums.org/showthread.php?t=2305091 5 | 6 | net.ipv4.neigh.default.gc_thresh1: 16384 7 | net.ipv4.neigh.default.gc_thresh2: 32768 8 | net.ipv4.neigh.default.gc_thresh3: 65536 9 | net.ipv6.neigh.default.gc_thresh1: 16384 10 | net.ipv6.neigh.default.gc_thresh2: 32768 11 | net.ipv6.neigh.default.gc_thresh3: 65536 12 | net.netfilter.nf_conntrack_buckets: 204800 13 | net.netfilter.nf_conntrack_max: 1000000 14 | net.nf_conntrack_max: 1000000 15 | ... 16 | -------------------------------------------------------------------------------- /playbooks/common/roles/epel-not-present/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "epel-release package should not be installed" 3 | yum: 4 | name: epel-release 5 | state: absent 6 | 7 | - name: "EPEL repo file should not be there" 8 | file: 9 | path: "{{ item }}" 10 | state: absent 11 | with_items: 12 | - /etc/yum.repos.d/epel.repo 13 | - /etc/yum.repos.d/epel-testing.repo 14 | 15 | - name: "EPEL should not be in the yum repolist output" 16 | ansible.builtin.shell: 17 | yum repolist | grep -i 'EPEL' 18 | register: epel_repo_check 19 | failed_when: "epel_repo_check.rc == 0" 20 | changed_when: false 21 | ... 
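The README of `scripts/2023-build_and_push_repos` above refers to `script.py`, which is not included in this listing, and `script.sh` below calls it as `python script.py $r` before running `createrepo_c` on the `repoN` directory. A minimal, hypothetical sketch of what such a generator could look like, assuming rpmfluff's `SimpleRpmBuild` API; the package names, the count of ten packages per repository, and the `noarch` build arch are illustrative guesses, not taken from the repo:

```python
#!/usr/bin/env python3
# Hypothetical sketch only -- the real scripts/2023-build_and_push_repos/script.py
# is not part of this listing. Assumes the rpmfluff library is installed.
import os
import shutil
import sys

from rpmfluff import SimpleRpmBuild

repo_number = int(sys.argv[1])       # script.sh passes the repo index as "$r"
repo_dir = "repo%d" % repo_number    # script.sh later runs createrepo_c on this directory
os.makedirs(repo_dir, exist_ok=True)

for pkg_number in range(10):         # ten trivial packages per repo -- a guess
    pkg = SimpleRpmBuild("fake-package-%d-%d" % (repo_number, pkg_number),
                         "1.0", "1", ["noarch"])
    pkg.make()                                          # build the RPM in a scratch dir
    shutil.copy(pkg.get_built_rpm("noarch"), repo_dir)  # collect it for createrepo_c
    pkg.clean()                                         # drop rpmfluff's scratch dir
```

The real generator presumably varies package contents and counts; this only mirrors the calling convention visible in `script.sh`.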
22 | -------------------------------------------------------------------------------- /playbooks/tests/FAM/flatpak_remote_scan.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | gather_facts: false 4 | 5 | module_defaults: 6 | group/theforeman.foreman.foreman: 7 | server_url: "{{ foreman_server_url | default(omit) }}" 8 | username: "{{ foreman_username | default(omit) }}" 9 | password: "{{ foreman_password | default(omit) }}" 10 | validate_certs: "{{ foreman_validate_certs | default(omit) }}" 11 | organization: "{{ foreman_organization }}" 12 | 13 | tasks: 14 | - name: FlatpakRemoteScan 15 | theforeman.foreman.flatpak_remote_scan: 16 | flatpak_remote: "{{ flatpak_remote }}" 17 | ... 18 | -------------------------------------------------------------------------------- /playbooks/tests/FAM/manifest_test.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | gather_facts: false 4 | vars: 5 | foreman_manifest_path: "{{ manifest_path | default('~/.ansible/tmp/manifest.zip') }}" 6 | test_runs: "{{ runs | default(1) }}" 7 | 8 | tasks: 9 | - name: Run test cyle of import + refresh + delete the manifest 10 | include_tasks: includes/manifest_test.yaml 11 | vars: 12 | foreman_manifest_download: false 13 | foreman_manifest_import: true 14 | foreman_manifest_refresh: true 15 | foreman_manifest_delete: true 16 | loop: "{{ range(test_runs | int) | list }}" 17 | ignore_errors: true 18 | ... 19 | -------------------------------------------------------------------------------- /scripts/2023-build_and_push_repos/script.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash -ex 2 | 3 | for r in $( seq 10000 ); do 4 | ./ibmcloud login --apikey "$IBM_API_KEY" 5 | repo="repo$r" 6 | rm -rf "$repo" 7 | echo "$( date --utc -Ins ) $repo Building packages" 8 | python script.py $r >>LOG 9 | echo "$( date --utc -Ins ) $repo Creating repodata" 10 | createrepo_c $repo >>LOG 11 | echo "$( date --utc -Ins ) $repo Pushing content" 12 | for f in $( find $repo -type f ); do 13 | ./ibmcloud cos upload --bucket satellitetestrepos --region us-east --key "$f" --file "$f" >>LOG 14 | done 15 | echo "$( date --utc -Ins ) $repo Done" 16 | done 17 | 18 | -------------------------------------------------------------------------------- /playbooks/tests/FAM/cv_publish.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | gather_facts: false 4 | 5 | module_defaults: 6 | group/theforeman.foreman.foreman: 7 | server_url: "{{ foreman_server_url | default(omit) }}" 8 | username: "{{ foreman_username | default(omit) }}" 9 | password: "{{ foreman_password | default(omit) }}" 10 | validate_certs: "{{ foreman_validate_certs | default(omit) }}" 11 | organization: "{{ foreman_organization }}" 12 | 13 | tasks: 14 | - name: ContentViewPublish 15 | theforeman.foreman.content_view_version: 16 | content_view: "{{ cv }}" 17 | version: "{{ version | default(omit) }}" 18 | ... 
19 | -------------------------------------------------------------------------------- /playbooks/tests/FAM/lce_create.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | gather_facts: false 4 | 5 | module_defaults: 6 | group/theforeman.foreman.foreman: 7 | server_url: "{{ foreman_server_url | default(omit) }}" 8 | username: "{{ foreman_username | default(omit) }}" 9 | password: "{{ foreman_password | default(omit) }}" 10 | validate_certs: "{{ foreman_validate_certs | default(omit) }}" 11 | organization: "{{ foreman_organization }}" 12 | 13 | tasks: 14 | - name: CreateLifecyleEnvironment 15 | theforeman.foreman.lifecycle_environment: 16 | name: "{{ lce }}" 17 | prior: "{{ prior }}" 18 | # state: 'present' 19 | ... 20 | -------------------------------------------------------------------------------- /playbooks/tests/FAM/ak_content_override.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | gather_facts: false 4 | 5 | module_defaults: 6 | group/theforeman.foreman.foreman: 7 | server_url: "{{ foreman_server_url | default(omit) }}" 8 | username: "{{ foreman_username | default(omit) }}" 9 | password: "{{ foreman_password | default(omit) }}" 10 | validate_certs: "{{ foreman_validate_certs | default(omit) }}" 11 | organization: "{{ foreman_organization }}" 12 | 13 | tasks: 14 | - name: ActivationKeyContentOverrides 15 | theforeman.foreman.activation_key: 16 | name: "{{ ak }}" 17 | content_overrides: "{{ content_overrides | default(omit) }}" 18 | ... 19 | -------------------------------------------------------------------------------- /playbooks/tests/lce_create_test.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | gather_facts: false 4 | # roles: 5 | # - role: theforeman.foreman.lifecycle_environments 6 | tasks: 7 | - name: CreateLifecyleEnvironment_{{ lce }} 8 | theforeman.foreman.lifecycle_environment: 9 | username: "{{ foreman_username | default(omit) }}" 10 | password: "{{ foreman_password | default(omit) }}" 11 | server_url: "{{ foreman_server_url | default(omit) }}" 12 | validate_certs: "{{ foreman_validate_certs | default(omit) }}" 13 | organization: "{{ foreman_organization }}" 14 | name: "{{ lce }}" 15 | prior: "{{ prior }}" 16 | # state: 'present' 17 | ... 
18 | -------------------------------------------------------------------------------- /docs/satperf.log.example: -------------------------------------------------------------------------------- 1 | [2016-07-27 03:30:32,042] [WARNING ] [satellite.py:112:install_satellite()] - [Py Ansible API v2.1] is unstable 2 | [2016-07-27 03:30:32,762] [INFO ] [satellite.py:118:install_satellite()] - satellite.sample.com : {'failures': 1} 3 | [2016-07-27 03:30:33,356] [INFO ] [satellite.py:126:install_capsule()] - capsule.sample.com : {'failures': 1} 4 | [2016-07-27 03:30:34,097] [INFO ] [satellite.py:134:prepare_docker_hosts()] - docker1.sample.com : {'failures': 1} 5 | [2016-07-27 03:30:34,097] [INFO ] [satellite.py:134:prepare_docker_hosts()] - docker2.sample.com : {'failures': 1} 6 | [2016-07-27 03:30:34,097] [INFO ] [satellite.py:134:prepare_docker_hosts()] - docker3.sample.com : {'failures': 1} 7 | -------------------------------------------------------------------------------- /playbooks/tests/FAM/ak_create.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | gather_facts: false 4 | 5 | module_defaults: 6 | group/theforeman.foreman.foreman: 7 | server_url: "{{ foreman_server_url | default(omit) }}" 8 | username: "{{ foreman_username | default(omit) }}" 9 | password: "{{ foreman_password | default(omit) }}" 10 | validate_certs: "{{ foreman_validate_certs | default(omit) }}" 11 | organization: "{{ foreman_organization }}" 12 | 13 | tasks: 14 | - name: ActivationKeyCreate 15 | theforeman.foreman.activation_key: 16 | name: "{{ ak }}" 17 | content_view: "{{ content_view }}" 18 | lifecycle_environment: "{{ lifecycle_environment }}" 19 | ... 20 | -------------------------------------------------------------------------------- /playbooks/satellite/capsules-check.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: capsules 3 | roles: 4 | ### - common 5 | tasks: 6 | #- name: "katello-service restart" 7 | # command: 8 | # katello-service restart 9 | 10 | #- name: "katello-service stop" 11 | # command: 12 | # katello-service stop 13 | 14 | #- command: 15 | # systemctl restart qdrouterd 16 | 17 | - name: "katello-service status" 18 | command: 19 | katello-service status 20 | register: katello_status 21 | ignore_errors: true 22 | - debug: var=katello_status.stdout_lines|last 23 | - name: "df -h" 24 | command: 25 | df -h 26 | register: df_h 27 | - debug: var=df_h.stdout_lines 28 | -------------------------------------------------------------------------------- /scripts/sync_content.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | if [ ! 
$# == 2 ]; then 3 | echo "Usage: ./sync_content.sh <numRepos> <testname>" 4 | echo "Example: time ./sync_content.sh 2 testing-sat61gas2-005" 5 | exit 6 | fi 7 | numRepos=$1 8 | testname=$2 9 | 10 | echo "----------------------------------" 11 | sleep 5 12 | # Perform Sync and time results 13 | for repoid in `seq 1 ${numRepos}`; do 14 | echo "[$(date -R)] Starting content Synchronize: ${repoid}" 15 | time hammer -u "${ADMIN_USER}" -p "${ADMIN_PASSWORD}" repository synchronize --id $repoid --organization="${ORG}" 2>&1 & 16 | done 17 | echo "[$(date -R)] Waiting for Sync repos" 18 | wait 19 | echo "[$(date -R)] Syncs finished" 20 | echo "----------------------------------" 21 | -------------------------------------------------------------------------------- /playbooks/common/roles/remove-home-extend-root/tasks/extend_root_partition.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # We rely on root LV name always containing string "root" 3 | # If not, this is not going to work 4 | - name: "Extend root LV to all available VG space" 5 | lvol: 6 | vg: "{{ item.value.vg }}" 7 | lv: "{{ item.key }}" 8 | size: +100%FREE 9 | with_dict: "{{ ansible_lvm.lvs }}" 10 | when: item.key in "lv_root" 11 | 12 | # Ansible can't loop over block. Argh!! So we have to do the loop twice 13 | - name: "Resize the root filesystem" 14 | filesystem: 15 | dev: "/dev/{{ item.value.vg }}/{{ item.key }}" 16 | fstype: xfs 17 | resizefs: yes 18 | with_dict: "{{ ansible_lvm.lvs }}" 19 | when: item.key in "lv_root" 20 | ... 21 | -------------------------------------------------------------------------------- /playbooks/common/roles/upgrade-restart/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Set timezone to UTC 3 | community.general.timezone: 4 | name: UTC 5 | 6 | - name: "Upgrade" 7 | ansible.builtin.dnf: 8 | name: '*' 9 | state: latest 10 | register: dnf_upgrade_cmd 11 | 12 | - name: "Show upgrade result" 13 | ansible.builtin.debug: 14 | var: dnf_upgrade_cmd 15 | 16 | - name: "Check if reboot is necessary" 17 | ansible.builtin.command: 18 | cmd: 19 | dnf needs-restarting -r 20 | register: dnf_needs_restarting_cmd 21 | changed_when: false 22 | failed_when: 23 | - dnf_needs_restarting_cmd.rc not in [0, 1] 24 | 25 | - name: "Reboot" 26 | ansible.builtin.reboot: 27 | when: 28 | - dnf_needs_restarting_cmd.rc == 1 29 | ... 30 | -------------------------------------------------------------------------------- /playbooks/satellite/roles/upgrade-restart/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Set timezone to UTC 3 | community.general.timezone: 4 | name: UTC 5 | 6 | - name: "Upgrade" 7 | ansible.builtin.dnf: 8 | name: '*' 9 | state: latest 10 | register: dnf_upgrade_cmd 11 | 12 | - name: "Show upgrade result" 13 | ansible.builtin.debug: 14 | var: dnf_upgrade_cmd 15 | 16 | - name: "Check if reboot is necessary" 17 | ansible.builtin.command: 18 | cmd: 19 | dnf needs-restarting -r 20 | register: dnf_needs_restarting_cmd 21 | changed_when: false 22 | failed_when: 23 | - dnf_needs_restarting_cmd.rc not in [0, 1] 24 | 25 | - name: "Reboot" 26 | ansible.builtin.reboot: 27 | when: 28 | - dnf_needs_restarting_cmd.rc == 1 29 | ... 
30 | -------------------------------------------------------------------------------- /playbooks/tests/FAM/flatpak_remote_repo_mirror.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | gather_facts: false 4 | 5 | module_defaults: 6 | group/theforeman.foreman.foreman: 7 | server_url: "{{ foreman_server_url | default(omit) }}" 8 | username: "{{ foreman_username | default(omit) }}" 9 | password: "{{ foreman_password | default(omit) }}" 10 | validate_certs: "{{ foreman_validate_certs | default(omit) }}" 11 | organization: "{{ foreman_organization }}" 12 | 13 | tasks: 14 | - name: FlatpakRemoteRepoMirror 15 | theforeman.foreman.flatpak_remote_repository_mirror: 16 | product: "{{ product }}" 17 | flatpak_remote: "{{ flatpak_remote }}" 18 | flatpak_remote_repository: "{{ flatpak_remote_repository }}" 19 | ... 20 | -------------------------------------------------------------------------------- /requirements.yml: -------------------------------------------------------------------------------- 1 | --- 2 | roles: 3 | - name: linux-system-roles.firewall 4 | 5 | - name: linux-system-roles.network 6 | 7 | - name: linux-system-roles.rhc 8 | 9 | - name: linux-system-roles.storage 10 | 11 | - name: linux-system-roles.timesync 12 | 13 | - name: repo_setup 14 | src: git+https://github.com/jhutar/ansible-role-repo_setup.git 15 | version: origin/main 16 | 17 | - name: sosreport_gatherer 18 | src: git+https://github.com/jhutar/ansible-role-sosreport_gatherer.git 19 | version: origin/main 20 | 21 | collections: 22 | - name: ansible.posix 23 | 24 | - name: ansible.utils 25 | 26 | - name: community.general 27 | 28 | - name: theforeman.foreman 29 | version: ">=5.7.0" 30 | 31 | - name: theforeman.operations 32 | ... 33 | -------------------------------------------------------------------------------- /playbooks/satellite/hostgroup-create.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: satellite6 3 | gather_facts: False 4 | tasks: 5 | - name: "Create OS" 6 | shell: 7 | hammer os create --architectures x86_64 --family Redhat --name "{{ containers_os_name }}" --major "{{ containers_os_major }}" --minor "{{ containers_os_minor }}" 8 | ignore_errors: yes 9 | - name: "Create Hostgroup" 10 | shell: 11 | hammer hostgroup create --content-view "{{ organization }} View" --lifecycle-environment Library --name "{{ hostgroup_name }}" --query-organization "{{ organization }}" --subnet "{{ subnet_name }}" --operatingsystem "{{ containers_os_name }} {{ containers_os_major}}.{{containers_os_minor}}" --group-parameters-attributes "name=kt_activation_keys\,value=ActivationKey" 12 | ... 
13 | -------------------------------------------------------------------------------- /playbooks/tests/FAM/cv_filter_create.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | gather_facts: false 4 | 5 | module_defaults: 6 | group/theforeman.foreman.foreman: 7 | server_url: "{{ foreman_server_url | default(omit) }}" 8 | username: "{{ foreman_username | default(omit) }}" 9 | password: "{{ foreman_password | default(omit) }}" 10 | validate_certs: "{{ foreman_validate_certs | default(omit) }}" 11 | organization: "{{ foreman_organization }}" 12 | 13 | tasks: 14 | - name: ContentViewFilterCreate 15 | theforeman.foreman.content_view_filter: 16 | name: "{{ cv_filter }}" 17 | content_view: "{{ cv }}" 18 | filter_type: "{{ cv_filter_type }}" 19 | inclusion: "{{ inclusion | default(false) | ternary(true, false) }}" 20 | ... 21 | -------------------------------------------------------------------------------- /playbooks/common/roles/plain-network/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "Make sure we have basic facts needed by linux-system-roles.network" 3 | setup: 4 | gather_subset: min 5 | when: ansible_python is not defined 6 | 7 | - name: "Showing config" 8 | debug: 9 | msg: 10 | - "Public network: {{ public_nic }} with {% if public_ip is not defined %}DHCP{% else %}{{ (public_ip + '/' + public_netmask) | ansible.utils.ipaddr('host/prefix') }}{% endif %}" 11 | - "Private network: {{ private_nic }} with {{ (private_ip + '/' + private_netmask) | ansible.utils.ipaddr('host/prefix') }}" 12 | 13 | - name: "Setup networking" 14 | include_role: 15 | name: linux-system-roles.network 16 | vars: 17 | network_allow_restart: yes 18 | network_connections: "{{ plain_network_yaml|from_yaml }}" 19 | ... 20 | -------------------------------------------------------------------------------- /playbooks/tests/FAM/flatpak_remote_create.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | gather_facts: false 4 | 5 | module_defaults: 6 | group/theforeman.foreman.foreman: 7 | server_url: "{{ foreman_server_url | default(omit) }}" 8 | username: "{{ foreman_username | default(omit) }}" 9 | password: "{{ foreman_password | default(omit) }}" 10 | validate_certs: "{{ foreman_validate_certs | default(omit) }}" 11 | organization: "{{ foreman_organization }}" 12 | 13 | tasks: 14 | - name: FlatpakRemoteCreate 15 | theforeman.foreman.flatpak_remote: 16 | name: "{{ flatpak_remote }}" 17 | url: "{{ flatpak_remote_url | default(omit) }}" 18 | remote_username: "{{ flatpak_remote_username | default(omit) }}" 19 | token: "{{ flatpak_remote_token | default(omit) }}" 20 | ... 
21 | -------------------------------------------------------------------------------- /playbooks/tests/FAM/manifest-excercise.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | gather_facts: False 4 | vars: 5 | source_manifest: "{{ manifest | default('../../conf/contperf/manifest_SCA.zip') }}" 6 | 7 | tasks: 8 | - name: "Deploy manifest {{ source_manifest }} to remote host" 9 | copy: 10 | src: "{{ source_manifest }}" 11 | dest: /root/manifest-auto.zip 12 | force: yes 13 | 14 | - name: Include tasks to be executed 15 | include_tasks: 16 | file: includes/manifest-excercise.yaml 17 | loop: "{{ range(5)|list }}" 18 | ignore_errors: yes 19 | 20 | - name: Cleanup manifest if it is there 21 | shell: 22 | hammer -u '{{ sat_user }}' -p '{{ sat_pass }}' subscription delete-manifest --organization '{{ organization }}' 23 | ignore_errors: yes 24 | -------------------------------------------------------------------------------- /playbooks/satellite/roles/satellite-populate/tasks/enable_repo.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "Enable {{ item.product }} -> {{ item.reposet }}" 3 | command: 4 | hammer 5 | --username '{{ sat_user }}' 6 | --password '{{ sat_pass }}' 7 | repository-set enable 8 | --organization '{{ organization }}' 9 | --product '{{ item.product }}' 10 | --name '{{ item.reposet }}' 11 | {% if item.basearch is defined %}--basearch '{{ item.basearch }}'{% endif %} 12 | {% if item.releasever is defined %}--releasever '{{ item.releasever }}'{% endif %} 13 | ignore_errors: True 14 | register: enable_repo 15 | - name: "Enabling of the repo failed, here is some debug" 16 | debug: 17 | var: enable_repo 18 | when: "enable_repo.rc is not defined or enable_repo.rc != 0" 19 | ... 20 | -------------------------------------------------------------------------------- /playbooks/katello/roles/setup/defaults/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | foreman_repositories_version: nightly 3 | foreman_repository_base: "http://koji.katello.org/releases/yum/foreman-{{ foreman_repositories_version }}/RHEL/{{ ansible_distribution_major_version }}/x86_64/" 4 | foreman_plugin_repository_base: "http://koji.katello.org/releases/yum/foreman-plugins-{{ foreman_repositories_version }}/RHEL/{{ ansible_distribution_major_version }}/x86_64/" 5 | katello_repositories_version: nightly 6 | katello_repositories_pulp_version: 2.15 7 | katello_repositories_pulp_release: stable 8 | epel_repo_installer: https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm 9 | katello_installer_cmd: foreman-installer --scenario katello 10 | katello_user: admin 11 | katello_password: changeme 12 | katello_ports: 13 | - 80/tcp 14 | - 443/tcp 15 | - 5647/tcp 16 | - 9090/tcp 17 | ... 
18 | -------------------------------------------------------------------------------- /playbooks/tests/FAM/cv_create.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | gather_facts: false 4 | 5 | module_defaults: 6 | group/theforeman.foreman.foreman: 7 | server_url: "{{ foreman_server_url | default(omit) }}" 8 | username: "{{ foreman_username | default(omit) }}" 9 | password: "{{ foreman_password | default(omit) }}" 10 | validate_certs: "{{ foreman_validate_certs | default(omit) }}" 11 | organization: "{{ foreman_organization }}" 12 | 13 | tasks: 14 | - name: ContentViewCreate 15 | theforeman.foreman.content_view: 16 | name: "{{ cv }}" 17 | repositories: "{{ repositories | default(omit) }}" 18 | composite: "{{ components | default(composite) | default(false) | ternary(true, false) }}" 19 | components: "{{ components | default(omit) }}" 20 | auto_publish: "{{ auto_publish | default(omit) }}" 21 | ... 22 | -------------------------------------------------------------------------------- /playbooks/common/roles/plain-network/vars/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | plain_network_yaml: | 3 | - name: public 4 | type: ethernet 5 | interface_name: "{{ public_nic }}" 6 | autoconnect: yes 7 | ip: 8 | {% if public_ip is not defined %} 9 | dhcp4: yes 10 | {% else %} 11 | dhcp4: no 12 | gateway4: "{{ public_gateway }}" 13 | address: 14 | - "{{ (public_ip + '/' + public_netmask) | ansible.utils.ipaddr('host/prefix') }}" 15 | dns: 16 | - "{{ public_nameserver }}" 17 | {% endif %} 18 | state: up 19 | 20 | - name: private 21 | type: ethernet 22 | interface_name: "{{ private_nic }}" 23 | autoconnect: yes 24 | ip: 25 | dhcp4: no 26 | address: 27 | - "{{ (private_ip + '/' + private_netmask) | ansible.utils.ipaddr('host/prefix') }}" 28 | state: up 29 | 30 | - persistent_state: absent # remove all other profiles 31 | ... 
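Note: the plain-network role is normally pulled in through playbooks/satellite/installation.yaml when configure_plain_network is true (see that playbook further below), and installation.yaml also drives the other setup roles, so a real run needs the full inventory variables. The sketch below only illustrates the network-related extra vars the templates above consume; NIC names and addresses are illustrative lab values, and omitting public_ip switches the public interface to DHCP:

    ansible-playbook -i conf/hosts.ini \
        -e configure_plain_network=true \
        -e public_nic=eth0 -e private_nic=eth1 \
        -e public_ip=192.0.2.10 -e public_netmask=255.255.255.0 \
        -e public_gateway=192.0.2.1 -e public_nameserver=192.0.2.1 \
        -e private_ip=172.16.0.10 -e private_netmask=255.255.0.0 \
        playbooks/satellite/installation.yaml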
32 | -------------------------------------------------------------------------------- /experiment/rex-mqtt.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | source experiment/run-library.sh 4 | 5 | 6 | section 'Checking environment' 7 | generic_environment_check false false 8 | # unset skip_measurement 9 | # set +e 10 | 11 | 12 | section "Remote execution" 13 | job_template_ansible_default='Run Command - Ansible Default' 14 | job_template_ssh_default='Run Command - Script Default' 15 | 16 | skip_measurement='true' h 10-rex-set-via-ip.log "settings set --name remote_execution_connect_by_ip --value true" 17 | skip_measurement='true' a 11-rex-cleanup-know_hosts.log satellite6 -m "shell" -a "rm -rf /usr/share/foreman-proxy/.ssh/known_hosts*" 18 | 19 | skip_measurement='true' h 12-rex-update.log "job-invocation create --async --description-format 'Run %{command} (%{template_name})' --inputs command='dnf -y update' --job-template '$job_template_ssh_default' --search-query 'name ~ container'" 20 | j $logs/12-rex-update.log 21 | 22 | 23 | junit_upload 24 | -------------------------------------------------------------------------------- /playbooks/tests/continuous-rex.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: satellite6 3 | gather_facts: False 4 | vars: 5 | resting: 0 6 | save_graphs: true 7 | tasks: 8 | - name: "Determine job description" 9 | set_fact: 10 | job_desc: "Continuous ReX 'date' at {{ lookup('pipe', 'date -u -Iseconds') }}" 11 | - name: "Start the 'date' job via Ansible '{{ job_desc }}'" 12 | command: 13 | hammer -u "{{ sat_user }}" -p "{{ sat_pass }}" job-invocation create --description-format "{{ job_desc }}" --dynamic --search-query 'name ~ container' --job-template 'Run Command - Ansible Default' --inputs command='date' 14 | - name: "Start the 'date' job via SSH '{{ job_desc }}'" 15 | command: 16 | hammer -u "{{ sat_user }}" -p "{{ sat_pass }}" job-invocation create --description-format "{{ job_desc }}" --dynamic --search-query 'name ~ container' --job-template 'Run Command - SSH Default' --inputs command='date' 17 | ... 
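Note: continuous-rex.yaml starts one Ansible and one SSH 'date' job per run, so a steady background ReX load is usually produced by calling it in a loop. The inventory path below follows the conf/contperf/inventory.<branch>.ini pattern used by the experiment scripts and the 60-second cadence is an assumption:

    # keep firing 'date' jobs until interrupted
    while true; do
        ansible-playbook -i conf/contperf/inventory.satcpt.ini \
            playbooks/tests/continuous-rex.yaml
        sleep 60
    done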
18 | -------------------------------------------------------------------------------- /playbooks/satellite/roles/client-scripts/templates/podman-pull-rhosp.sh.j2: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -eu 4 | 5 | 6 | # RHEL_MAJOR_VERSION="$(awk -F'[=".]' '/VERSION_ID/ {print $3}' /etc/os-release)" 7 | RHEL_MAJOR_VERSION=9 8 | RHOSP_MAJOR_VERSION=17 9 | {% if sat_version == 'stream' or sat_version.split('.') | map('int') | list >= [6, 17, 0] %} 10 | RHOSP_IMAGE_LOCATION=default_organization/test/ccv_${RHEL_MAJOR_VERSION}/rhosp/rhosp-rhel${RHEL_MAJOR_VERSION}_openstack-base 11 | {% else %} 12 | RHOSP_IMAGE_LOCATION=default_organization-test-ccv_${RHEL_MAJOR_VERSION}-rhosp-rhosp-rhel${RHEL_MAJOR_VERSION}_openstack-base 13 | {% endif %} 14 | REGISTRY={{ tests_registration_target }} 15 | RHOSP_TAG="$(podman search --tls-verify=false --list-tags --format="{% raw %}{{.Tag}}{% endraw %}" --limit 250 $REGISTRY/$RHOSP_IMAGE_LOCATION | sort -u | grep ^$RHOSP_MAJOR_VERSION | head -n1)" 16 | 17 | podman pull --tls-verify=false $REGISTRY/$RHOSP_IMAGE_LOCATION:$RHOSP_TAG 18 | -------------------------------------------------------------------------------- /playbooks/tests/FAM/repo_create.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | gather_facts: false 4 | 5 | module_defaults: 6 | group/theforeman.foreman.foreman: 7 | server_url: "{{ foreman_server_url | default(omit) }}" 8 | username: "{{ foreman_username | default(omit) }}" 9 | password: "{{ foreman_password | default(omit) }}" 10 | validate_certs: "{{ foreman_validate_certs | default(omit) }}" 11 | organization: "{{ foreman_organization }}" 12 | 13 | tasks: 14 | - name: RepoCreate 15 | theforeman.foreman.repository: 16 | product: "{{ product }}" 17 | name: "{{ repo_name }}" 18 | content_type: "{{ content_type }}" 19 | url: "{{ repo_url | default(omit) }}" 20 | docker_upstream_name: "{{ docker_upstream_name | default(omit) }}" 21 | upstream_username: "{{ upstream_username | default(omit) }}" 22 | upstream_password: "{{ upstream_password | default(omit) }}" 23 | ... 24 | -------------------------------------------------------------------------------- /playbooks/common/free-sdb.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: kvm-hosts 3 | gather_facts: False 4 | roles: 5 | tasks: 6 | - name: "Collect only minimum of facts" 7 | setup: 8 | gather_subset: 9 | - '!all' 10 | - min 11 | 12 | - include_role: 13 | name: ../common/roles/remove-home-extend-root 14 | vars: 15 | extend_root_partition: False 16 | 17 | - name: "Move VG content from /dev/sdb1" 18 | command: pvmove "{{ vms_storage_pvs }}" 19 | register: pvmove_cmd 20 | failed_when: pvmove_cmd.rc != 0 and 'No data to move for' not in pvmove_cmd.stderr 21 | 22 | - name: "Remove /dev/sdb1 from VG" 23 | command: vgreduce "{{ ansible_lvm.lvs.root.vg }}" "{{ vms_storage_pvs }}" 24 | 25 | #- name: "Extend root to remaining space in VG" 26 | # lvol: 27 | # vg: "{{ ansible_lvm.lvs.root.vg }}" 28 | # lv: "root" 29 | # size: 100%PVS 30 | # resizefs: true 31 | ... 
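Note: repo_create.yaml above also covers container repositories; the only differences from a yum repository are content_type and the docker_upstream_name parameter. An illustrative invocation with placeholder credentials and names (the registry URL matches the default used by the sync-docker experiment):

    ansible-playbook \
        -e foreman_server_url=https://satellite.example.com \
        -e foreman_username=admin -e foreman_password=changeme \
        -e 'foreman_organization="Default Organization"' \
        -e product=test_product -e repo_name=test_docker_repo \
        -e content_type=docker \
        -e repo_url=https://registry-1.docker.io \
        -e docker_upstream_name=library/busybox \
        playbooks/tests/FAM/repo_create.yaml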
32 | -------------------------------------------------------------------------------- /playbooks/tests/FAM/manifest_import.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | gather_facts: false 4 | vars: 5 | foreman_manifest_path: "{{ manifest_path | default('~/.ansible/tmp/manifest.zip') }}" 6 | 7 | module_defaults: 8 | group/theforeman.foreman.foreman: 9 | server_url: "{{ foreman_server_url | default(omit) }}" 10 | username: "{{ foreman_username | default(omit) }}" 11 | password: "{{ foreman_password | default(omit) }}" 12 | validate_certs: "{{ foreman_validate_certs | default(omit) }}" 13 | organization: "{{ foreman_organization }}" 14 | 15 | tasks: 16 | - name: Import the manifest 17 | theforeman.foreman.subscription_manifest: 18 | manifest_path: "{{ foreman_manifest_path }}" 19 | # state: 'present' 20 | 21 | - name: Refresh the manifest 22 | theforeman.foreman.subscription_manifest: 23 | manifest_path: "{{ foreman_manifest_path }}" 24 | state: refreshed 25 | ... 26 | -------------------------------------------------------------------------------- /playbooks/tests/FAM/cv_filter_rule_create.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | gather_facts: false 4 | 5 | module_defaults: 6 | group/theforeman.foreman.foreman: 7 | server_url: "{{ foreman_server_url | default(omit) }}" 8 | username: "{{ foreman_username | default(omit) }}" 9 | password: "{{ foreman_password | default(omit) }}" 10 | validate_certs: "{{ foreman_validate_certs | default(omit) }}" 11 | organization: "{{ foreman_organization }}" 12 | 13 | tasks: 14 | - name: ContentViewFilterRuleCreate 15 | theforeman.foreman.content_view_filter_rule: 16 | name: "{{ cv_filter_rule }}" 17 | content_view: "{{ cv }}" 18 | content_view_filter: "{{ cv_filter }}" 19 | date_type: "{{ date_type | default('updated') }}" 20 | types: "{{ types | default(['bugfix', 'enhancement', 'security']) }}" 21 | start_date: "{{ start_date | default(omit) }}" 22 | end_date: "{{ end_date | default(omit) }}" 23 | ... 24 | -------------------------------------------------------------------------------- /playbooks/tests/hammer-list.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: satellite6 3 | gather_facts: false 4 | tasks: 5 | - name: "Run 'hammer host list' command" 6 | ansible.builtin.shell: 7 | hammer -u "{{ sat_user }}" -p "{{ sat_pass }}" host list --organization "{{ organization }}" --page 1 --per-page 100 >/dev/null 8 | environment: 9 | TZ: UTC 10 | register: run_hammer 11 | loop: "{{ range(30) | list }}" 12 | 13 | - name: "Calculate command duration" 14 | ansible.builtin.set_fact: 15 | combined: "{{ combined + [item | combine( { 'duration': (item.end | to_datetime('%Y-%m-%d %H:%M:%S.%f')).timestamp() - (item.start | to_datetime('%Y-%m-%d %H:%M:%S.%f')).timestamp() } )] }}" 16 | vars: 17 | combined: [] 18 | loop: "{{ run_hammer.results }}" 19 | 20 | - name: "Print results" 21 | ansible.builtin.debug: 22 | msg: "HammerHostList {{ item.start }} to {{ item.end }} taking {{ item.duration }} seconds" 23 | loop: "{{ combined }}" 24 | ... 
25 | -------------------------------------------------------------------------------- /playbooks/tests/FAM/wait_for_task.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | gather_facts: false 4 | 5 | module_defaults: 6 | group/theforeman.foreman.foreman: 7 | server_url: "{{ foreman_server_url | default(omit) }}" 8 | username: "{{ foreman_username | default(omit) }}" 9 | password: "{{ foreman_password | default(omit) }}" 10 | validate_certs: "{{ foreman_validate_certs | default(omit) }}" 11 | vars: 12 | label: "{{ task_label }}" 13 | timeout: "{{ task_timeout | default(omit) }}" 14 | 15 | tasks: 16 | - name: Search4Tasks 17 | theforeman.foreman.resource_info: 18 | organization: "{{ foreman_organization }}" 19 | resource: foreman_tasks 20 | search: "label = {{ label }} and state = running and result = pending" 21 | register: __tasks 22 | 23 | - name: Wait4Tasks 24 | theforeman.foreman.wait_for_task: 25 | task: "{{ item }}" 26 | timeout: "{{ timeout }}" 27 | loop: "{{ __tasks.resources | map(attribute='id') | list }}" 28 | ... 29 | -------------------------------------------------------------------------------- /rel-eng/build.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -x 4 | set -e 5 | 6 | name=$( grep '^Name:' rel-eng/satellite-performance.spec | sed 's/^Name:\s*\([a-z-]\+\)\s*$/\1/' ) 7 | version=$( grep '^Version:' rel-eng/satellite-performance.spec | sed 's/^Version:\s*\([0-9a-z.]\+\)\s*$/\1/' ) 8 | directory="$name-$version" 9 | archive="$directory.tar.gz" 10 | 11 | if [ "$version" = 'master' ]; then 12 | echo "ERROR: Make sure you are in release branch and update version number in rel-eng/satellite-performance.spec first" >&2 13 | exit 1 14 | fi 15 | 16 | # Prepare directory 17 | rm -rf /tmp/$directory 18 | mkdir /tmp/$directory 19 | 20 | # Copy content to the directory 21 | ./cleanup 22 | cp -r * /tmp/$directory/ 23 | 24 | # Create tarball 25 | tar -czf $archive -C /tmp $directory 26 | rm -rf /tmp/$directory 27 | 28 | # Put files to rpmbuild directories 29 | cp $archive ~/rpmbuild/SOURCES/ 30 | rm $archive 31 | cp rel-eng/satellite-performance.spec ~/rpmbuild/SPECS/ 32 | 33 | # Build rpm 34 | rpmbuild -ba ~/rpmbuild/SPECS/satellite-performance.spec 35 | -------------------------------------------------------------------------------- /playbooks/tests/containers.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: docker_hosts 3 | gather_facts: False 4 | tasks: 5 | - name: Get the ssh hosts 6 | shell: 7 | cat /root/.ssh/known_hosts 8 | register: known_hosts 9 | - name: Create the containers file if does not exists 10 | shell: 11 | touch /root/containers.txt 12 | delegate_to: localhost 13 | - name: Write the data to the containers file 14 | shell: 15 | echo "{{ known_hosts.stdout }}" >> /root/containers.txt 16 | delegate_to: localhost 17 | - name: Get the ip addresses of the containers 18 | shell: 19 | cut -d' ' -f1 /root/containers.txt 20 | register: ips 21 | delegate_to: localhost 22 | - name: Save the ips 23 | shell: 24 | echo "{{ ips.stdout }}" > /root/container-ips.txt 25 | delegate_to: localhost 26 | - name: Randomize the placement of the ips 27 | shell: 28 | sort -R /root/container-ips.txt > /root/container-ips.shuffled 29 | delegate_to: localhost 30 | 31 | -------------------------------------------------------------------------------- /playbooks/tests/api-create-organization.yaml: 
-------------------------------------------------------------------------------- 1 | --- 2 | - name: "Create organization using the REST API" 3 | hosts: satellite6 4 | gather_facts: false 5 | tasks: 6 | - name: "Set default variable(s) values" 7 | ansible.builtin.set_fact: 8 | user: "{{ user | default('{{ sat_user }}') | default('admin') }}" 9 | password: "{{ password | default('{{ sat_pass }}') }}" 10 | organization: "{{ organization | default('{{ sat_org }}') | default('Default Organization') }}" 11 | 12 | - name: "Create organization" 13 | # XXX: Submit PR 14 | throttle: 1 15 | ansible.builtin.uri: 16 | url: "https://{{ groups['satellite6'] | first }}/katello/api/organizations" 17 | method: POST 18 | validate_certs: false 19 | force_basic_auth: true 20 | user: "{{ user }}" 21 | password: "{{ password }}" 22 | body_format: json 23 | body: "{{ {'organization': {'name': organization}} }}" 24 | status_code: 25 | - 201 26 | - 422 # "Name has already been taken" 27 | ... 28 | -------------------------------------------------------------------------------- /playbooks/satellite/metrics.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Setup Performance Co-Pilot to gather metrics of a Satellite installation 3 | hosts: satellite6 4 | 5 | tasks: 6 | - name: Setup Foreman repositories to have access to `foreman-pcp` 7 | ansible.builtin.include_role: 8 | name: theforeman.operations.foreman_repositories 9 | vars: 10 | foreman_repositories_version: nightly 11 | when: 12 | - product is defined 13 | - product == 'foreman' or ( product == 'satellite' and sat_version is defined and sat_version == 'foremanctl' ) 14 | 15 | - name: Metrics role (without Grafana) 16 | include_role: 17 | name: theforeman.operations.metrics 18 | vars: 19 | foreman_metrics_grafana_enabled: false 20 | 21 | - name: Disable the Foreman repositories 22 | ansible.builtin.dnf: 23 | name: foreman-release 24 | state: absent 25 | when: 26 | - product is defined 27 | - product == 'foreman' or ( product == 'satellite' and sat_version is defined and sat_version == 'foremanctl' ) 28 | ... 29 | -------------------------------------------------------------------------------- /playbooks/tests/roles/grafana-graph-download/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Ensure that start and end timestamps are present 3 | assert: 4 | that: 5 | - "start_time is defined" 6 | - "end_time is defined" 7 | - name: Verify if the username and password pairs are present for grafana 8 | assert: 9 | that: 10 | - "grafana_username is defined" 11 | - "grafana_password is defined" 12 | - name: Generate a prefix for output file name 13 | set_fact: 14 | output_filename_prefix: "{{ start_time }}_{{ end_time }}" 15 | - name: Download the graphs 16 | get_url: 17 | url: "http://{{ grafana_username }}:{{ grafana_password }}@grafana.perf.lab.eng.bos.redhat.com/render/dashboard-solo/db/satellite6-general-system-performance?from={{ start_time }}&to={{ end_time }}&var-Cloud=satellite62&var-Node={{ satellite_hostname }}&var-Interface=interface-em2&var-Disk=disk-sda&var-cpus0=All&var-cpus00=All&panelId={{ item.value }}&width=1000&height=500" 18 | dest: "{{ output_filename_prefix }}_{{ item.key }}" 19 | with_dict: "{{ grafana_panels }}" 20 | ... 
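Note: the get_url task above asks Grafana to render each panel server-side; the same render endpoint can be fetched with curl when debugging a single panel. The timestamps, credentials, node name and panel id below are placeholders:

    curl -o 1500000000000_1500003600000_cpu.png \
        'http://admin:admin@grafana.perf.lab.eng.bos.redhat.com/render/dashboard-solo/db/satellite6-general-system-performance?from=1500000000000&to=1500003600000&var-Cloud=satellite62&var-Node=satellite.example.com&var-Interface=interface-em2&var-Disk=disk-sda&var-cpus0=All&var-cpus00=All&panelId=1&width=1000&height=500'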
21 | -------------------------------------------------------------------------------- /playbooks/satellite/installation.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "Configure Satellite" 3 | hosts: satellite6 4 | gather_facts: false 5 | roles: 6 | - role: ../common/roles/epel-not-present 7 | - role: ../common/roles/scalelab-generic-cleanup 8 | when: 9 | - "'scalelab' in inventory_hostname" 10 | - role: ../common/roles/plain-network 11 | when: 12 | - configure_plain_network is defined and configure_plain_network | bool == True 13 | - role: ../common/roles/common 14 | - role: ../common/roles/remove-home-extend-root 15 | when: 16 | - (remove_home_partition is defined and remove_home_partition | bool == True) or 17 | (extend_root_partition is defined and extend_root_partition | bool == True) 18 | - role: ../common/roles/enlarge-arp-table 19 | - role: linux-system-roles.timesync 20 | - role: linux-system-roles.rhc 21 | - role: upgrade-restart 22 | - role: linux-system-roles.firewall 23 | - role: linux-system-roles.storage 24 | - role: repo_setup 25 | - role: setup 26 | - role: enable-remote-exec-by-ip 27 | - role: puppet-autosign 28 | ... 29 | -------------------------------------------------------------------------------- /playbooks/tests/FAM/reposet_enable.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | gather_facts: false 4 | 5 | module_defaults: 6 | group/theforeman.foreman.foreman: 7 | server_url: "{{ foreman_server_url | default(omit) }}" 8 | username: "{{ foreman_username | default(omit) }}" 9 | password: "{{ foreman_password | default(omit) }}" 10 | validate_certs: "{{ foreman_validate_certs | default(omit) }}" 11 | organization: "{{ foreman_organization }}" 12 | 13 | tasks: 14 | - name: RepoSetEnable 15 | theforeman.foreman.repository_set: 16 | name: "{{ reposet_name }}" 17 | product: "{{ product }}" 18 | repositories: 19 | - basearch: "{{ basearch | default(omit) }}" 20 | releasever: "{{ releasever | default(omit) }}" 21 | when: 22 | - releasever is defined or basearch is defined 23 | 24 | - name: RepoSetEnable 25 | theforeman.foreman.repository_set: 26 | name: "{{ reposet_name }}" 27 | product: "{{ product }}" 28 | all_repositories: true 29 | when: 30 | - basearch is not defined 31 | - releasever is not defined 32 | ... 33 | -------------------------------------------------------------------------------- /scripts/sync_capsules.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | if [ ! $# == 2 ]; then 3 | echo "Usage: ./sync-capsule.sh " 4 | echo "Example: time ./sync-capsule.sh 2 testing-sat61gas2-005" 5 | exit 6 | fi 7 | numcapsules=$1 8 | testname=$2 9 | 10 | echo "----------------------------------" 11 | sleep 5 12 | # Perform Sync and time results 13 | for numcap in `seq 1 ${numcapsules}`; do 14 | capid=`expr ${numcap} + 1` 15 | echo "[$(date -R)] Starting Capsule Synchronize: ${numcap}" 16 | time hammer -u admin -p changeme capsule content synchronize --id ${capid} >> ${testname}.${capid} 2>&1 & 17 | done 18 | echo "[$(date -R)] Waiting for Sync to finish on Capsules" 19 | wait 20 | echo "[$(date -R)] Syncs finished on Capsules" 21 | sleep 5 22 | for numcap in `seq 1 ${numcapsules}`; do 23 | capid=`expr ${numcap} + 1` 24 | echo "[$(date -R)] SCP sync latency to ." 
25 | echo "[$(date -R)] Real Timing:" 26 | tail -n 3 ${testname}.${capid} | awk '{split($2, mtos,"m");split(mtos[2], seconds, "s"); total = (mtos[1] * 60) + seconds[1]; if (total != 0) {print $1 ", " total }}' 27 | done 28 | echo "----------------------------------" 29 | -------------------------------------------------------------------------------- /playbooks/katello/roles/setup/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Update system packages 3 | yum: 4 | name: "*" 5 | state: latest 6 | 7 | - name: Install katello package 8 | yum: 9 | name: katello 10 | state: latest 11 | register: installing 12 | 13 | - name: Install foreman-ansible plugin 14 | yum: 15 | name: "{{ item }}" 16 | state: latest 17 | with_items: 18 | - tfm-rubygem-foreman_ansible 19 | - tfm-rubygem-foreman-tasks 20 | - tfm-rubygem-foreman_bootdisk 21 | - tfm-rubygem-foreman_templates 22 | - tfm-rubygem-foreman_hooks 23 | - tfm-rubygem-hammer_cli_foreman_docker 24 | - tfm-rubygem-foreman_docker 25 | 26 | - name: Run foreman installer 27 | shell: > 28 | {{ katello_installer_cmd }} 29 | --foreman-admin-email {{ sat_email }} 30 | --foreman-admin-username {{ sat_user }} 31 | --foreman-admin-password {{ sat_pass }} 32 | --enable-foreman-plugin-discovery 33 | --enable-foreman-plugin-remote-execution 34 | --enable-foreman-proxy-plugin-remote-execution-ssh 35 | --enable-foreman-plugin-openscap 36 | when: installing.changed 37 | -------------------------------------------------------------------------------- /playbooks/tests/FAM/repo_sync.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | gather_facts: false 4 | 5 | module_defaults: 6 | group/theforeman.foreman.foreman: 7 | server_url: "{{ foreman_server_url | default(omit) }}" 8 | username: "{{ foreman_username | default(omit) }}" 9 | password: "{{ foreman_password | default(omit) }}" 10 | validate_certs: "{{ foreman_validate_certs | default(omit) }}" 11 | organization: "{{ foreman_organization }}" 12 | 13 | tasks: 14 | - name: Product 15 | when: 16 | - product is defined and product | length > 0 17 | block: 18 | - name: ProductSync 19 | theforeman.foreman.repository_sync: 20 | product: "{{ product }}" 21 | repository: "{{ repository | default(omit) }}" 22 | async: 14400 23 | poll: 0 24 | register: __product_sync 25 | 26 | - name: Wait4ProductSync 27 | ansible.builtin.async_status: 28 | jid: "{{ __product_sync.ansible_job_id }}" 29 | register: __product_sync_job_result 30 | until: __product_sync_job_result is finished 31 | retries: 99999 32 | delay: 10 33 | ... 
34 | -------------------------------------------------------------------------------- /experiment/webui.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | source experiment/run-library.sh 4 | 5 | branch="${PARAM_branch:-satcpt}" 6 | inventory="${PARAM_inventory:-conf/contperf/inventory.${branch}.ini}" 7 | 8 | ui_pages_concurrency="${PARAM_ui_pages_concurrency:-10}" 9 | ui_pages_duration="${PARAM_ui_pages_duration:-300}" 10 | 11 | dl="Default Location" 12 | 13 | opts="--forks 100 -i $inventory" 14 | opts_adhoc="$opts" 15 | 16 | 17 | #section "Checking environment" 18 | #generic_environment_check false 19 | 20 | section "WebUI test" 21 | rm -f /tmp/status-data-webui-pages.json 22 | skip_measurement='true' ap 10-webui-pages.log \ 23 | -e "ui_pages_concurrency=$ui_pages_concurrency" \ 24 | -e "ui_pages_duration=$ui_pages_duration" \ 25 | playbooks/tests/webui-pages.yaml 26 | STATUS_DATA_FILE=/tmp/status-data-webui-pages.json e WebUIPagesTest_c${ui_pages_concurrency}_d${ui_pages_duration} $logs/10-webui-pages.log 27 | 28 | skip_measurement='true' ap 20-webui-static-distributed.log \ 29 | -e "duration=$ui_pages_duration" \ 30 | -e "concurrency=$ui_pages_concurrency" \ 31 | -e "spawn_rate=10" \ 32 | -e "max_static_size=1024" \ 33 | playbooks/tests/webui-static-distributed.yaml 34 | 35 | junit_upload 36 | -------------------------------------------------------------------------------- /experiment/sync-iso.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | source experiment/run-library.sh 4 | 5 | test_sync_iso_count="${PARAM_test_sync_iso_count:-8}" 6 | test_sync_iso_url_template="${PARAM_test_sync_iso_url_template:-http://storage.example.com/iso-repos*}" 7 | test_sync_iso_max_sync_secs="${PARAM_test_sync_iso_max_sync_secs:-600}" 8 | 9 | 10 | section 'Checking environment' 11 | generic_environment_check false true 12 | # unset skip_measurement 13 | # set +e 14 | 15 | 16 | section "Sync file repo" 17 | ap 10-test-sync-iso.log \ 18 | -e "organization='{{ sat_org }}'" \ 19 | -e "test_sync_iso_count=$test_sync_iso_count" \ 20 | -e "test_sync_iso_url_template=$test_sync_iso_url_template" \ 21 | -e "test_sync_iso_max_sync_secs=$test_sync_iso_max_sync_secs" \ 22 | playbooks/tests/sync-iso.yaml 23 | 24 | 25 | section "Summary" 26 | e SyncRepositories $logs/10-test-sync-iso.log 27 | e PublishContentViews $logs/10-test-sync-iso.log 28 | e PromoteContentViews $logs/10-test-sync-iso.log 29 | 30 | 31 | section "Sosreport" 32 | skip_measurement='true' ap sosreporter-gatherer.log \ 33 | -e "sosreport_gatherer_local_dir='../../$logs/sosreport/'" \ 34 | playbooks/satellite/sosreport_gatherer.yaml 35 | 36 | junit_upload 37 | -------------------------------------------------------------------------------- /playbooks/tests/includes/show_grepper.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # On all hosts running playbook, extract lines matching grepper patter from 3 | # certain file and print them. 4 | # 5 | # Expects these variables to be set: 6 | # grepper ... string to grep for 7 | # grepper_log ... filename of a log to grep 8 | 9 | - name: "Get how long '{{ grepper }}' took" 10 | shell: 11 | grep '"msg". 
"{{ grepper }}' "{{ grepper_log }}" | cut -d '"' -f 4 12 | register: log_grepper_timings 13 | when: "grepper != ''" 14 | - name: "Make sure central list is empty" 15 | set_fact: 16 | grepper_times: [] 17 | - name: "Append '{{ grepper }}' timings to central list" 18 | set_fact: 19 | grepper_times: "{{ grepper_times|default([]) + hostvars[item]['log_grepper_timings']['stdout_lines'] }}" 20 | with_items: "{{ ansible_play_batch }}" 21 | run_once: true 22 | when: "grepper != ''" 23 | - name: "Show how long '{{ grepper }}' took" 24 | debug: 25 | var: grepper_times 26 | run_once: true 27 | when: "grepper != ''" 28 | - name: "Show number of successful '{{ grepper }}' events" 29 | debug: 30 | var: grepper_times|length 31 | run_once: true 32 | when: "grepper != ''" 33 | ... 34 | -------------------------------------------------------------------------------- /experiment/sync-docker.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | source experiment/run-library.sh 4 | 5 | test_sync_docker_count="${PARAM_test_sync_docker_count:-8}" 6 | test_sync_docker_url_template="${PARAM_test_sync_docker_url_template:-https://registry-1.docker.io}" 7 | test_sync_docker_max_sync_secs="${PARAM_test_sync_docker_max_sync_secs:-600}" 8 | 9 | 10 | section 'Checking environment' 11 | generic_environment_check false true 12 | # unset skip_measurement 13 | # set +e 14 | 15 | 16 | section "Sync docker repo" 17 | ap 10-test-sync-docker.log \ 18 | -e "organization='{{ sat_org }}'" \ 19 | -e "test_sync_docker_count=$test_sync_docker_count" \ 20 | -e "test_sync_docker_url_template=$test_sync_docker_url_template" \ 21 | -e "test_sync_docker_max_sync_secs=$test_sync_docker_max_sync_secs" \ 22 | playbooks/tests/sync-docker.yaml 23 | 24 | 25 | section "Summary" 26 | e SyncRepositories $logs/10-test-sync-docker.log 27 | e PublishContentViews $logs/10-test-sync-docker.log 28 | e PromoteContentViews $logs/10-test-sync-docker.log 29 | 30 | 31 | section "Sosreport" 32 | skip_measurement='true' ap sosreporter-gatherer.log \ 33 | -e "sosreport_gatherer_local_dir='../../$logs/sosreport/'" \ 34 | playbooks/satellite/sosreport_gatherer.yaml 35 | 36 | junit_upload 37 | -------------------------------------------------------------------------------- /playbooks/tests/includes/prepare_clients_ini.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # This will load number of used containers and then based on requested 3 | # number of containers creates clients.ini file with fresh containers. 4 | # 5 | # Expects these variables to be set: 6 | # use_only_fresh ... only consider used containers counter if this is true 7 | # size ... 
how many containers should be in clients.ini 8 | 9 | - name: "Load number of already registered containers" 10 | command: 11 | cat /root/container-used-count 12 | register: containers_used_count_cmd 13 | ignore_errors: true 14 | when: "use_only_fresh|bool" 15 | - name: "Set number of used containers to 0" 16 | set_fact: 17 | containers_used_count: 0 18 | - name: "Set number of used containers based on file" 19 | set_fact: 20 | containers_used_count: "{{ containers_used_count_cmd.stdout|int }}" # Warning: this is still string: https://github.com/ansible/ansible/issues/15249 21 | when: "use_only_fresh|bool and (containers_used_count_cmd.rc is defined and containers_used_count_cmd.rc == 0)" 22 | 23 | - name: "Generate list of containers we are going to use" 24 | shell: 25 | head -n "{{ containers_used_count|int + size|int }}" /root/container-ips.shuffled | tail -n "{{ size }}" | cut -d ' ' -f 2 > clients.ini 26 | ... 27 | -------------------------------------------------------------------------------- /docs/TODO.md: -------------------------------------------------------------------------------- 1 | # General 2 | 3 | ### Priority 4 | - [ ] add cleanup scripts 5 | 6 | ### Backlog 7 | 8 | - [ ] 9 | 10 | ----- 11 | 12 | # Playbooks 13 | 14 | ## Monitoring 15 | 16 | ### Priority 17 | - [ ] carbon-cache: /etc/graphite-web/local_settings.py -> change SECRET_KEY and ALLOWED_HOSTS 18 | - [x] groupadd collectd 19 | - [x] collectd.conf 20 | - [x] hostname change from IP -> something 21 | - [ ] prepend satellite62.ec2_satellite62 (change this) 22 | - [x] debug turbostat (unsupported cpu on ec2 - msg: not APERF) and also, postgresql-gutterman failure 23 | 24 | ### Backlog 25 | 26 | - [x] connect monitoring to satperf 27 | - [ ] Add iptables -F filters 28 | - [ ] grafana dashboard -> add datasource 29 | 30 | ## Satellite 31 | 32 | ### Priority 33 | 34 | - [ ] connect generic.yaml and cred.yaml to variables loaded from conf/satperf.conf 35 | - [ ] sort attach pool id, RHN registration creds etc with content hosts 36 | - [x] sort capsules var set in satperf.conf and hosts.ini 37 | - [ ] handle Dockerpod_file 38 | - [ ] connect scripts/ to satellite playbooks 39 | - [ ] create vms from satperf 40 | 41 | ### Backlog 42 | 43 | - [ ] integrate pbench 44 | - [ ] integrate content-view-promote 45 | - [ ] scripts/ missing. 
46 | - [ ] integrate content-view-publish 47 | - [ ] connect scripts 48 | - [ ] integrate health check 49 | -------------------------------------------------------------------------------- /experiment/sync-yum.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | source experiment/run-library.sh 4 | 5 | test_sync_repositories_count="${PARAM_test_sync_repositories_count:-8}" 6 | test_sync_repositories_url_template="${PARAM_test_sync_repositories_url_template:-http://repos.example.com/repo*}" 7 | test_sync_repositories_max_sync_secs="${PARAM_test_sync_repositories_max_sync_secs:-600}" 8 | 9 | 10 | section 'Checking environment' 11 | generic_environment_check false true 12 | # unset skip_measurement 13 | # set +e 14 | 15 | 16 | section "Sync test" 17 | ap 10-test-sync-repositories.log \ 18 | -e "organization='{{ sat_org }}'" \ 19 | -e "test_sync_repositories_count=$test_sync_repositories_count" \ 20 | -e "test_sync_repositories_url_template=$test_sync_repositories_url_template" \ 21 | -e "test_sync_repositories_max_sync_secs=$test_sync_repositories_max_sync_secs" \ 22 | playbooks/tests/sync-repositories.yaml 23 | 24 | 25 | section "Summary" 26 | e SyncRepositories $logs/10-test-sync-repositories.log 27 | e PublishContentViews $logs/10-test-sync-repositories.log 28 | e PromoteContentViews $logs/10-test-sync-repositories.log 29 | 30 | 31 | section "Sosreport" 32 | skip_measurement='true' ap sosreporter-gatherer.log \ 33 | -e "sosreport_gatherer_local_dir='../../$logs/sosreport/'" \ 34 | playbooks/satellite/sosreport_gatherer.yaml 35 | 36 | 37 | junit_upload 38 | -------------------------------------------------------------------------------- /playbooks/tests/includes/generate-applicability.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # Run generate applicability task and wait for it to finish 3 | # 4 | # Expects these variables to be set: 5 | # test_generate_applicability_timeout - timeout in seconds when waiting for task to finish 6 | - name: "Measure generate applicability task" 7 | ansible.builtin.shell: | 8 | echo "ForemanTasks.async_task(::Actions::Katello::Host::GenerateApplicability, Host.all)" | foreman-rake console 9 | task=$( hammer --output yaml -u "{{ sat_user }}" -p "{{ sat_pass }}" task list --search "label = Actions::Katello::Host::GenerateApplicability" --page 1 --per-page 1 --order 'started at' | grep '^- ID:' | cut -d ' ' -f 3 ) 10 | /root/wait-for-task.sh "{{ sat_user }}" "{{ sat_pass }}" "$task" "{{ test_generate_applicability_timeout }}" 11 | register: run_applicability 12 | environment: 13 | TZ: UTC 14 | ignore_errors: yes 15 | 16 | - ansible.builtin.debug: 17 | var: run_applicability 18 | when: "run_applicability.rc is defined and run_applicability.rc != 0" 19 | 20 | - name: "Print results" 21 | ansible.builtin.debug: 22 | msg: "GenerateApplicability {{ run_applicability.stdout_lines[-3] }} to {{ run_applicability.stdout_lines[-2] }} taking {{ run_applicability.stdout_lines[-1] }} seconds" 23 | when: "run_applicability.rc is defined and run_applicability.rc == 0" 24 | ... 
25 | -------------------------------------------------------------------------------- /playbooks/katello/roles/add_host_to_hostfile/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Make sure IPv4 localhost is present in hosts file 3 | lineinfile: 4 | dest: /etc/hosts 5 | regexp: '^127\.0\.0\.1\s+localhost localhost\.localdomain localhost4 localhost4\.localdomain4$' 6 | line: "127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4" 7 | state: present 8 | 9 | - name: Make sure IPv6 localhost is present in hosts file 10 | lineinfile: 11 | dest: /etc/hosts 12 | regexp: '^::1\s+localhost localhost\.localdomain localhost6 localhost6\.localdomain6$' 13 | line: "::1 localhost localhost.localdomain localhost6 localhost6.localdomain6" 14 | state: present 15 | 16 | # Bit of hack here - we always set the default (public) ip address first... 17 | - name: Add default Katello host ip to the hosts file 18 | lineinfile: 19 | dest: /etc/hosts 20 | regexp: ".*{{ ansible_fqdn }}$" 21 | line: "{{ ansible_default_ipv4.address }} {{ ansible_fqdn }}" 22 | 23 | # ...and here we replace it with the private ip used to communicate using the 24 | # private 10gb network, if present 25 | - name: Add private ip address to the hosts file if defined 26 | lineinfile: 27 | dest: /etc/hosts 28 | regexp: ".*{{ ansible_fqdn }}$" 29 | line: "{{ ip }} {{ ansible_fqdn }}" 30 | when: "ip is defined" 31 | -------------------------------------------------------------------------------- /playbooks/satellite/roles/client-scripts/files/host-registration_prepare.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: all 3 | gather_facts: false 4 | 5 | tasks: 6 | - name: Detect RHEL release 7 | ansible.builtin.setup: 8 | filter: 9 | - ansible_distribution_major_version 10 | when: 11 | - ansible_facts['distribution_major_version'] is not defined 12 | 13 | - name: Set AK name 14 | ansible.builtin.set_fact: 15 | ak: "AK_{{ ansible_facts['distribution_major_version'] }}_Test" 16 | 17 | - name: Cleanup 18 | ansible.builtin.shell: 19 | cmd: | 20 | rm -f /etc/yum.repos.d/*.repo 21 | 22 | - name: Upload host registration script to content host 23 | ansible.builtin.copy: 24 | src: /root/host-registration.{{ ak }}.sh 25 | dest: /root/host-registration.sh 26 | mode: '0500' 27 | 28 | - name: Upload podman related scripts to content host 29 | ansible.builtin.copy: 30 | src: "{{ item }}" 31 | dest: "{{ item }}" 32 | mode: '0500' 33 | loop: 34 | - /root/podman-login.sh 35 | - /root/podman-pull-rhosp.sh 36 | 37 | - name: Introduce issue to be fixed by 'Remediations' service 38 | ansible.builtin.file: 39 | path: /etc/ssh/sshd_config 40 | mode: '0777' 41 | when: 42 | - enable_iop | default(false) | bool 43 | ... 
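Note: host-registration_prepare.yaml above targets the client containers (hosts: all) and copies scripts that must already exist under /root on the side running Ansible. A sketch of driving it against the clients.ini inventory generated by prepare_clients_ini.yaml above; the private key path is an assumption borrowed from the other nested client runs, and the playbook is assumed to have been staged next to clients.ini:

    ansible-playbook --private-key /root/id_rsa_key -f 50 -i clients.ini \
        -e enable_iop=false \
        host-registration_prepare.yaml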
44 | -------------------------------------------------------------------------------- /playbooks/satellite/roles/haproxy/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "Get group name of our location" 3 | ansible.builtin.set_fact: 4 | location_groupname: "location_{{ location | lower }}" 5 | 6 | - name: "Get capsule names" 7 | ansible.builtin.set_fact: 8 | lb_capsules: "{{ groups['capsules'] | intersect(groups[location_groupname]) | sort }}" 9 | 10 | - name: "Tune sysctl" 11 | ansible.posix.sysctl: 12 | sysctl_file: "/etc/sysctl.d/30-hapee-2.8.conf" 13 | name: "{{ item.key }}" 14 | value: "{{ item.value }}" 15 | reload: True 16 | loop: "{{ capsule_lbs_sysctl_config_default | dict2items }}" 17 | 18 | - name: "Install HAProxy" 19 | ansible.builtin.dnf: 20 | name: haproxy 21 | state: latest 22 | 23 | - name: "Install policycoreutils-python-utils (`semanage` package)" 24 | ansible.builtin.dnf: 25 | name: policycoreutils-python-utils 26 | state: latest 27 | 28 | - name: "Configure SELinux to allow HAProxy to bind any port" 29 | ansible.posix.seboolean: 30 | name: haproxy_connect_any 31 | state: true 32 | persistent: true 33 | 34 | - name: "Configure HAproxy" 35 | ansible.builtin.template: 36 | src: haproxy.cfg.j2 37 | dest: /etc/haproxy/haproxy.cfg 38 | mode: '0644' 39 | 40 | - name: "Start and enable HAProxy systemd service unit" 41 | ansible.builtin.systemd_service: 42 | name: haproxy 43 | state: restarted 44 | enabled: true 45 | ... 46 | -------------------------------------------------------------------------------- /playbooks/tests/openSCAP-role.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: satellite6 3 | gather_facts: False 4 | tasks: 5 | - name: "Install openSCAP client" 6 | shell: | 7 | hammer job-invocation create --search-query 'container' --job-template 'Ansible Roles - Ansible Default' --async & 8 | sleep 30 9 | - name: "Get last job ID" 10 | shell: 11 | hammer --csv job-invocation list | cut -d ',' -f 1 | grep '^[0-9]*$' | sort -n | tail -n 1 12 | register: last_job_id_cmd 13 | - debug: var=last_job_id_cmd 14 | - name: "Set last job ID" 15 | set_fact: 16 | last_job_id: "{{ last_job_id_cmd.stdout_lines[0]|int }}" 17 | - name: "Copy script" 18 | copy: 19 | src: files/wait-for-job.py 20 | dest: /root/wait-for-job.py 21 | mode: "u=rwx,g=rx,o=rx" 22 | force: yes 23 | - name: Install simpleJson 24 | pip: 25 | name: simpleJson 26 | executable: pip3 27 | - name: Install requests 28 | pip: 29 | name: requests 30 | executable: pip3 31 | - name: "Wait for job {{ last_job_id }} to finish" 32 | shell: 33 | /root/wait-for-job.py "{{ sat_user }}" "{{ sat_pass }}" "https://{{ groups['satellite6']|first }}" "{{ last_job_id }}" "{{ max_age_task }}" 34 | register: wait_for_job_cmd 35 | ignore_errors: yes 36 | until: wait_for_job_cmd is not failed 37 | retries: 5 38 | delay: 60 39 | -------------------------------------------------------------------------------- /playbooks/common/roles/common-eth1/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # Configure bridge we are going to use 3 | # https://access.redhat.com/documentation/en-US/Red_Hat_Enterprise_Linux/6/html/Deployment_Guide/s2-networkscripts-interfaces_network-bridge.html 4 | - lineinfile: 5 | dest: "/etc/sysconfig/network-scripts/ifcfg-{{ nic_private }}" 6 | create: yes 7 | regexp: "^ONBOOT=" 8 | line: "ONBOOT=yes" 9 | - lineinfile: 10 | dest: 
"/etc/sysconfig/network-scripts/ifcfg-{{ nic_private }}" 11 | regexp: "^DEVICE=" 12 | line: "DEVICE={{ nic_private }}" 13 | - lineinfile: 14 | dest: "/etc/sysconfig/network-scripts/ifcfg-{{ nic_private }}" 15 | regexp: "^TYPE=" 16 | line: "TYPE=Ethernet" 17 | - lineinfile: 18 | dest: "/etc/sysconfig/network-scripts/ifcfg-{{ nic_private }}" 19 | regexp: "^BOOTPROTO=" 20 | line: "BOOTPROTO=static" 21 | - lineinfile: 22 | dest: "/etc/sysconfig/network-scripts/ifcfg-{{ nic_private }}" 23 | regexp: "^IPADDR=" 24 | line: "IPADDR={{ ip }}" 25 | - lineinfile: 26 | dest: "/etc/sysconfig/network-scripts/ifcfg-{{ nic_private }}" 27 | regexp: "^NETMASK=" 28 | line: "NETMASK=255.0.0.0" 29 | - name: "Kill all dhclients (network service was not restarting)" 30 | command: 31 | killall dhclient 32 | ignore_errors: yes 33 | - name: "Restart network" 34 | command: 35 | service network restart 36 | ... 37 | -------------------------------------------------------------------------------- /playbooks/satellite/roles/apply_custom-hiera/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Check that provided path is a real file 3 | ansible.builtin.stat: 4 | path: "{{ content_file }}" 5 | delegate_to: localhost 6 | register: st 7 | 8 | - when: 9 | - st.stat.exists 10 | block: 11 | - name: Copy content over to /etc/foreman-installer/custom-hiera.yaml 12 | ansible.builtin.blockinfile: 13 | block: "{{ lookup('ansible.builtin.file', '{{ content_file }}') }}" 14 | path: /etc/foreman-installer/custom-hiera.yaml 15 | prepend_newline: true 16 | append_newline: true 17 | backup: true 18 | when: 19 | - marker_postfix is not defined or marker_postfix | length == 0 20 | 21 | - name: Copy content over to /etc/foreman-installer/custom-hiera.yaml 22 | ansible.builtin.blockinfile: 23 | block: "{{ lookup('ansible.builtin.file', '{{ content_file }}') }}" 24 | path: /etc/foreman-installer/custom-hiera.yaml 25 | marker: "# {mark} ANSIBLE MANAGED BLOCK - {{ marker_postfix }}" 26 | prepend_newline: true 27 | append_newline: true 28 | backup: true 29 | when: 30 | - marker_postfix is defined and marker_postfix | length > 0 31 | 32 | - name: Remove file now that its content have been copied 33 | ansible.builtin.file: 34 | path: "{{ content_file }}" 35 | state: absent 36 | delegate_to: localhost 37 | ... 
38 | -------------------------------------------------------------------------------- /experiment/sync-ansible-collections.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | source experiment/run-library.sh 4 | 5 | test_sync_ansible_collections_count="${PARAM_test_sync_ansible_collections_count:-8}" 6 | test_sync_ansible_collections_url_template="${PARAM_test_sync_ansible_collections_url_template:-https://galaxy.ansible.com/}" 7 | test_sync_ansible_collections_max_sync_secs="${PARAM_test_sync_ansible_collections_max_sync_secs:-600}" 8 | 9 | 10 | section 'Checking environment' 11 | generic_environment_check false false 12 | # unset skip_measurement 13 | # set +e 14 | 15 | 16 | section "Sync test" 17 | ap 10-test-sync-ansible-collections.log \ 18 | -e "organization='{{ sat_org }}'" \ 19 | -e "test_sync_ansible_collections_count=$test_sync_ansible_collections_count" \ 20 | -e "test_sync_ansible_collections_url_template=$test_sync_ansible_collections_url_template" \ 21 | -e "test_sync_ansible_collections_max_sync_secs=$test_sync_ansible_collections_max_sync_secs" \ 22 | playbooks/tests/sync-ansible-collections.yaml 23 | 24 | 25 | section "Summary" 26 | e SyncRepositories $logs/10-test-sync-ansible-collections.log 27 | e PublishContentViews $logs/10-test-sync-ansible-collections.log 28 | e PromoteContentViews $logs/10-test-sync-ansible-collections.log 29 | 30 | 31 | section "Sosreport" 32 | skip_measurement='true' ap sosreporter-gatherer.log \ 33 | -e "sosreport_gatherer_local_dir='../../$logs/sosreport/'" \ 34 | playbooks/satellite/sosreport_gatherer.yaml 35 | 36 | 37 | junit_upload 38 | -------------------------------------------------------------------------------- /playbooks/tests/roles/wait-for-task-script/files/wait-for-task.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | user=$1 4 | pass=$2 5 | task=$3 6 | timeout=$4 7 | log="$( mktemp )" 8 | 9 | echo "DEBUG user: $user, pass: $pass, task: $task, timeout: $timeout, log: $log" 10 | 11 | while true; do 12 | hammer --output yaml -u "$user" -p "$pass" task info --id "$task" >$log 13 | 14 | # Check if we are in correct state 15 | if grep --quiet '^State:\s\+stopped' $log && 16 | grep --quiet '^Result:\s\+success' $log; then 17 | echo "INFO Task $task is in stopped/success state now" 18 | break 19 | fi 20 | 21 | # Check for timeout 22 | started_at="$( date -u -d "$( grep 'Started at:' $log | sed 's/^[^:]\+: //' )" +%s )" 23 | now="$( date -u +%s )" 24 | if (( $(( now - started_at )) > timeout )); then 25 | rm -f $log 26 | echo "TIMEOUT waiting on task $task" >&2 27 | exit 1 28 | fi 29 | 30 | # Check if we are in some incorrect state 31 | if grep --quiet '^State:\s\+stopped' $log && 32 | grep --quiet '^Result:\s\+warning' $log; then 33 | rm -f $log 34 | echo "ERROR Task $task is in stopped/warning state" >&2 35 | exit 2 36 | fi 37 | 38 | # Wait and try again 39 | sleep 10 40 | done 41 | 42 | grep '^Started at:' $log | sed -e 's/^[^:]\+: //' -e 's/ UTC//' 43 | grep '^Ended at:' $log | sed -e 's/^[^:]\+: //' -e 's/ UTC//' 44 | awk '/^Duration:/ {print $NF}' $log | cut -d"'" -f2 45 | 46 | rm -f $log 47 | -------------------------------------------------------------------------------- /playbooks/common/roles/scalelab-generic-cleanup/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "Cleanup interfaces" 3 | shell: | 4 | ls /etc/sysconfig/network-scripts/ifcfg-*.10? 
5 | if [ $? -eq 0 ]; then 6 | /root/clean-interfaces.sh --nuke 7 | nmcli c delete enp1s0f0.101 || true 8 | nmcli c delete enp1s0f1.102 || true 9 | nmcli c delete enp2s0f1.103 || true 10 | else 11 | echo "Interfaces seem to be cleaned" 12 | fi 13 | register: cleanup_interfaces_script 14 | changed_when: "cleanup_interfaces_script.rc == 0 and 'Interfaces seem to be cleaned' not in cleanup_interfaces_script.stdout" 15 | 16 | - name: "Detect RHEL release" 17 | setup: 18 | filter: ansible_distribution_major_version 19 | when: ansible_distribution_major_version is not defined 20 | 21 | - name: "Fix firewalld config" 22 | shell: | 23 | lock=/root/.fix-firewalld-config.lock 24 | if [ -f "$lock" ]; then 25 | echo "Firewalld config seem to be already fixed" 26 | else 27 | if rpm -q firewalld; then 28 | systemctl unmask firewalld.service 29 | firewall-cmd --add-service ssh 30 | rpm -q iptables-services && dnf remove -y iptables-services 31 | fi 32 | date >"$lock" 33 | fi 34 | when: "ansible_distribution_major_version|int == 8" 35 | register: fix_firewalld_config_script 36 | changed_when: "fix_firewalld_config_script.rc == 0 and 'Firewalld config seem to be already fixed' not in fix_firewalld_config_script.stdout" 37 | ... 38 | -------------------------------------------------------------------------------- /playbooks/tests/includes/puppet-big-test-register.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: "Determine log name for registration preparation" 3 | set_fact: 4 | prepare_cmd_log: "/root/out-puppet-big-prepare-{{ lookup('pipe', 'date -u -Iseconds') }}.log" 5 | run_once: yes 6 | - name: "Run puppet-big-client.yaml (log = {{ prepare_cmd_log }}; tags = PREPARE)" 7 | shell: 8 | ansible-playbook --private-key /root/id_rsa_key -f "{{ forks|default(size) }}" -i clients.ini --extra-vars "server='{{ tests_registration_target|default(groups['satellite6']|first) }}' content_puppet_env='{{ content_puppet_env }}'" --tags "PREPARE" puppet-big-client.yaml &> "{{ prepare_cmd_log }}" 9 | register: prepare_cmd 10 | environment: 11 | TZ: UTC 12 | ignore_errors: true 13 | 14 | - name: "Determine log name for register" 15 | set_fact: 16 | register_cmd_log: "/root/out-puppet-big-register-{{ lookup('pipe', 'date -u -Iseconds') }}.log" 17 | run_once: yes 18 | - name: "Run puppet-big-client.yaml (log = {{ register_cmd_log }}; tags = REGISTER)" 19 | shell: 20 | ansible-playbook --private-key /root/id_rsa_key -f "{{ forks|default(size) }}" -i clients.ini --tags "REGISTER" puppet-big-client.yaml &> "{{ register_cmd_log }}" 21 | register: register_cmd 22 | environment: 23 | TZ: UTC 24 | ignore_errors: true 25 | 26 | - import_tasks: includes/show_grepper.yaml 27 | vars: 28 | grepper: "RegisterPuppet" 29 | grepper_log: "{{ register_cmd_log }}" 30 | ... 
31 | -------------------------------------------------------------------------------- /playbooks/tests/FAM/job_invocation_create.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | gather_facts: false 4 | 5 | module_defaults: 6 | group/theforeman.foreman.foreman: 7 | server_url: "{{ foreman_server_url | default(omit) }}" 8 | username: "{{ foreman_username | default(omit) }}" 9 | password: "{{ foreman_password | default(omit) }}" 10 | validate_certs: "{{ foreman_validate_certs | default(omit) }}" 11 | # organization: "{{ foreman_organization }}" 12 | 13 | tasks: 14 | - name: JobInvocationCreate 15 | theforeman.foreman.job_invocation: 16 | job_template: "{{ job_template | default(omit) }}" 17 | search_query: "{{ search_query | default(omit) }}" 18 | command: "{{ command | default(omit) }}" 19 | description_format: "{{ description_format | default(omit) }}" 20 | inputs: "{{ inputs | default(omit) }}" 21 | feature: "{{ feature | default(omit) }}" 22 | register: __job_invocation 23 | 24 | - name: Get task ID 25 | ansible.builtin.set_fact: 26 | task_id: "{{ __job_invocation.entity.job_invocations[0].task.id }}" 27 | 28 | - name: Wait for forked task to finish 29 | theforeman.foreman.wait_for_task: 30 | task: "{{ task_id }}" 31 | timeout: "{{ task_timeout | default(900) }}" 32 | ignore_errors: true 33 | 34 | - name: GetForemanTaskResourceInfo 35 | theforeman.foreman.resource_info: 36 | resource: foreman_tasks 37 | search: "id = {{ task_id }}" 38 | ... 39 | -------------------------------------------------------------------------------- /playbooks/satellite/roles/satellite-populate/defaults/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | sat_repos_sync: 'async' # Set to 'async' if you want to sync repos with '--async', or to 'sync' if you do not want that option 3 | sat_repos: 4 | - product: "Red Hat Enterprise Linux Server" 5 | reposet: "Red Hat Enterprise Linux 7 Server (RPMs)" 6 | repo: "Red Hat Enterprise Linux 7 Server RPMs x86_64 7Server" 7 | basearch: "x86_64" 8 | releasever: "7Server" 9 | - product: "Red Hat Enterprise Linux Server" 10 | reposet: "Red Hat Satellite Tools {{ sat_version }} for RHEL 7 Server RPMs" 11 | repo: "Red Hat Satellite Tools {{ sat_version }} for RHEL 7 Server RPMs x86_64" 12 | basearch: "x86_64" 13 | - product: "Red Hat Satellite Capsule" 14 | reposet: "Red Hat Satellite Capsule {{ sat_version }} (for RHEL 7 Server) (RPMs)" 15 | repo: "Red Hat Satellite Capsule {{ sat_version }} for RHEL 7 Server RPMs x86_64" 16 | basearch: "x86_64" 17 | - product: "Red Hat Software Collections (for RHEL Server)" 18 | reposet: "Red Hat Software Collections RPMs for Red Hat Enterprise Linux 7 Server" 19 | repo: "Red Hat Software Collections RPMs for Red Hat Enterprise Linux 7 Server x86_64 7Server" 20 | basearch: "x86_64" 21 | releasever: "7Server" 22 | - product: "Red Hat Enterprise Linux Server" 23 | reposet: "Red Hat Enterprise Linux 7 Server - Optional (RPMs)" 24 | repo: "Red Hat Enterprise Linux 7 Server - Optional RPMs x86_64 7Server" 25 | basearch: "x86_64" 26 | releasever: "7Server" 27 | ... 
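Note: the defaults above expand {{ sat_version }} inside the reposet names, so the populate playbook is normally driven with the target version and organization passed as extra vars. The version number, inventory path and organization below are illustrative, and sat_repos_sync is overridden to 'sync' to drop the '--async' behaviour described in the comment:

    ansible-playbook -i conf/hosts.ini \
        -e sat_version=6.10 \
        -e 'organization="Default Organization"' \
        -e sat_repos_sync=sync \
        playbooks/satellite/satellite-populate.yaml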
28 | -------------------------------------------------------------------------------- /scripts/build-lots-of-packages-updateinfo.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # This script generates updateinfo.xml file for your repodata 4 | # 5 | # $ bash ../scripts/build-lots-of-packages-updateinfo.sh >updateinfo.xml 6 | # $ modifyrepo updateinfo.xml repodata/ 7 | 8 | echo '' 9 | echo '' 10 | 11 | name='foo' 12 | ver='0.1' 13 | ver_new='0.2' 14 | rel='50' 15 | arch='x86_64' 16 | 17 | i=1 18 | for f in $( ls *.rpm | grep "$name[0-9]\+-$ver-$rel\.$arch\.rpm" ); do 19 | name=$( echo "$f" | cut -d '-' -f 1 ) 20 | 21 | echo " 22 | RHBA-$( date +'%Y' ):$( printf '%04d' $i ) 23 | Foo$i erratum title 24 | 1 25 | 26 | $name erratum description 27 | critical 28 | 29 | 30 | 1 31 | 32 | $name-$ver_new-$rel.$arch.rpm 33 | 34 | 35 | $name-sub0-$ver_new-$rel.$arch.rpm 36 | 37 | 38 | 39 | " 40 | 41 | let i+=1 42 | ###break 43 | done 44 | 45 | echo '' 46 | -------------------------------------------------------------------------------- /scripts/populate_container_registry.sh: -------------------------------------------------------------------------------- 1 | set -e 2 | 3 | repos=20 4 | images=100 5 | 6 | concurency=10 7 | registry_host=${REGISTRY_HOST:-registry.example.com:5000} 8 | 9 | function doit() { 10 | tag="$1" 11 | log="$( echo "$tag" | sed 's/[^a-zA-Z0-9-]/_/g' ).log" 12 | echo "DEBUG: Started build of $tag with log in $log" 13 | { 14 | podman build -f populate_docker_registry-Containerfile . --tag $tag --no-cache --rm=true 15 | rc_build=$? 16 | podman push $tag --tls-verify=false 17 | rc_push=$? 18 | podman rmi $tag 19 | rc_rmi=$? 20 | } >$log 21 | echo "$( date -Ins ) $tag $rc_build $rc_push $rc_build" >>aaa.log 22 | } 23 | 24 | for repo in $( seq $repos ); do 25 | for image in $( seq $images ); do 26 | doit "$registry_host/test_repo$repo:ver$image" & 27 | 28 | # If number of background processes raises to set concurency lvl, 29 | # block untill some process ends 30 | background=( $(jobs -p) ) 31 | if (( ${#background[@]} >= $concurency )); then 32 | echo "DEBUG: Reached concurency level, waiting for some task to finish" 33 | new_background=( $(jobs -p) ) 34 | while [ "${background[*]}" = "${new_background[*]}" ]; do 35 | echo "DEBUG: Waiting: ${new_background[*]}" 36 | new_background=( $(jobs -p) ) 37 | sleep 1 38 | done 39 | fi 40 | done 41 | done 42 | 43 | echo "DEBUG: Waiting for ramaining tasks" 44 | wait 45 | -------------------------------------------------------------------------------- /rel-eng/satellite-performance.spec: -------------------------------------------------------------------------------- 1 | Name: satellite-performance 2 | Version: master 3 | Release: 1%{?dist} 4 | Summary: Red Hat Satellite 6 Performance testing framework and tests 5 | License: GPLv2 6 | Group: Development/Tools 7 | URL: https://github.com/redhat-performance/satellite-performance 8 | Source0: https://github.com/redhat-performance/satellite-performance/archive/%{name}-%{version}.tar.gz 9 | BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) 10 | BuildArch: noarch 11 | Requires: ansible 12 | 13 | 14 | %description 15 | Red Hat Satellite 6 Performance testing framework and tests 16 | 17 | 18 | %prep 19 | %setup -qc 20 | pwd 21 | ls -al 22 | 23 | 24 | %build 25 | 26 | 27 | %install 28 | rm -rf %{buildroot} 29 | pushd %{name}-%{version} 30 | mkdir -p %{buildroot}/%{_datadir}/%{name} 31 | cp README.md 
%{buildroot}/%{_datadir}/%{name} 32 | cp LICENSE %{buildroot}/%{_datadir}/%{name} 33 | cp cleanup %{buildroot}/%{_datadir}/%{name} 34 | cp ansible.cfg %{buildroot}/%{_datadir}/%{name} 35 | cp -r playbooks %{buildroot}/%{_datadir}/%{name} 36 | mkdir %{buildroot}/%{_datadir}/%{name}/conf 37 | cp conf/hosts.ini %{buildroot}/%{_datadir}/%{name}/conf 38 | cp conf/satperf.yaml %{buildroot}/%{_datadir}/%{name}/conf 39 | popd 40 | 41 | 42 | %clean 43 | rm -rf %{buildroot} 44 | 45 | 46 | %files 47 | %defattr(-,root,root,-) 48 | %{_datadir}/%{name} 49 | 50 | 51 | %changelog 52 | * Wed May 31 2017 Jan Hutar 1.1-1 53 | - Init 54 | -------------------------------------------------------------------------------- /experiment/sync-mixed-one-cv.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | source experiment/run-library.sh 4 | 5 | test_sync_mixed_count="${PARAM_test_sync_mixed_count:-8}" 6 | test_sync_mixed_max_sync_secs="${PARAM_test_sync_mixed_max_sync_secs:-1200}" 7 | test_sync_docker_url_template="${PARAM_test_sync_docker_url_template:-https://registry-1.docker.io}" 8 | test_sync_repositories_url_template="${PARAM_test_sync_repositories_url_template:-http://repos.example.com/repo*}" 9 | test_sync_iso_url_template="${PARAM_test_sync_iso_url_template:-http://storage.example.com/iso-repos*}" 10 | 11 | 12 | section 'Checking environment' 13 | generic_environment_check 14 | # unset skip_measurement 15 | # set +e 16 | 17 | 18 | section "Sync mixed repo" 19 | ap 10-test-sync-mixed.log \ 20 | -e "organization='{{ sat_org }}'" \ 21 | -e "test_sync_mixed_count=$test_sync_mixed_count" \ 22 | -e "test_sync_repositories_url_template=$test_sync_repositories_url_template" \ 23 | -e "test_sync_iso_url_template=$test_sync_iso_url_template" \ 24 | -e "test_sync_docker_url_template=$test_sync_docker_url_template" \ 25 | -e "test_sync_mixed_max_sync_secs=$test_sync_mixed_max_sync_secs" \ 26 | playbooks/tests/sync-mixed-repos-one-cvs.yaml 27 | 28 | 29 | section "Summary" 30 | e SyncRepositoriesYum $logs/10-test-sync-mixed.log 31 | e SyncRepositoriesDocker $logs/10-test-sync-mixed.log 32 | e SyncRepositoriesISO $logs/10-test-sync-mixed.log 33 | e PublishContentViews $logs/10-test-sync-mixed.log 34 | e PromoteContentViews $logs/10-test-sync-mixed.log 35 | 36 | 37 | junit_upload 38 | -------------------------------------------------------------------------------- /experiment/sync-mixed-two-cv.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | source experiment/run-library.sh 4 | 5 | test_sync_mixed_count="${PARAM_test_sync_mixed_count:-8}" 6 | test_sync_mixed_max_sync_secs="${PARAM_test_sync_mixed_max_sync_secs:-1200}" 7 | test_sync_docker_url_template="${PARAM_test_sync_docker_url_template:-https://registry-1.docker.io}" 8 | test_sync_repositories_url_template="${PARAM_test_sync_repositories_url_template:-http://repos.example.com/repo*}" 9 | test_sync_iso_url_template="${PARAM_test_sync_iso_url_template:-http://storage.example.com/iso-repos*}" 10 | 11 | 12 | section 'Checking environment' 13 | generic_environment_check 14 | # unset skip_measurement 15 | # set +e 16 | 17 | 18 | section "Sync mixed repo" 19 | ap 10-test-sync-mixed.log \ 20 | -e "organization='{{ sat_org }}'" \ 21 | -e "test_sync_mixed_count=$test_sync_mixed_count" \ 22 | -e "test_sync_repositories_url_template=$test_sync_repositories_url_template" \ 23 | -e "test_sync_iso_url_template=$test_sync_iso_url_template" \ 24 | -e 
"test_sync_docker_url_template=$test_sync_docker_url_template" \ 25 | -e "test_sync_mixed_max_sync_secs=$test_sync_mixed_max_sync_secs" \ 26 | playbooks/tests/sync-mixed-repos-two-cvs.yaml 27 | 28 | 29 | section "Summary" 30 | e SyncRepositoriesYum $logs/10-test-sync-mixed.log 31 | e SyncRepositoriesDocker $logs/10-test-sync-mixed.log 32 | e SyncRepositoriesISO $logs/10-test-sync-mixed.log 33 | e PublishContentViews $logs/10-test-sync-mixed.log 34 | e PromoteContentViews $logs/10-test-sync-mixed.log 35 | 36 | 37 | junit_upload 38 | -------------------------------------------------------------------------------- /playbooks/satellite/roles/workaround-local-sat-repo/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | # You need to `ansible-galaxy collection install community.general` before running this 2 | 3 | --- 4 | - name: Install webserver that mirror repos 5 | delegate_to: 127.0.0.1 6 | yum: 7 | name: yum-utils , createrepo 8 | state: latest 9 | 10 | - name: Creates directory 11 | delegate_to: 127.0.0.1 12 | file: 13 | path: /tmp/sat_repos 14 | state: directory 15 | 16 | - name: Sync packages from sat repo 17 | delegate_to: 127.0.0.1 18 | shell: | 19 | reposync --nogpgcheck --repoid={{ item }} --download-path=/tmp/sat_repos/{{ item }} --norepopath 20 | createrepo -v /tmp/sat_repos/{{ item }} 21 | loop: "{{ workaround_local_sat_repo_repos }}" 22 | 23 | - name: Compress directory sat repos 24 | delegate_to: 127.0.0.1 25 | community.general.archive: 26 | path: /tmp/sat_repos 27 | dest: /tmp/sat_repos.tgz 28 | 29 | - name: Copy sat_repos file to remote location 30 | copy: 31 | src: /tmp/sat_repos.tgz 32 | dest: /tmp/sat_repos.tgz 33 | 34 | - name: Extract 35 | ansible.builtin.unarchive: 36 | src: /tmp/sat_repos.tgz 37 | dest: /tmp/ 38 | 39 | - name: Configure the local repo 40 | copy: 41 | dest: /etc/yum.repos.d/workaround-local-sat-repo-{{ item }}.repo 42 | content: | 43 | [workaround-local-sat-repo-{{ item }}] 44 | name = workaround-local-sat-repo-{{ item }} 45 | baseurl = file:///tmp/sat_repos/{{ item }} 46 | gpgcheck = 0 47 | loop: "{{ workaround_local_sat_repo_repos }}" 48 | 49 | - name: Remove sat repo 50 | set_fact: 51 | sat_repo_file: "" 52 | ... 
53 | -------------------------------------------------------------------------------- /playbooks/tests/openSCAP-sat-prep.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: satellite6 3 | gather_facts: False 4 | tasks: 5 | - name: "Prep satellite for openSCAP test" 6 | shell: | 7 | yum info scap-security-guide 8 | hammer scap-content bulk-upload --type default 9 | hammer ansible roles import --proxy-id "{{ proxy_id }}" --role-names theforeman.foreman_scap_client 10 | hammer ansible roles sync --proxy-id "{{ proxy_id }}" --role-names theforeman.foreman_scap_client 11 | - name: Set scap-content and profile as per OS 12 | set_fact: 13 | scap_content: "{% if 'rhel8' in containers_image %}Red Hat rhel8 default content{% else %}Red Hat rhel7 default content{% endif %}" 14 | scap_content_profile: "{% if 'rhel8' in containers_image %}51{% else %}31{% endif %}" 15 | run_once: yes 16 | - name: "Get hostgroup id" 17 | shell: 18 | hammer --csv --no-headers hostgroup list | grep "{{ tests_registration_target|default(groups['satellite6']|first) }}" | sort -n | head -n 1 | awk -F ',' '{ print $1 }' 19 | register: hostgroup_id 20 | run_once: yes 21 | - name: Add role to hostgroup 22 | shell: 23 | hammer hostgroup update --id "{{ hostgroup_id.stdout }}" --content-source-id 1 --openscap-proxy-id 1 --ansible-roles theforeman.foreman_scap_client 24 | - name: Create policy for rhel container 25 | shell: 26 | hammer policy create --deploy-by 'ansible' --name 'openSCAP-test' --scap-content "{{ scap_content }}" --scap-content-profile-id "{{ scap_content_profile }}" --period monthly --day-of-month 1 --hostgroup-ids "{{ hostgroup_id.stdout }}" 27 | -------------------------------------------------------------------------------- /playbooks/tests/FAM/cv_version_promote.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: localhost 3 | gather_facts: false 4 | 5 | module_defaults: 6 | group/theforeman.foreman.foreman: 7 | server_url: "{{ foreman_server_url | default(omit) }}" 8 | username: "{{ foreman_username | default(omit) }}" 9 | password: "{{ foreman_password | default(omit) }}" 10 | validate_certs: "{{ foreman_validate_certs | default(omit) }}" 11 | organization: "{{ foreman_organization }}" 12 | 13 | tasks: 14 | - name: ContentViewVersionPromote 15 | theforeman.foreman.content_view_version: 16 | content_view: "{{ cv }}" 17 | version: "{{ version | default(omit) }}" 18 | lifecycle_environments: "{{ lifecycle_environments | default(omit) }}" 19 | current_lifecycle_environment: "{{ current_lifecycle_environment | default(omit) }}" 20 | when: 21 | - cv is defined and cv | length > 0 22 | 23 | - name: CompositeContentViewVersionPromote 24 | theforeman.foreman.content_view_version: 25 | content_view: "{{ content_view.name | default(content_view.content_view) | default(content_view) }}" 26 | version: "{{ content_view.version | default(omit) }}" 27 | lifecycle_environments: "{{ lifecycle_environments | default(omit) }}" 28 | current_lifecycle_environment: "{{ current_lifecycle_environment | default(omit) }}" 29 | # XXX: We want to promote only CCVs, not CVs 30 | loop: "{{ content_views | from_json | selectattr('components', 'defined') | list }}" 31 | loop_control: 32 | loop_var: content_view 33 | when: 34 | - content_views is defined and content_views | length > 0 35 | ... 
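Usage note: a minimal invocation sketch for the single-content-view branch of this playbook; the server URL, credentials, content view name, version and lifecycle environments below are illustrative values to replace with your own:

    $ ansible-playbook playbooks/tests/FAM/cv_version_promote.yaml \
        -e foreman_server_url=https://satellite.example.com \
        -e foreman_username=admin \
        -e foreman_password=changeme \
        -e "foreman_organization='Default Organization'" \
        -e cv=test_cv \
        -e version=2.0 \
        -e '{"lifecycle_environments": ["Library", "Test"]}'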
36 | -------------------------------------------------------------------------------- /experiment/only_reg-number.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | source experiment/run-library.sh 4 | 5 | expected_concurrent_registrations=${PARAM_expected_concurrent_registrations:-125} 6 | 7 | 8 | section 'Checking environment' 9 | generic_environment_check 10 | # unset skip_measurement 11 | # set +e 12 | 13 | 14 | section "Register" 15 | number_container_hosts="$( ansible $opts_adhoc --list-hosts container_hosts 2>/dev/null | grep -cv '^ hosts' )" 16 | number_containers_per_container_host="$( ansible $opts_adhoc -m debug -a "var=containers_count" container_hosts[0] | awk '/ "containers_count":/ {print $NF}' )" 17 | total_number_containers="$(( number_container_hosts * number_containers_per_container_host ))" 18 | needed_concurrent_registrations_per_container_host="$(( ( expected_concurrent_registrations + number_container_hosts -1 ) / number_container_hosts ))" # We want ceiling rounding: Ceiling( X / Y ) = ( X + Y – 1 ) / Y 19 | real_concurrent_registrations="$(( needed_concurrent_registrations_per_container_host * number_container_hosts ))" 20 | 21 | log "Going to register $real_concurrent_registrations contents hosts" 22 | 23 | skip_measurement='true' ap register-00-$real_concurrent_registrations.log \ 24 | -e "size='${needed_concurrent_registrations_per_container_host}'" \ 25 | -e "registration_logs='../../$logs/register-00-container-host-client-logs'" \ 26 | -e "sat_version='$sat_version'" \ 27 | playbooks/tests/registrations.yaml 28 | e Register $logs/register-00-$real_concurrent_registrations.log 29 | 30 | 31 | section "Sosreport" 32 | ap sosreporter-gatherer.log \ 33 | -e "sosreport_gatherer_local_dir='../../$logs/sosreport/'" \ 34 | playbooks/satellite/sosreport_gatherer.yaml 35 | 36 | 37 | junit_upload 38 | -------------------------------------------------------------------------------- /experiment/rex_stateless.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | source experiment/run-library.sh 4 | 5 | 6 | section 'Checking environment' 7 | # generic_environment_check false false 8 | unset skip_measurement 9 | set +e 10 | 11 | 12 | section 'Remote execution' 13 | # job_template_ansible_default='Run Command - Ansible Default' 14 | job_template_ssh_default='Run Command - Script Default' 15 | 16 | skip_measurement='true' h 10-rex-set-via-ip.log \ 17 | 'settings set --name remote_execution_connect_by_ip --value true' 18 | skip_measurement='true' a 11-rex-cleanup-know_hosts.log \ 19 | -m 'ansible.builtin.shell' \ 20 | -a 'rm -rf /usr/share/foreman-proxy/.ssh/known_hosts*' \ 21 | satellite6 22 | 23 | test=15-rex-date 24 | skip_measurement=true h "${test}.log" \ 25 | "job-invocation create --async --description-format 'Run %{command} (%{template_name})' --inputs command='date' --job-template '$job_template_ssh_default' --search-query 'name ~ container'" 26 | jsr "${logs}/${test}.log" 15 27 | j "${logs}/${test}.log" & 28 | 29 | test=16-rex-dnf_uploadprofile 30 | skip_measurement=true h "${test}.log" \ 31 | "job-invocation create --async --description-format 'Run %{command} (%{template_name})' --inputs command='dnf uploadprofile --force-upload' --job-template '$job_template_ssh_default' --search-query 'name ~ container'" 32 | jsr "${logs}/${test}.log" 15 33 | j "${logs}/${test}.log" & 34 | 35 | test=19-rex-sleep_300_uptime 36 | skip_measurement=true h "${test}.log" \ 37 | 
"job-invocation create --async --description-format 'Run %{command} (%{template_name})' --inputs command='sleep 300; uptime' --job-template '$job_template_ssh_default' --search-query 'name ~ container'" 38 | jsr "${logs}/${test}.log" 30 39 | j "${logs}/${test}.log" & 40 | 41 | 42 | wait 43 | 44 | 45 | junit_upload 46 | -------------------------------------------------------------------------------- /playbooks/tests/FAM/includes/manifest_test.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: ManifestDownload 3 | theforeman.foreman.redhat_manifest: 4 | validate_certs: "{{ foreman_rhsm_validate_certs }}" 5 | uuid: "{{ foreman_manifest_uuid }}" 6 | username: "{{ foreman_rhsm_username }}" 7 | password: "{{ foreman_rhsm_password }}" 8 | path: "{{ foreman_manifest_path }}" 9 | when: foreman_manifest_download 10 | 11 | - name: ManifestImport 12 | theforeman.foreman.subscription_manifest: 13 | username: "{{ foreman_username | default(omit) }}" 14 | password: "{{ foreman_password | default(omit) }}" 15 | server_url: "{{ foreman_server_url | default(omit) }}" 16 | validate_certs: "{{ foreman_validate_certs | default(omit) }}" 17 | organization: "{{ foreman_organization }}" 18 | manifest_path: "{{ foreman_manifest_path }}" 19 | # state: present 20 | when: foreman_manifest_import 21 | 22 | - name: ManifestRefresh 23 | theforeman.foreman.subscription_manifest: 24 | username: "{{ foreman_username | default(omit) }}" 25 | password: "{{ foreman_password | default(omit) }}" 26 | server_url: "{{ foreman_server_url | default(omit) }}" 27 | validate_certs: "{{ foreman_validate_certs | default(omit) }}" 28 | organization: "{{ foreman_organization }}" 29 | state: refreshed 30 | when: foreman_manifest_refresh 31 | 32 | - name: ManifestDelete 33 | theforeman.foreman.subscription_manifest: 34 | username: "{{ foreman_username | default(omit) }}" 35 | password: "{{ foreman_password | default(omit) }}" 36 | server_url: "{{ foreman_server_url | default(omit) }}" 37 | validate_certs: "{{ foreman_validate_certs | default(omit) }}" 38 | organization: "{{ foreman_organization }}" 39 | state: absent 40 | when: foreman_manifest_delete 41 | ... 
42 | -------------------------------------------------------------------------------- /playbooks/tests/downloadtest-syncrepo.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: satellite6 3 | gather_facts: False 4 | vars: 5 | repo_count_download_test: 1 6 | download_test_product: "DownTestProduct" 7 | download_test_repo_template: "{{ download_test_repo_template }}*" 8 | dorg: "Default Organization" 9 | tasks: 10 | - name: "Create product" 11 | shell: 12 | cmd: hammer product create --organization "{{ dorg }}" --name "{{ download_test_product }}" 13 | register: create_product 14 | failed_when: "create_product.rc != 0 and 'Name has already been taken for a product in this organization' not in create_product.stderr" 15 | changed_when: "create_product.rc == 0 and 'Product created' in create_product.stdout" 16 | - name: "Create repos" 17 | shell: 18 | cmd: hammer repository create --organization "{{ dorg }}" --product "{{ download_test_product }}" --content-type yum --name "{{ download_test_repo_template|replace('*', item) }}" --url "{{ repo_download_test|replace('*', item) }}" 19 | register: create_repo 20 | loop: "{{ range(1, repo_count_download_test|int+1)|list }}" 21 | - name: "Immediate" 22 | shell: 23 | cmd: hammer repository update --organization "{{ dorg }}" --product "{{ download_test_product }}" --name "{{ download_test_repo_template|replace('*', item) }}" --download-policy 'immediate' 24 | loop: "{{ range(1, repo_count_download_test|int+1)|list }}" 25 | ignore_errors: true 26 | - name: "Start sync" 27 | shell: 28 | cmd: hammer repository synchronize --organization "{{ dorg }}" --product "{{ download_test_product }}" --name "{{ download_test_repo_template|replace('*', item) }}" 29 | loop: "{{ range(1, repo_count_download_test|int+1)|list }}" 30 | register: start_sync 31 | ignore_errors: true 32 | -------------------------------------------------------------------------------- /playbooks/satellite/satellite-remove-hosts.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: satellite6 3 | gather_facts: False 4 | vars: 5 | organization_id: "{{ org_id | default(1) }}" 6 | search: "{{ search_query | default('container') }}" 7 | tasks: 8 | - name: "Delete hosts in bulk" 9 | ansible.builtin.uri: 10 | url: "https://{{ groups['satellite6']|first }}/api/hosts/bulk/destroy" 11 | method: PUT 12 | user: "{{ sat_user }}" 13 | password: "{{ sat_pass }}" 14 | force_basic_auth: yes 15 | validate_certs: no 16 | headers: 17 | Content-Type: application/json 18 | Accept: application/json 19 | body: > 20 | {'organization_id': {{ organization_id }}, 'included': {'search': "{{ search }}"}} 21 | body_format: json 22 | # If host list is empty katello will return 403 :S 23 | # https://github.com/Katello/katello/blob/master/app/controllers/katello/concerns/api/v2/bulk_hosts_extensions.rb 24 | status_code: 25 | - 202 26 | - 403 27 | - name: "Wait for the hosts to be deleted" 28 | ansible.builtin.shell: | 29 | content_host_num="$(hammer --csv --no-headers host list --fields name | grep -c '{{ search }}')" 30 | # We expect to delete around 250 hosts / min -> 125 hosts / 30 sec 31 | expected_iterations="$(( content_host_num / 125 ))" 32 | iter=0 33 | 34 | while (( content_host_num != 0 )); do 35 | sleep 30 36 | 37 | content_host_num="$(hammer --csv --no-headers host list --fields name | grep -c '{{ search }}')" 38 | (( iter++ )) 39 | 40 | # We iterate up to 5 times (conservative number) more than expected 
before giving up 41 | if (( iter > ( expected_iterations * 5 ) && content_host_num != 0 )); then 42 | exit 43 | fi 44 | done 45 | ... 46 | -------------------------------------------------------------------------------- /playbooks/tests/api-get-task-duration.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: satellite6 3 | gather_facts: False 4 | tasks: 5 | - name: "Set default variable(s) values" 6 | ansible.builtin.set_fact: 7 | user: "{{ user | default('{{ sat_user }}') | default('admin') }}" 8 | password: "{{ password | default('{{ sat_pass }}') }}" 9 | action: "{{ action }}" 10 | 11 | - name: "Get foreman_tasks information" 12 | vars: 13 | # body: "search=action ~ {{ action }}" 14 | body: "search=label = Actions::Katello::CapsuleContent::Sync" 15 | ansible.builtin.uri: 16 | url: "https://{{ groups['satellite6'] | first }}/foreman_tasks/api/tasks" 17 | method: GET 18 | validate_certs: False 19 | force_basic_auth: True 20 | user: "{{ user }}" 21 | password: "{{ password }}" 22 | body: "{{ body }}" 23 | register: foreman_tasks_json 24 | failed_when: foreman_tasks_json.json.results | length == 0 25 | 26 | - debug: 27 | var: foreman_tasks_json 28 | 29 | # - name: "Get first stopped task id (when 'action' is defined)" 30 | # ansible.builtin.set_fact: 31 | # task_duration: "{{ foreman_tasks_json.json | community.general.json_query(query) }}" 32 | # vars: 33 | # query: "results[?pending==`false` && action=='{{ action }}'].duration | [0]" 34 | # when: 35 | # - action is defined and action | length > 0 36 | 37 | # - name: "Get first stopped task id" 38 | # ansible.builtin.set_fact: 39 | # task_duration: "{{ foreman_tasks_json.json | community.general.json_query(query) }}" 40 | # vars: 41 | # query: "results[?pending==`false`].duration | [0]" 42 | # when: 43 | # - action is not defined or action | length == 0 44 | 45 | # - debug: 46 | # # var: last_task 47 | # var: task_duration 48 | ... 
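Usage note: the same task query can be reproduced outside Ansible, which is handy while iterating on the search expression. A rough curl equivalent (hostname and credentials are the sample values used elsewhere in this repository):

    $ curl -sk -G -u admin:changeme \
        --data-urlencode 'search=label = Actions::Katello::CapsuleContent::Sync' \
        https://satellite.example.com/foreman_tasks/api/tasks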
49 | -------------------------------------------------------------------------------- /experiment/rex-constrained.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | source experiment/run-library.sh 4 | 5 | 6 | section 'Checking environment' 7 | generic_environment_check false false 8 | # unset skip_measurement 9 | # set +e 10 | 11 | 12 | section "Remote execution" 13 | job_template_ansible_default='Run Command - Ansible Default' 14 | job_template_ssh_default='Run Command - Script Default' 15 | 16 | skip_measurement='true' h 10-rex-set-via-ip.log \ 17 | 'settings set --name remote_execution_connect_by_ip --value true' 18 | skip_measurement='true' a 11-rex-cleanup-know_hosts.log \ 19 | -m 'ansible.builtin.shell' \ 20 | -a 'rm -rf /usr/share/foreman-proxy/.ssh/known_hosts*' \ 21 | satellite6 22 | 23 | skip_measurement='true' h 17-rex-katello_package_install-podman.log \ 24 | "job-invocation create --async --description-format 'Install %{package} (%{template_name})' --feature katello_package_install --inputs package='podman' --search-query 'name ~ container'" 25 | jsr "$logs/17-rex-katello_package_install-podman.log" 26 | j "$logs/17-rex-katello_package_install-podman.log" 27 | 28 | skip_measurement='true' h 17-rex-podman_pull.log \ 29 | "job-invocation create --async --description-format 'Run %{command} (%{template_name})' --inputs command='bash -x /root/podman-pull.sh' --job-template '$job_template_ssh_default' --search-query 'name ~ container'" 30 | jsr "$logs/17-rex-podman_pull.log" 31 | j "$logs/17-rex-podman_pull.log" 32 | 33 | skip_measurement='true' h 18-rex-fake_dnf_upgrade.log \ 34 | "job-invocation create --async --description-format 'Run %{command} (%{template_name})' --inputs command='TMPDIR=\"\$(mktemp -d)\" && dnf upgrade -y --downloadonly --destdir=\$TMPDIR && dnf clean all && rm -rf \$TMPDIR' --job-template '$job_template_ssh_default' --search-query 'name ~ container'" 35 | jsr "$logs/18-rex-fake_dnf_upgrade.log" 36 | j "$logs/18-rex-fake_dnf_upgrade.log" 37 | 38 | 39 | junit_upload 40 | -------------------------------------------------------------------------------- /scripts/grep_production_log.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import sys 4 | import re 5 | 6 | # When you want to grep production.log for some path but you are interested 7 | # in all other lines related to that request (using that request ID (not 8 | # sure how it is called) 9 | # 10 | # Example: 11 | # 12 | # tail -f /var/log/foreman/production.log | ./grep_production_log.py '/api/v2/job_invocations/[0-9]+/outputs' 13 | # 14 | # Output: 15 | # 16 | # 2021-02-19T09:42:35 [I|app|c68bc264] Started GET "/api/v2/job_invocations/27/outputs?search_query=name+%5E+(satperf006container70.usersys.redhat.com)" for 127.0.0.1 at 2021-02-19 09:42:35 -0500 17 | # 2021-02-19T09:42:35 [I|app|c68bc264] Processing by Api::V2::JobInvocationsController#outputs as JSON 18 | # 2021-02-19T09:42:35 [I|app|c68bc264] Parameters: {"search_query"=>"name ^ (satperf006container70.usersys.redhat.com)", "apiv"=>"v2", "id"=>"27"} 19 | # 2021-02-19T09:42:35 [I|app|c68bc264] Completed 200 OK in 311ms (Views: 0.5ms | ActiveRecord: 40.9ms | Allocations: 83301) 20 | # [...] 21 | # 22 | # First line is shown because it matches provided pattern, rest of 23 | # the lines is shown because they have same request ID 'c68bc264'. 
24 | 25 | regexp = sys.argv[1] 26 | tracker = [] 27 | tracker_max = 1000 28 | req_id_regexp = '^[^ ]+ \[[^\|]+\|[^\|]+\|([a-zA-Z0-9]+)\] .*' 29 | 30 | for line in sys.stdin: 31 | line = line.strip() 32 | if re.search(regexp, line): 33 | print(line) 34 | found = re.search(req_id_regexp, line) 35 | if found: 36 | req_id = found.group(1) 37 | if req_id: 38 | if req_id not in tracker: 39 | tracker.append(req_id) 40 | if len(tracker) > tracker_max: 41 | tracker.pop(0) 42 | else: 43 | for t in reversed(tracker): 44 | if '|' + t + '] ' in line: 45 | print(line) 46 | break 47 | -------------------------------------------------------------------------------- /playbooks/tests/includes/puppet-big-test-deploy.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # Expects these variables to be set: 3 | # nickname ... just a marker to include in a log name 4 | # content_puppet_env ... puppet environment label you want to assign to the hosts 5 | # content_puppet_module_name ... modules to configure deploy on a hosts (must be in the environment) 6 | 7 | - name: "Determine log name for setup" 8 | set_fact: 9 | setup_cmd_log: "/root/out-puppet-{{ nickname }}-setup-{{ lookup('pipe', 'date -u -Iseconds') }}.log" 10 | run_once: yes 11 | - name: "Run puppet-big-client.yaml (log = {{ setup_cmd_log }}; tags = SETUP)" 12 | shell: 13 | ansible-playbook --private-key /root/id_rsa_key -f "{{ forks|default(size) }}" -i clients.ini --extra-vars "server='{{ tests_registration_target|default(groups['satellite6']|first) }}' content_puppet_env='{{ content_puppet_env }}' content_puppet_module_name='{{ content_puppet_module_name }}'" --tags "SETUP" puppet-big-client.yaml &> "{{ setup_cmd_log }}" 14 | register: setup_cmd 15 | environment: 16 | TZ: UTC 17 | ###ignore_errors: true 18 | 19 | - import_tasks: includes/show_grepper.yaml 20 | vars: 21 | grepper: "SetupPuppet" 22 | grepper_log: "{{ setup_cmd_log }}" 23 | 24 | - name: "Determine log name for deploy" 25 | set_fact: 26 | deploy_cmd_log: "/root/out-puppet-{{ nickname }}-deploy-{{ lookup('pipe', 'date -u -Iseconds') }}.log" 27 | run_once: yes 28 | - name: "Run puppet-big-client.yaml (log = {{ deploy_cmd_log }}; tags = DEPLOY)" 29 | shell: 30 | ansible-playbook --private-key /root/id_rsa_key -f "{{ forks|default(size) }}" -i clients.ini --tags "DEPLOY" puppet-big-client.yaml &> "{{ deploy_cmd_log }}" 31 | register: deploy_cmd 32 | environment: 33 | TZ: UTC 34 | ignore_errors: true 35 | 36 | - import_tasks: includes/show_grepper.yaml 37 | vars: 38 | grepper: "PickupPuppet" 39 | grepper_log: "{{ deploy_cmd_log }}" 40 | ... 41 | -------------------------------------------------------------------------------- /scripts/download_repo.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | function download_cdn() { 4 | repo=$1 5 | base=$( echo $repo | sed 's|^\(.*\)/content/.*$|\1|' ) 6 | path=$( echo $repo | sed 's|^.*/\(content/.*\)$|\1|' | sed 's|\(^/\|/$\)||g' ) 7 | mkdir -p $path/repodata 8 | cwd=$( pwd ) 9 | 10 | # Download treeinfo files for the tree 11 | loop_path=$path 12 | while true; do 13 | wget --no-verbose --no-check-certificate $base/$loop_path/listing -O $loop_path/listing 14 | [ "$loop_path" = '.' 
] && break 15 | loop_path=$( dirname $loop_path ) 16 | done 17 | 18 | # Download repodata 19 | cd $cwd 20 | cd $path/repodata 21 | pwd 22 | wget --no-verbose --no-check-certificate $repo/repodata/productid 23 | wget --no-verbose --no-check-certificate $repo/repodata/repomd.xml 24 | for f in $( curl $repo/repodata/repomd.xml | grep 'location href=' | cut -d '"' -f 2 ); do 25 | wget --no-verbose --no-check-certificate $repo/$f 26 | done 27 | cd $cwd 28 | } 29 | 30 | # Download repodata 31 | cd /var/www/html/pub/ 32 | download_cdn http://cdn.stage.redhat.com/content/dist/rhel/server/6/6.5/x86_64/os/ 33 | download_cdn http://cdn.stage.redhat.com/content/dist/rhel/server/7/7.5/x86_64/optional/os/ 34 | download_cdn http://cdn.stage.redhat.com/content/dist/rhel/server/7/7.5/x86_64/os/ 35 | 36 | # Download packages 37 | cd /var/www/html/pub/ 38 | wget -c -r -l 1 --accept rpm http://cdn.stage.redhat.com/content/dist/rhel/server/7/7.5/x86_64/os/Packages/ 39 | mkdir content/dist/rhel/server/7/7.5/x86_64/os/Packages 40 | mv cdn.stage.redhat.com/content/dist/rhel/server/7/7.5/x86_64/os/Packages/*.rpm content/dist/rhel/server/7/7.5/x86_64/os/Packages/ 41 | rm -rf cdn.stage.redhat.com/ 42 | 43 | ## Enable it on Satellite side 44 | #hammer -u admin -p changeme organization add-location --name "Default Organization" --location "Default Location" 45 | #hammer -u admin -p changeme organization update --name "Default Organization" --redhat-repository-url http://localhost/pub/ 46 | -------------------------------------------------------------------------------- /playbooks/tests/openSCAP-test.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: satellite6 3 | gather_facts: False 4 | tasks: 5 | - name: "Start openSCAP job" 6 | shell: | 7 | hammer job-invocation create --search-query 'container' --job-template 'Run OpenSCAP scans' --async & 8 | sleep 30 9 | - name: "Get last job ID" 10 | shell: 11 | hammer --csv job-invocation list | cut -d ',' -f 1 | grep '^[0-9]*$' | sort -n | tail -n 1 12 | register: last_job_id_cmd 13 | - debug: var=last_job_id_cmd 14 | - name: "Set last job ID" 15 | set_fact: 16 | last_job_id: "{{ last_job_id_cmd.stdout_lines[0]|int }}" 17 | - name: "Wait for job {{ last_job_id }} to finish" 18 | shell: 19 | /root/wait-for-job.py "{{ sat_user }}" "{{ sat_pass }}" "https://{{ groups['satellite6']|first }}" "{{ last_job_id }}" "{{ max_age_task }}" 20 | register: wait_for_job_cmd 21 | ignore_errors: yes 22 | until: wait_for_job_cmd is not failed 23 | retries: 5 24 | delay: 60 25 | - name: "Parse script result" 26 | set_fact: 27 | pass_count: "{{ ((wait_for_job_cmd.stdout_lines|last).split())[2]|int }}" 28 | total_count: "{{ ((wait_for_job_cmd.stdout_lines|last).split())[4]|int }}" 29 | start_time: "{{ ((wait_for_job_cmd.stdout_lines|last).split())[6] + ' ' + ((wait_for_job_cmd.stdout_lines|last).split())[7] }}" 30 | end_time: "{{ ((wait_for_job_cmd.stdout_lines|last).split())[9] + ' ' + ((wait_for_job_cmd.stdout_lines|last).split())[10] }}" 31 | test_time: "{{ ((wait_for_job_cmd.stdout_lines|last).split())[13]|int }}" 32 | avg_duration: "{{ ((wait_for_job_cmd.stdout_lines|last).split())[15]|int }}" 33 | - name: "Show what we have parsed" 34 | debug: 35 | msg: "RESULT: pass_count: {{ pass_count }}; total_count: {{ total_count }}; start_time: {{ start_time }}; end_time: {{ end_time }}; test_time: {{ test_time }}; avg_duration: {{ avg_duration }}" 36 | - name: "Give server some time to rest" 37 | pause: 38 | seconds: 30 39 | 
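Usage note: a minimal sketch of running this test standalone; it assumes /root/wait-for-job.py is already present on the Satellite, and the credentials and task-age limit below are illustrative:

    $ ansible-playbook -i conf/hosts.ini playbooks/tests/openSCAP-test.yaml \
        -e sat_user=admin -e sat_pass=changeme -e max_age_task=3600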
-------------------------------------------------------------------------------- /playbooks/satellite/roles/client-scripts/files/host-registration.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: all 3 | gather_facts: false 4 | 5 | tasks: 6 | - name: Register 7 | ansible.builtin.shell: 8 | cmd: | 9 | set -o pipefail 10 | 11 | if [[ -f /root/registration.log && ! -f /root/registration.log.orig ]]; then 12 | cp -p /root/registration.log /root/registration.log.orig 13 | fi 14 | 15 | /root/host-registration.sh &>/root/registration.log 16 | ret=$? 17 | 18 | echo '-' >>/root/registration.log 19 | echo "Return code: $ret" >>/root/registration.log 20 | echo '-----' >>/root/registration.log 21 | 22 | cat /root/registration.log 23 | exit $ret 24 | environment: 25 | TZ: UTC # make sure returned times are in UTC 26 | register: registration 27 | # XXX: Ignore errors in order to be able to show `registration.stdout_lines` 28 | ignore_errors: true 29 | 30 | - name: Register - output 31 | ansible.builtin.debug: 32 | var: registration.stdout_lines 33 | vars: 34 | regexp: '^.*(Traceback |curl: |Internal Server Error).*$' 35 | failed_when: 36 | - registration is failed or registration.stdout | regex_search(regexp, multiline=true) 37 | 38 | - name: Calculate registration duration 39 | ansible.builtin.set_fact: 40 | reg_duration: "{{ (registration.end | to_datetime('%Y-%m-%d %H:%M:%S.%f')).timestamp() - (registration.start | to_datetime('%Y-%m-%d %H:%M:%S.%f')).timestamp() }}" 41 | 42 | - name: Register - timings 43 | ansible.builtin.debug: 44 | msg: "Register {{ registration.start }} to {{ registration.end }} taking {{ reg_duration }} seconds" 45 | 46 | # - name: "Disable insights-client automatic scheduling" 47 | # ansible.builtin.shell: 48 | # cmd: | 49 | # set -o pipefail 50 | 51 | # if [[ -x /usr/bin/insights-client ]]; then 52 | # insights-client --disable-schedule 53 | # fi 54 | ... 
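Usage note: the "Register <start> to <end> taking <N> seconds" line logged above is the marker that experiment/reg-average.py greps for when computing registration averages; a usage sketch (the log file name is illustrative):

    $ experiment/reg-average.py Register logs/register-00-125.log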
55 | -------------------------------------------------------------------------------- /playbooks/satellite/subnet-create.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: satellite6 3 | gather_facts: False 4 | tasks: 5 | - name: "Get subnet from inventory (using private network IPs)" 6 | ansible.builtin.set_fact: 7 | capsule_subnet: "{{ (hostvars[host_name].private_ip + '/' + hostvars[host_name].private_netmask) | ansible.utils.ipaddr('network/prefix') }}" 8 | capsule_network: "{{ (hostvars[host_name].private_ip + '/' + hostvars[host_name].private_netmask) | ansible.utils.ipaddr('network') }}" 9 | capsule_netmask: "{{ hostvars[host_name].private_netmask }}" 10 | when: 11 | - "hostvars[host_name].private_ip is defined and hostvars[host_name].private_ip | length > 0" 12 | - "hostvars[host_name].private_netmask is defined and hostvars[host_name].private_netmask | length > 0" 13 | 14 | - name: "Get subnet from inventory (using public network IPs)" 15 | ansible.builtin.set_fact: 16 | capsule_subnet: "{{ (hostvars[host_name].public_ip + '/' + hostvars[host_name].public_netmask) | ansible.utils.ipaddr('network/prefix') }}" 17 | capsule_network: "{{ (hostvars[host_name].public_ip + '/' + hostvars[host_name].public_netmask) | ansible.utils.ipaddr('network') }}" 18 | capsule_netmask: "{{ hostvars[host_name].public_netmask }}" 19 | when: 20 | - "hostvars[host_name].private_ip is not defined and hostvars[host_name].private_netmask is not defined" 21 | - "hostvars[host_name].public_ip is defined and hostvars[host_name].public_netmask is defined" 22 | 23 | - name: "Create subnet for {{ capsule_subnet }}" 24 | ansible.builtin.shell: 25 | hammer subnet create \ 26 | --organization "{{ organization }}" \ 27 | --name "Subnet for {{ capsule_subnet }}" \ 28 | --network {{ capsule_network }} \ 29 | --mask {{ capsule_netmask }} \ 30 | --ipam None \ 31 | --domains {{ domain }} 32 | register: cmd 33 | failed_when: "cmd.rc != 0 and 'Name has already been taken' not in cmd.stderr" 34 | changed_when: "cmd.rc == 0" 35 | ... 
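Usage note: a worked sketch of what the ipaddr filters above produce, using the sample capsule IP from conf/hosts.ini and an illustrative /24 netmask (requires the ansible.utils collection):

    $ ansible-galaxy collection install ansible.utils
    $ ansible localhost -m debug \
        -a "msg={{ '172.17.51.1/255.255.255.0' | ansible.utils.ipaddr('network/prefix') }}"
    # expected: 172.17.51.0/24; ipaddr('network') on the same input yields 172.17.51.0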
36 | -------------------------------------------------------------------------------- /scripts/create_lots_of_subnets.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | # Usage: 4 | # $locations_to - $locations_from = number of locations to create 5 | # One loc -> 100 domains -> 300 subnets 6 | # Recomendation: from 100 to 150 7 | # 8 | # # Create lots of locations/domains/subnets 9 | # ansible -u root --private-key conf/contperf/id_rsa_perf -i conf/contperf/inventory.ini -m copy -a "src=scripts/create_lots_of_subnets.sh dest=/root/create_lots_of_subnets.sh force=yes" satellite6 10 | # ansible -u root --private-key conf/contperf/id_rsa_perf -i conf/contperf/inventory.ini -m command -a "bash /root/create_lots_of_subnets.sh 100 150" satellite6 11 | # ansible -u root --private-key conf/contperf/id_rsa_perf -i conf/contperf/inventory.ini -m shell -a "hammer -u admin -p changeme location list | wc -l; hammer -u admin -p changeme domain list | wc -l; hammer -u admin -p changeme subnet list | wc -l" satellite6 12 | 13 | locations_from=$1 14 | locations_to=$2 15 | 16 | ip_a=0 # denotes location (will start from 100) 17 | ip_b=0 # denotes domain 18 | ip_c=0 # denotes subnet 19 | ip_d=0 20 | for ip_a in $( seq $locations_from $locations_to ); do 21 | location="Location $ip_a" 22 | hammer -u admin -p changeme location create --description "Description of $location" --name "$location" 23 | for ip_b in $( seq 1 100 ); do 24 | domain="loc${ip_a}dom${ip_b}.local" 25 | hammer -u admin -p changeme domain create --description "Description of $domain" --locations "$location" --name "$domain" --organizations 'Default Organization' 26 | for ip_c in $( seq 1 3 ); do 27 | from="$ip_a.$ip_b.$ip_c.$ip_d" 28 | to="$ip_a.$ip_b.$ip_c.255" 29 | hammer -u admin -p changeme subnet create --domains "$domain" --from "$from" --gateway "$from" --ipam DHCP --locations "$location" --organizations 'Default Organization' --mask 255.255.255.0 --name "Subnet $from-$to for $domain in $location" --network "$from" --to "$to" & 30 | # Limit number of background processes 31 | [ "$( jobs | wc -l | cut -d ' ' -f 1 )" -ge 10 ] && wait 32 | done 33 | done 34 | done 35 | -------------------------------------------------------------------------------- /infra/kibana/saved_objects/README.md: -------------------------------------------------------------------------------- 1 | Satellite CPT Kibana saved objects storage 2 | ========================================== 3 | 4 | To update objects here from Kibana or to update objects in Kibana with 5 | configs here, please follow guide in: 6 | 7 | 8 | 9 | 10 | To clone dashboard visualizations from ver X to Y 11 | ------------------------------------------------- 12 | 13 | First, make sure you have latest visualization JSONs using 14 | `kibana_objects_tool` refferenced above. 15 | 16 | Lets say I want to create Sat 6.11 visualizations based on their 17 | 6.10 versions. I need to locate them: 18 | 19 | $ where/is/kibana_objects_tool/kibana_objects_tool.py list_objects | grep 6.10 20 | visualization_a1b1186a-7df8-47d8-92f1-2e909293d261.json visualization 'Sat 6.10 webui-pages' 3375 B 21 | visualization_d3531489-3487-4637-9783-0fc6a0e356a7.json visualization 'Sat 6.10 hammer-list' 3360 B 22 | visualization_038ba516-86ab-4cba-97cb-0bce1f20f0b3.json visualization 'Sat 6.10 Sync RHEL6 from mirror' 3434 B 23 | [...] 
24 | 25 | Now I can clone them: 26 | 27 | for f in $( where/is/kibana_objects_tool/kibana_objects_tool.py list_objects | grep 6.10 | cut -d ' ' -f 1 ); do 28 | ./clone_kibana_vis.py \ 29 | --change-text 'Sat 6.10 ' 'Sat 6.11 ' \ 30 | --change-version-wildcard '*-6.10.*' '*-6.11.*' \ 31 | --change-version-match '6.10.z' '6.11.z' \ 32 | -d --filename $f 33 | done 34 | 35 | No traceback for none of them? Great. 36 | 37 | Do not git add and commit these `new-*` files, they are temporary only. 38 | 39 | Now you can create one NDJSON file from all of generated ones: 40 | 41 | for f in new-*.ndjson; do cat $f; echo; done >new.ndjson 42 | 43 | And import it to Kibana (well, we have 'OpenSearch Dashboards' now): 44 | 45 | curl -X POST 'http://kibana.example.com/api/saved_objects/_import?overwrite=false' -H "osd-xsrf: true" --form file=@new.ndjson 46 | 47 | Now you can delete temporary ones with `rm new*` and use `kibana_objects_tool` 48 | process to get JSONs (these you just imported into Kibana) from Kibana into git. 49 | -------------------------------------------------------------------------------- /conf/hosts.ini: -------------------------------------------------------------------------------- 1 | # Whats "ip=" variables for? 2 | # ========================== 3 | # We have used these for our statically configured network without DHCP/DNS 4 | # server which was running on 10Gb ethernet (compared 1Gb etherned which had 5 | # all the comfort of DHCP/DNS). This 10Gb ethernet was preffered to run our 6 | # tests. If you have only one network in your setup with functional DNS/DHCP 7 | # servers, you most probably do not need these values. 8 | # 9 | # Variable "containers=" 10 | # ====================== 11 | # Only applicable to "docker-host" group member and defines number of 12 | # containers docker-tierup.yaml should start on a given host. 13 | # 14 | # Variable "not_yet_10g_nic=" 15 | # =========================== 16 | # Set this to 'true' if there is no 10G network interface on the system yet, 17 | # so "common" role wont try to check it. 18 | # 19 | # Variable "docker_host_10gnic=" 20 | # ============================== 21 | # Set to name of the interface to add to the docker0 bridge. Defaults 22 | # to "eth1". 23 | # 24 | # Variable "docker_host_cidr_range=" 25 | # ================================== 26 | # How big should be docker's '--fixed-cidr' range? Defaults to 24 which is 27 | # enough for 255 containers. If you plan, say, 1000 containers, set to 22 28 | # and make sure to allocate space in IP space (if first docker host have 29 | # 30 | # Variable "ignore_satellite=" 31 | # ============================ 32 | # Set it to 'true' if we should skipp checks if exactly one satellite is 33 | # specified and if we can ping satellite via both "normal" and "10G" network 34 | # This is useful when you are setting docker hosts only. 35 | # 36 | # Variable "tests_registration_target=" 37 | # ===================================== 38 | # Into which server (satellite or capsule) should we register hosts from this 39 | # docker hosts? This is used by playbooks/tests/registrations.yaml. If not 40 | # specified, satellite is assumed. 
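#
# Example entry combining several of the variables above (the hostnames, IP and
# numbers are only illustrative; adjust them to your environment):
#
# docker.example.com ip=172.17.52.1 containers=250 docker_host_cidr_range=22 tests_registration_target=capsule.example.com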
41 | 42 | [docker_hosts] 43 | docker.example.com ip=172.17.52.1 44 | 45 | [capsules] 46 | capsule.example.com ip=172.17.51.1 47 | 48 | [satellite6] 49 | satellite.example.com ip=172.17.53.1 50 | 51 | [graphite] 52 | monitoring.example.com 53 | 54 | [grafana] 55 | monitoring.example.com 56 | -------------------------------------------------------------------------------- /infra/kibana/saved_objects/dashboard_e5764f00-5e85-11ec-8cd3-5d989c3b9841.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "attributes": { 4 | "description": "", 5 | "hits": 0, 6 | "kibanaSavedObjectMeta": { 7 | "searchSourceJSON": "{\"query\":{\"query\":\"\",\"language\":\"kuery\"},\"filter\":[]}" 8 | }, 9 | "optionsJSON": "{\"useMargins\":true,\"hidePanelTitles\":false}", 10 | "panelsJSON": "[{\"version\":\"7.10.0\",\"gridData\":{\"x\":0,\"y\":0,\"w\":48,\"h\":13,\"i\":\"3223c7c4-3b57-4123-bc4c-519867f7d66c\"},\"panelIndex\":\"3223c7c4-3b57-4123-bc4c-519867f7d66c\",\"embeddableConfig\":{},\"panelRefName\":\"panel_0\"},{\"version\":\"7.10.0\",\"gridData\":{\"x\":0,\"y\":13,\"w\":48,\"h\":15,\"i\":\"4e8c03c7-8467-4171-9218-2c253f28aa5d\"},\"panelIndex\":\"4e8c03c7-8467-4171-9218-2c253f28aa5d\",\"embeddableConfig\":{},\"panelRefName\":\"panel_1\"},{\"version\":\"7.10.0\",\"gridData\":{\"x\":0,\"y\":28,\"w\":48,\"h\":15,\"i\":\"ae29310a-45a9-4e76-b297-8974d08b0569\"},\"panelIndex\":\"ae29310a-45a9-4e76-b297-8974d08b0569\",\"embeddableConfig\":{},\"panelRefName\":\"panel_2\"}]", 11 | "refreshInterval": { 12 | "pause": true, 13 | "value": 0 14 | }, 15 | "timeFrom": "now-30d", 16 | "timeRestore": true, 17 | "timeTo": "now", 18 | "title": "Satellite AA", 19 | "version": 1 20 | }, 21 | "id": "e5764f00-5e85-11ec-8cd3-5d989c3b9841", 22 | "migrationVersion": { 23 | "dashboard": "7.9.3" 24 | }, 25 | "references": [ 26 | { 27 | "id": "7f8d3f60-5e84-11ec-8cd3-5d989c3b9841", 28 | "name": "panel_0", 29 | "type": "visualization" 30 | }, 31 | { 32 | "id": "87de7580-5e84-11ec-8cd3-5d989c3b9841", 33 | "name": "panel_1", 34 | "type": "visualization" 35 | }, 36 | { 37 | "id": "e210cac0-71ea-11ec-8e1a-cd6b988c18b9", 38 | "name": "panel_2", 39 | "type": "visualization" 40 | } 41 | ], 42 | "type": "dashboard", 43 | "updated_at": "2022-01-10T07:57:30.910Z", 44 | "version": "WzcwMCwxXQ==" 45 | } 46 | ] -------------------------------------------------------------------------------- /docs/get_EC2_instances_IP.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | user_interrupt(){ 6 | echo -e "\n\nKeyboard Interrupt detected." 7 | exit 1 8 | } 9 | 10 | trap user_interrupt SIGINT 11 | trap user_interrupt SIGTSTP 12 | 13 | [ $# = 0 ] && { 14 | echo -e "\nUsage: ./get_EC2_instances_IP.sh \n" 15 | echo -e "\t supports simple wildcard queries like: sat-*, *satellite*, .." 16 | echo -e "\nExample: \n$ ./get_EC2_instances_IP.sh sat-*capsule*" 17 | echo -e "..\nsat-capsule20\nsat-NG-capsule1\nsat-capsule16\n.." 
18 | exit -1 19 | } 20 | 21 | keyword=$1 22 | 23 | echo "Fetching currently running instance [ID, IP]s for 'tag:Name $keyword" 24 | 25 | aws ec2 describe-instances --output text --query 'Reservations[].Instances[].[PublicIpAddress,Tags[0].Value]' --filters "Name=tag:Name,Values=$keyword" "Name=instance-state-name,Values=running" | sort > ec2_ips.txt 26 | 27 | echo "...saved to ec2_ips.txt" 28 | 29 | # filtered=$1 30 | # keyword=$2 31 | 32 | 33 | 34 | # # old method 35 | # # aws ec2 describe-tags | grep sat- | awk '{print $4" "$6" "$8}' | grep i- > ec2_temp_data.txt 36 | 37 | # # better method 38 | # aws ec2 describe-tags --output text --query 'Tags[].Value' --filters Name=tag:Name,Values=$filtered | sed -e 's/\s\+/\n/g' > ec2_temp_data.txt 39 | 40 | 41 | # echo "...saved to ec2_temp_data.txt" 42 | # echo 43 | # echo "Note: Not all related ID'ed instanced might be up and running." 44 | # echo "To check status of an instance, use: aws ec2 describe-instance-status [ instance ID,(s) ..]" 45 | # echo 46 | # echo "...filter IDs for $keyword" 47 | 48 | # egrep '^.*'$keyword'[0-9]*$' ec2_temp_data.txt | awk '{print $1}' > ec2_instance.$keyword.ids.txt 49 | 50 | # echo "...saved to ec2_instance.$keyword.ids.txt" 51 | # echo "...getting IPs" 52 | 53 | # # old method 54 | # # aws ec2 describe-instances --instance-ids $(cat ec2_instance.$keyword.ids.txt) | grep PublicIpAddress | awk '{print $(NF-1)}' > ec2_IP_Addresses.$keyword.instance.txt 55 | 56 | # # better method 57 | # aws ec2 describe-instances --instance-ids $(cat ec2_instance.$keyword.ids.txt) --output text --query 'Reservations[].Instances[].PublicIpAddress' | sed -e 's/\s\+/\n/g' > ec2_IP_Addresses.$keyword.instance.txt 58 | 59 | # # maybe useful later: tr " " "\n" 60 | 61 | # echo "...saved to ec2_IP_Addresses.$keyword.instance.txt" 62 | -------------------------------------------------------------------------------- /experiment/reg-average.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: UTF-8 -*- 3 | 4 | import argparse 5 | import datetime 6 | import logging 7 | import re 8 | import sys 9 | 10 | 11 | def parse_time(time_str): 12 | try: 13 | time_obj = datetime.datetime.strptime(time_str, '%Y-%m-%d %H:%M:%S.%f') 14 | except ValueError: 15 | time_obj = datetime.datetime.strptime(time_str, '%Y-%m-%d %H:%M:%S') 16 | 17 | return time_obj.replace(tzinfo=datetime.timezone.utc) 18 | 19 | 20 | parser = argparse.ArgumentParser(description='Compute average from log') 21 | parser.add_argument('matcher', 22 | help='String to identify log file lines with timestamps') 23 | parser.add_argument('log_file', type=argparse.FileType('r'), 24 | help='Log file to process') 25 | parser.add_argument('-d', '--debug', action='store_true', 26 | help='Show debug output') 27 | 28 | args = parser.parse_args() 29 | 30 | if args.debug: 31 | logging.basicConfig(level=logging.DEBUG) 32 | 33 | logging.debug('Args: %s' % args) 34 | 35 | total = 0.0 36 | count = 0 37 | start_min = None 38 | end_max = None 39 | 40 | for line in args.log_file: 41 | if re.match('.*"%s .*' % args.matcher, line): 42 | logging.debug("Processing line %d: %s" % (count, line.strip())) 43 | m = re.match('^.*"%s (?P[0-9:. -]+) to (?P[0-9:. 
-]+) taking (?P[0-9.]+) seconds".*$' % args.matcher, line) 44 | 45 | start = parse_time(m.group('start')) 46 | end = parse_time(m.group('end')) 47 | duration = m.group('duration') 48 | logging.debug("Parsed start, end, duration times on line %d: %s, %s, %s" % (count, start, end, duration)) 49 | 50 | if start_min is None or start < start_min: 51 | start_min = start 52 | if end_max is None or end > end_max: 53 | end_max = end 54 | count += 1 55 | total += float(duration) 56 | 57 | if count == 0: 58 | logging.error("No matcher %s found in log %s" % (args.matcher, args.log_file.name)) 59 | sys.exit(1) 60 | else: 61 | print("min in %s: %s" % (args.log_file.name, start_min.timestamp())) 62 | print("max in %s: %s" % (args.log_file.name, end_max.timestamp())) 63 | print("%s in %s: %f / %d = %f" %(args.matcher, args.log_file.name, total, count, total / count)) 64 | -------------------------------------------------------------------------------- /playbooks/tests/includes/manifest-excercise.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # This is supposed to excercise manifest upload, reload and delete functionality 3 | # 4 | # Expects that /root/manifest-auto.zip file is available on a Satellite host 5 | 6 | - name: "Upload manifest" 7 | ansible.builtin.shell: 8 | hammer -u '{{ sat_user }}' -p '{{ sat_pass }}' subscription upload --file '/root/manifest-auto.zip' --organization '{{ organization }}' 9 | environment: 10 | TZ: UTC 11 | register: manifest_upload 12 | 13 | - name: "Calculate upload duration" 14 | ansible.builtin.set_fact: 15 | manifest_upload_duration: "{{ (manifest_upload.end | to_datetime('%Y-%m-%d %H:%M:%S.%f')).timestamp() - (manifest_upload.start | to_datetime('%Y-%m-%d %H:%M:%S.%f')).timestamp() }}" 16 | 17 | - name: "Print upload results" 18 | ansible.builtin.debug: 19 | msg: "ManifestUpload {{ manifest_upload.start }} to {{ manifest_upload.end }} taking {{ manifest_upload_duration }} seconds" 20 | 21 | - name: "Upload refresh" 22 | ansible.builtin.shell: 23 | hammer -u '{{ sat_user }}' -p '{{ sat_pass }}' subscription refresh-manifest --organization '{{ organization }}' 24 | environment: 25 | TZ: UTC 26 | register: manifest_refresh 27 | 28 | - name: "Calculate refresh duration" 29 | ansible.builtin.set_fact: 30 | manifest_refresh_duration: "{{ (manifest_refresh.end | to_datetime('%Y-%m-%d %H:%M:%S.%f')).timestamp() - (manifest_refresh.start | to_datetime('%Y-%m-%d %H:%M:%S.%f')).timestamp() }}" 31 | 32 | - name: "Print refresh results" 33 | ansible.builtin.debug: 34 | msg: "ManifestRefresh {{ manifest_refresh.start }} to {{ manifest_refresh.end }} taking {{ manifest_refresh_duration }} seconds" 35 | 36 | #- name: "Wait few seconds" 37 | # pause: 38 | # minutes: 0.2 39 | 40 | - name: "Delete manifest" 41 | ansible.builtin.shell: 42 | hammer -u '{{ sat_user }}' -p '{{ sat_pass }}' subscription delete-manifest --organization '{{ organization }}' 43 | environment: 44 | TZ: UTC 45 | register: manifest_delete 46 | 47 | - name: "Calculate delete duration" 48 | ansible.builtin.set_fact: 49 | manifest_delete_duration: "{{ (manifest_delete.end | to_datetime('%Y-%m-%d %H:%M:%S.%f')).timestamp() - (manifest_delete.start | to_datetime('%Y-%m-%d %H:%M:%S.%f')).timestamp() }}" 50 | 51 | - name: "Print delete results" 52 | ansible.builtin.debug: 53 | msg: "ManifestDelete {{ manifest_delete.start }} to {{ manifest_delete.end }} taking {{ manifest_delete_duration }} seconds" 54 | ... 
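Usage note: these tasks expect /root/manifest-auto.zip to already exist on the Satellite. A sketch of staging it with an ad-hoc copy before running the test (the local source path is yours; connection options such as -u root or --private-key follow the examples used elsewhere in this repository):

    $ ansible -u root -i conf/hosts.ini -m copy \
        -a "src=manifest-auto.zip dest=/root/manifest-auto.zip force=yes" satellite6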
55 | -------------------------------------------------------------------------------- /playbooks/katello/katello_mirror_installation.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: satellite6 3 | gather_facts: False 4 | vars: 5 | - installer_additional_ops: '' 6 | roles: 7 | - ../common/roles/scalelab-nic-cleanup 8 | - ../common/roles/common 9 | - ../common/roles/remove-home-extend-root 10 | - ../common/roles/enlarge-arp-table 11 | - linux-system-roles.timesync 12 | tasks: 13 | - name: disable & enable repos 14 | shell: "{{ item }}" 15 | with_items: 16 | - subscription-manager repos --disable "*" 17 | - subscription-manager repos --enable rhel-7-server-rpms 18 | - subscription-manager repos --enable rhel-7-server-optional-rpms 19 | - subscription-manager repos --enable rhel-7-server-extras-rpms 20 | - yum install -y yum-utils 21 | when: ansible_distribution == "RedHat" 22 | 23 | - name: settup the server for installation 24 | shell: | 25 | yum -y install centos-release-scl 26 | curl -o /etc/yum.repos.d/satellite.repo http://perf54.perf.lab.eng.bos.redhat.com/pub/mirrors/2020-04-30-Katello_Nightly_Sat68_snap1/satellite.repo 27 | 28 | - name: make a entry of IP & hostname in /etc/hosts file 29 | lineinfile: 30 | path: /etc/hosts 31 | line: '{{ ansible_default_ipv4.address }} {{ ansible_hostname }}' 32 | create: yes 33 | 34 | - name: install the katello nightly 35 | shell: yum -y install katello 36 | 37 | - name: configure the katello nightly 38 | command: foreman-installer --scenario katello --skip-checks-i-know-better --foreman-initial-admin-username {{ sat_user }} --foreman-initial-admin-password {{ sat_pass }} {{ installer_additional_ops }} 39 | 40 | - name: Put SELinux in permissive mode #workaround for https://projects.theforeman.org/issues/29603 41 | selinux: 42 | policy: targeted 43 | state: permissive 44 | 45 | - name: change the 'pool' setting in database.yml file #workaround for https://projects.theforeman.org/issues/29370 46 | replace: 47 | path: /etc/foreman/database.yml 48 | regexp: 'pool: 5$' 49 | replace: 'pool: 30' 50 | 51 | - name: create a file with content #workaround for https://projects.theforeman.org/issues/29370 52 | copy: 53 | dest: /usr/share/foreman/config/initializers/foreman-tasks-db-pool-size.rb 54 | content: | 55 | ForemanTasks.dynflow.config.db_pool_size = 30 56 | 57 | - name: restart the foreman service #workaround for https://projects.theforeman.org/issues/29370 58 | shell: foreman-maintain service restart 59 | -------------------------------------------------------------------------------- /playbooks/common/roles/rhsm/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # Following might fail with 3 | # msg: Failed to register with 'subscription.rhn.redhat.com': 'RhsmPool' object has no attribute 'PoolId' 4 | # Should be already fixed these days upstream: 5 | # https://github.com/ansible/ansible-modules-core/issues/421 6 | # So I patched mine Ansible package (ansible-1.9.4-1.fc23.noarch): 7 | # # pwd 8 | # /usr/lib/python2.7/site-packages/ansible/modules/core 9 | # # curl --insecure https://patch-diff.githubusercontent.com/raw/ansible/ansible-modules-core/pull/1204.patch | patch -p 1 --merge 10 | # # vim packaging/os/redhat_subscription.py # had one merge conflict so had to patch it manually 11 | # - action: redhat_subscription 12 | # state=present 13 | # username="{{ rhsm_user }}" 14 | # password="{{ rhsm_pass }}" 15 | # pool="{{ rhsm_pools 
}}" 16 | # autosubscribe=true 17 | # register: registration 18 | # until: not registration.failed 19 | # retries: 5 20 | # delay: 10 21 | 22 | - name: "Make sure we have required variables" 23 | setup: 24 | gather_subset: "!all" 25 | 26 | ###- name: "Remove registrations" 27 | ### command: 28 | ### subscription-manager clean 29 | ###- name: "Remove any preexistent Satellite config" 30 | ### yum: 31 | ### name: katello-ca-consumer-* 32 | ### state: absent 33 | 34 | # Make sure configuration matches hosted servers. 35 | # Maybe we had the system registered to some satellite before? Clenup 36 | - name: "Make sure configuration matches hosted servers" 37 | command: 38 | subscription-manager config --server.hostname=subscription.rhsm.redhat.com --server.port=443 --server.prefix=/subscription --rhsm.baseurl=https://cdn.redhat.com --rhsm.ca_cert_dir=/etc/rhsm/ca/ --rhsm.repo_ca_cert=/etc/rhsm/ca/redhat-uep.pem 39 | 40 | - name: "Register the system with RHSM" 41 | redhat_subscription: 42 | username: "{{ rhsm_user }}" 43 | password: "{{ rhsm_pass }}" 44 | force_register: true 45 | pool: "{{ rhsm_pool | regex_escape() }}" 46 | register: registering 47 | until: registering.changed 48 | retries: 30 49 | delay: 10 50 | 51 | # Make sure only base RHEL repo is attached 52 | - name: "Disable all RHSM repos and only enable main RHEL8 repos" 53 | rhsm_repository: 54 | name: 55 | - 'rhel-8-for-x86_64-baseos-rpms' 56 | - 'rhel-8-for-x86_64-appstream-rpms' 57 | purge: True 58 | when: ansible_distribution_major_version == 8 and registering.changed 59 | ... 60 | -------------------------------------------------------------------------------- /infra/kibana/saved_objects/visualization_73397dc1-6b75-4066-bb1b-bdb1e53d1970.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "attributes": { 4 | "description": "", 5 | "kibanaSavedObjectMeta": { 6 | "searchSourceJSON": "{\"filter\":[{\"$state\":{\"store\":\"appState\"},\"meta\":{\"alias\":null,\"disabled\":false,\"indexRefName\":\"kibanaSavedObjectMeta.searchSourceJSON.filter[0].meta.index\",\"key\":\"query\",\"negate\":false,\"type\":\"custom\",\"value\":\"{\\\"wildcard\\\":{\\\"parameters.version.keyword\\\":\\\"*-6.11.*\\\"}}\"},\"query\":{\"wildcard\":{\"parameters.version.keyword\":\"*-6.11.*\"}}}],\"indexRefName\":\"kibanaSavedObjectMeta.searchSourceJSON.index\",\"query\":{\"language\":\"lucene\",\"query\":\"\"}}" 7 | }, 8 | "title": "Sat 6.11 run to version", 9 | "uiStateJSON": "{\"vis\":{\"params\":{\"sort\":{\"columnIndex\":null,\"direction\":null}}}}", 10 | "version": 1, 11 | "visState": "{\"aggs\":[{\"enabled\":true,\"id\":\"1\",\"params\":{\"customLabel\":\"Number of results\"},\"schema\":\"metric\",\"type\":\"count\"},{\"enabled\":true,\"id\":\"2\",\"params\":{\"customLabel\":\"Test run ID\",\"field\":\"parameters.run.keyword\",\"missingBucket\":false,\"missingBucketLabel\":\"Missing\",\"order\":\"desc\",\"orderBy\":\"_key\",\"otherBucket\":false,\"otherBucketLabel\":\"Other\",\"size\":15},\"schema\":\"bucket\",\"type\":\"terms\"},{\"enabled\":true,\"id\":\"4\",\"params\":{\"customLabel\":\"Satellite version\",\"field\":\"parameters.version.keyword\",\"size\":1},\"schema\":\"bucket\",\"type\":\"significant_terms\"}],\"params\":{\"perPage\":5,\"showMetricsAtAllLevels\":false,\"showPartialRows\":true,\"showTotal\":false,\"sort\":{\"columnIndex\":null,\"direction\":null},\"totalFunc\":\"sum\"},\"title\":\"Sat 6.11 run to version\",\"type\":\"table\"}" 12 | }, 13 | "id": 
"73397dc1-6b75-4066-bb1b-bdb1e53d1970", 14 | "migrationVersion": { 15 | "visualization": "7.10.0" 16 | }, 17 | "references": [ 18 | { 19 | "id": "293cb4b0-272c-11ea-bfca-3fb30f912ea8", 20 | "name": "kibanaSavedObjectMeta.searchSourceJSON.index", 21 | "type": "index-pattern" 22 | }, 23 | { 24 | "id": "293cb4b0-272c-11ea-bfca-3fb30f912ea8", 25 | "name": "kibanaSavedObjectMeta.searchSourceJSON.filter[0].meta.index", 26 | "type": "index-pattern" 27 | } 28 | ], 29 | "type": "visualization", 30 | "updated_at": "2022-07-25T10:59:15.771Z", 31 | "version": "WzQwNTY2LDhd" 32 | } 33 | ] -------------------------------------------------------------------------------- /infra/kibana/saved_objects/visualization_739cfbd4-6cff-4394-bc3f-6e181d498f31.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "attributes": { 4 | "description": "", 5 | "kibanaSavedObjectMeta": { 6 | "searchSourceJSON": "{\"filter\":[{\"$state\":{\"store\":\"appState\"},\"meta\":{\"alias\":null,\"disabled\":false,\"indexRefName\":\"kibanaSavedObjectMeta.searchSourceJSON.filter[0].meta.index\",\"key\":\"query\",\"negate\":false,\"type\":\"custom\",\"value\":\"{\\\"wildcard\\\":{\\\"parameters.version.keyword\\\":\\\"*-6.12.*\\\"}}\"},\"query\":{\"wildcard\":{\"parameters.version.keyword\":\"*-6.12.*\"}}}],\"indexRefName\":\"kibanaSavedObjectMeta.searchSourceJSON.index\",\"query\":{\"language\":\"lucene\",\"query\":\"\"}}" 7 | }, 8 | "title": "Sat 6.12 run to version", 9 | "uiStateJSON": "{\"vis\":{\"params\":{\"sort\":{\"columnIndex\":null,\"direction\":null}}}}", 10 | "version": 1, 11 | "visState": "{\"aggs\":[{\"enabled\":true,\"id\":\"1\",\"params\":{\"customLabel\":\"Number of results\"},\"schema\":\"metric\",\"type\":\"count\"},{\"enabled\":true,\"id\":\"2\",\"params\":{\"customLabel\":\"Test run ID\",\"field\":\"parameters.run.keyword\",\"missingBucket\":false,\"missingBucketLabel\":\"Missing\",\"order\":\"desc\",\"orderBy\":\"_key\",\"otherBucket\":false,\"otherBucketLabel\":\"Other\",\"size\":15},\"schema\":\"bucket\",\"type\":\"terms\"},{\"enabled\":true,\"id\":\"4\",\"params\":{\"customLabel\":\"Satellite version\",\"field\":\"parameters.version.keyword\",\"size\":1},\"schema\":\"bucket\",\"type\":\"significant_terms\"}],\"params\":{\"perPage\":5,\"showMetricsAtAllLevels\":false,\"showPartialRows\":true,\"showTotal\":false,\"sort\":{\"columnIndex\":null,\"direction\":null},\"totalFunc\":\"sum\"},\"title\":\"Sat 6.12 run to version\",\"type\":\"table\"}" 12 | }, 13 | "id": "739cfbd4-6cff-4394-bc3f-6e181d498f31", 14 | "migrationVersion": { 15 | "visualization": "7.10.0" 16 | }, 17 | "references": [ 18 | { 19 | "id": "293cb4b0-272c-11ea-bfca-3fb30f912ea8", 20 | "name": "kibanaSavedObjectMeta.searchSourceJSON.index", 21 | "type": "index-pattern" 22 | }, 23 | { 24 | "id": "293cb4b0-272c-11ea-bfca-3fb30f912ea8", 25 | "name": "kibanaSavedObjectMeta.searchSourceJSON.filter[0].meta.index", 26 | "type": "index-pattern" 27 | } 28 | ], 29 | "type": "visualization", 30 | "updated_at": "2022-07-25T10:59:15.771Z", 31 | "version": "WzQwNTY3LDhd" 32 | } 33 | ] -------------------------------------------------------------------------------- /infra/kibana/saved_objects/visualization_7a172852-8056-4706-b542-c6b7e2955e56.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "attributes": { 4 | "description": "", 5 | "kibanaSavedObjectMeta": { 6 | "searchSourceJSON": 
"{\"filter\":[{\"$state\":{\"store\":\"appState\"},\"meta\":{\"alias\":null,\"disabled\":false,\"indexRefName\":\"kibanaSavedObjectMeta.searchSourceJSON.filter[0].meta.index\",\"key\":\"query\",\"negate\":false,\"type\":\"custom\",\"value\":\"{\\\"wildcard\\\":{\\\"parameters.version.keyword\\\":\\\"*-6.13.*\\\"}}\"},\"query\":{\"wildcard\":{\"parameters.version.keyword\":\"*-6.13.*\"}}}],\"indexRefName\":\"kibanaSavedObjectMeta.searchSourceJSON.index\",\"query\":{\"language\":\"lucene\",\"query\":\"\"}}" 7 | }, 8 | "title": "Sat 6.13 run to version", 9 | "uiStateJSON": "{\"vis\":{\"params\":{\"sort\":{\"columnIndex\":null,\"direction\":null}}}}", 10 | "version": 1, 11 | "visState": "{\"aggs\":[{\"enabled\":true,\"id\":\"1\",\"params\":{\"customLabel\":\"Number of results\"},\"schema\":\"metric\",\"type\":\"count\"},{\"enabled\":true,\"id\":\"2\",\"params\":{\"customLabel\":\"Test run ID\",\"field\":\"parameters.run.keyword\",\"missingBucket\":false,\"missingBucketLabel\":\"Missing\",\"order\":\"desc\",\"orderBy\":\"_key\",\"otherBucket\":false,\"otherBucketLabel\":\"Other\",\"size\":15},\"schema\":\"bucket\",\"type\":\"terms\"},{\"enabled\":true,\"id\":\"4\",\"params\":{\"customLabel\":\"Satellite version\",\"field\":\"parameters.version.keyword\",\"size\":1},\"schema\":\"bucket\",\"type\":\"significant_terms\"}],\"params\":{\"perPage\":5,\"showMetricsAtAllLevels\":false,\"showPartialRows\":true,\"showTotal\":false,\"sort\":{\"columnIndex\":null,\"direction\":null},\"totalFunc\":\"sum\"},\"title\":\"Sat 6.13 run to version\",\"type\":\"table\"}" 12 | }, 13 | "id": "7a172852-8056-4706-b542-c6b7e2955e56", 14 | "migrationVersion": { 15 | "visualization": "7.10.0" 16 | }, 17 | "references": [ 18 | { 19 | "id": "293cb4b0-272c-11ea-bfca-3fb30f912ea8", 20 | "name": "kibanaSavedObjectMeta.searchSourceJSON.index", 21 | "type": "index-pattern" 22 | }, 23 | { 24 | "id": "293cb4b0-272c-11ea-bfca-3fb30f912ea8", 25 | "name": "kibanaSavedObjectMeta.searchSourceJSON.filter[0].meta.index", 26 | "type": "index-pattern" 27 | } 28 | ], 29 | "type": "visualization", 30 | "updated_at": "2023-01-09T08:18:07.241Z", 31 | "version": "WzcyNDQxLDEwXQ==" 32 | } 33 | ] -------------------------------------------------------------------------------- /experiment/sync.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | source experiment/run-library.sh 4 | 5 | test_sync_repositories_count="${PARAM_test_sync_repositories_count:-8}" 6 | test_sync_repositories_url_template="${PARAM_test_sync_repositories_url_template:-http://repos.example.com/repo*}" 7 | test_sync_repositories_max_sync_secs="${PARAM_test_sync_repositories_max_sync_secs:-600}" 8 | 9 | test_sync_iso_count="${PARAM_test_sync_iso_count:-8}" 10 | test_sync_iso_url_template="${PARAM_test_sync_iso_url_template:-http://storage.example.com/iso-repos*}" 11 | test_sync_iso_max_sync_secs="${PARAM_test_sync_iso_max_sync_secs:-600}" 12 | 13 | test_sync_docker_count="${PARAM_test_sync_docker_count:-8}" 14 | test_sync_docker_url_template="${PARAM_test_sync_docker_url_template:-https://registry-1.docker.io}" 15 | test_sync_docker_max_sync_secs="${PARAM_test_sync_docker_max_sync_secs:-600}" 16 | 17 | 18 | #section "Checking environment" 19 | #generic_environment_check false 20 | 21 | 22 | section "Sync test" 23 | # Yum repositories 24 | ap 10-test-sync-repositories.log \ 25 | -e "organization='{{ sat_org }}'" \ 26 | -e "test_sync_repositories_count=$test_sync_repositories_count" \ 27 | -e 
"test_sync_repositories_url_template=$test_sync_repositories_url_template" \ 28 | -e "test_sync_repositories_max_sync_secs=$test_sync_repositories_max_sync_secs" \ 29 | playbooks/tests/sync-repositories.yaml 30 | # ISO repositories 31 | #ap 10-test-sync-iso.log playbooks/tests/sync-iso.yaml -e "test_sync_iso_count=$test_sync_iso_count test_sync_iso_url_template=$test_sync_iso_url_template test_sync_iso_max_sync_secs=$test_sync_iso_max_sync_secs" 32 | # Container repositories 33 | #ap 10-test-sync-docker.log playbooks/tests/sync-docker.yaml -e "test_sync_docker_count=$test_sync_docker_count test_sync_docker_url_template=$test_sync_docker_url_template test_sync_docker_max_sync_secs=$test_sync_docker_max_sync_secs" 34 | 35 | 36 | section "Summary" 37 | # Yum repositories 38 | e SyncRepositories $logs/10-test-sync-repositories.log 39 | e PublishContentViews $logs/10-test-sync-repositories.log 40 | e PromoteContentViews $logs/10-test-sync-repositories.log 41 | # ISO repositories 42 | e SyncRepositories $logs/10-test-sync-iso.log 43 | e PublishContentViews $logs/10-test-sync-iso.log 44 | e PromoteContentViews $logs/10-test-sync-iso.log 45 | # Container repositories 46 | e SyncRepositories $logs/10-test-sync-docker.log 47 | e PublishContentViews $logs/10-test-sync-docker.log 48 | e PromoteContentViews $logs/10-test-sync-docker.log 49 | 50 | 51 | section "Sosreport" 52 | skip_measurement='true' ap sosreporter-gatherer.log playbooks/satellite/sosreport_gatherer.yaml -e "sosreport_gatherer_local_dir='../../$logs/sosreport/'" 53 | 54 | 55 | junit_upload 56 | -------------------------------------------------------------------------------- /playbooks/satellite/roles/client-scripts/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: Set default variable(s) values 3 | ansible.builtin.set_fact: 4 | aks_list: "{{ aks | default('AK') | split }}" 5 | 6 | - name: Assert that list of AKs is not empty 7 | ansible.builtin.assert: 8 | that: 9 | - aks_list | length > 0 10 | 11 | # - name: "Get group name of our location" 12 | # ansible.builtin.set_fact: 13 | # location_groupname: "location_{{ location | lower }}" 14 | 15 | # - ansible.builtin.set_fact: 16 | # location_groupname_groups: "groups[location_groupname]: {{ groups[location_groupname] }}" 17 | 18 | # - debug: 19 | # msg: "location_groupname_groups: {{ location_groupname_groups }}" 20 | 21 | # - name: "Set default variable(s) values" 22 | # ansible.builtin.set_fact: 23 | # registration_target: "{{ tests_registration_target | default(groups[location_groupname] | intersect(groups['satellite6']) | first) }}" 24 | # when: 25 | # - groups[location_groupname] | intersect(groups['satellite6']) | length > 0 26 | 27 | # - name: "Set default variable(s) values" 28 | # ansible.builtin.set_fact: 29 | # registration_target: "{{ tests_registration_target | default(groups[location_groupname] | intersect(groups['capsules']) | first) }}" 30 | # when: 31 | # - "'capsules' in groups" 32 | # - groups[location_groupname] | intersect(groups['capsules']) | length > 0 33 | 34 | # - name: "Set default variable(s) values" 35 | # ansible.builtin.set_fact: 36 | # registration_target: "{{ tests_registration_target | default(groups[location_groupname] | intersect(groups['capsule_lbs']) | first) }}" 37 | # when: 38 | # - "'capsule_lbs' in groups" 39 | # - groups[location_groupname] | intersect(groups['capsule_lbs']) | length > 0 40 | 41 | # - debug: 42 | # msg: "registration_target: {{ tests_registration_target }}" 43 | 
44 | - name: Download host registration script from registration target 45 | ansible.builtin.get_url: 46 | url: "http://{{ tests_registration_target }}/pub/host-registration.{{ item }}.sh" 47 | dest: "/root/host-registration.{{ item }}.sh" 48 | mode: '0400' 49 | force: true 50 | loop: "{{ aks_list }}" 51 | 52 | - name: Distribute content host registration related playbooks 53 | throttle: 8 54 | ansible.builtin.copy: 55 | src: "{{ item }}" 56 | dest: "/root/{{ item }}" 57 | force: true 58 | loop: 59 | - host-registration_prepare.yaml 60 | - host-registration.yaml 61 | 62 | - name: Distribute podman related scripts 63 | ansible.builtin.template: 64 | src: "{{ item }}.j2" 65 | dest: "/root/{{ item }}" 66 | force: true 67 | loop: 68 | - podman-login.sh 69 | - podman-pull-rhosp.sh 70 | ... 71 | -------------------------------------------------------------------------------- /experiment/rex.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | source experiment/run-library.sh 4 | 5 | 6 | section 'Checking environment' 7 | generic_environment_check false false 8 | # unset skip_measurement 9 | # set +e 10 | 11 | 12 | section 'Remote execution' 13 | job_template_ansible_default='Run Command - Ansible Default' 14 | job_template_ssh_default='Run Command - Script Default' 15 | 16 | skip_measurement='true' h 10-rex-set-via-ip.log \ 17 | 'settings set --name remote_execution_connect_by_ip --value true' 18 | skip_measurement='true' a 11-rex-cleanup-know_hosts.log \ 19 | -m 'ansible.builtin.shell' \ 20 | -a 'rm -rf /usr/share/foreman-proxy/.ssh/known_hosts*' \ 21 | satellite6 22 | 23 | skip_measurement='true' h 15-rex-date-ansible.log \ 24 | "job-invocation create --async --description-format 'Run %{command} (%{template_name})' --inputs command='date' --job-template '$job_template_ansible_default' --search-query 'name ~ container'" 25 | jsr "$logs/15-rex-date-ansible.log" 26 | j "$logs/15-rex-date-ansible.log" 27 | 28 | skip_measurement='true' h 15-rex-date.log \ 29 | "job-invocation create --async --description-format 'Run %{command} (%{template_name})' --inputs command='date' --job-template '$job_template_ssh_default' --search-query 'name ~ container'" 30 | jsr "$logs/15-rex-date.log" 31 | j "$logs/15-rex-date.log" 32 | 33 | skip_measurement='true' h 16-rex-uploadprofile.log \ 34 | "job-invocation create --async --description-format 'Run %{command} (%{template_name})' --inputs command='dnf uploadprofile --force-upload' --job-template '$job_template_ssh_default' --search-query 'name ~ container'" 35 | jsr "$logs/16-rex-uploadprofile.log" 36 | j "$logs/16-rex-uploadprofile.log" 37 | 38 | skip_measurement='true' h 17-rex-katello_package_install-podman.log \ 39 | "job-invocation create --async --description-format 'Install %{package} (%{template_name})' --feature katello_package_install --inputs package='podman' --search-query 'name ~ container'" 40 | jsr "$logs/17-rex-katello_package_install-podman.log" 41 | j "$logs/17-rex-katello_package_install-podman.log" 42 | 43 | skip_measurement='true' h 17-rex-podman_pull.log \ 44 | "job-invocation create --async --description-format 'Run %{command} (%{template_name})' --inputs command='bash -x /root/podman-pull.sh' --job-template '$job_template_ssh_default' --search-query 'name ~ container'" 45 | jsr "$logs/17-rex-podman_pull.log" 46 | j "$logs/17-rex-podman_pull.log" 47 | 48 | skip_measurement='true' h 18-rex-katello_package_update.log \ 49 | "job-invocation create --async --description-format 
'%{template_name}' --feature katello_package_update --search-query 'name ~ container'" 50 | jsr "$logs/18-rex-katello_package_update.log" 51 | j "$logs/18-rex-katello_package_update.log" 52 | 53 | 54 | junit_upload 55 | -------------------------------------------------------------------------------- /playbooks/tests/webui-pages.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: satellite6 3 | gather_facts: false 4 | vars: 5 | ui_concurrency: 10 6 | ui_duration: 300 7 | tasks: 8 | - name: "Show settings" 9 | ansible.builtin.debug: 10 | msg: 11 | - "ui_concurrency = {{ ui_concurrency }}" 12 | - "ui_duration = {{ ui_duration }}" 13 | 14 | - name: "Create virtualenv" 15 | ansible.builtin.shell: 16 | cmd: | 17 | set -o pipefail 18 | 19 | python3 -m venv /root/venv 20 | 21 | source /root/venv/bin/activate 22 | 23 | python3 -m pip install -U pip 24 | python3 -m pip install -e "git+https://github.com/redhat-performance/opl.git#egg=opl-rhcloud-perf-team" 25 | args: 26 | creates: /root/venv 27 | 28 | - name: "Deploy test script" 29 | ansible.builtin.copy: 30 | src: webui-pages.py 31 | dest: /root/webui-pages.py 32 | mode: '0755' 33 | force: true 34 | 35 | - name: "Run the test" 36 | ansible.builtin.shell: 37 | cmd: | 38 | set -o pipefail 39 | 40 | export STATUS_DATA_FILE=/tmp/status-data.json 41 | rm -rf $STATUS_DATA_FILE 42 | export PYTHONWARNINGS="ignore:Unverified HTTPS request" 43 | 44 | source /root/venv/bin/activate 45 | 46 | python3 /root/webui-pages.py \ 47 | --satellite-username {{ sat_user }} \ 48 | --satellite-password {{ sat_pass }} \ 49 | --satellite-org_id 1 \ 50 | --satellite-version {{ sat_version }} \ 51 | --locust-host https://localhost \ 52 | --locust-num-clients {{ ui_concurrency }} \ 53 | --test-duration {{ ui_duration }} 54 | environment: 55 | TZ: UTC 56 | ignore_errors: true 57 | register: test_cmd 58 | 59 | - name: "Show output" 60 | ansible.builtin.debug: 61 | msg: "{{ ['========== Standard output =========='] + test_cmd.stdout_lines + ['', '========== Standard error output =========='] + test_cmd.stderr_lines }}" 62 | run_once: true 63 | 64 | - name: "Fetch status data file from remote host" 65 | ansible.builtin.fetch: 66 | src: "/tmp/status-data.json" 67 | dest: "/tmp/status-data-webui-pages.json" 68 | flat: true 69 | 70 | - name: "Calculate run duration" 71 | ansible.builtin.set_fact: 72 | test_cmd_duration: "{{ (test_cmd.end | to_datetime('%Y-%m-%d %H:%M:%S.%f')).timestamp() - (test_cmd.start | to_datetime('%Y-%m-%d %H:%M:%S.%f')).timestamp() }}" 73 | 74 | - name: "Print results" 75 | ansible.builtin.debug: 76 | msg: "WebUIPagesTest_c{{ ui_concurrency }}_d{{ ui_duration }} {{ test_cmd.start }} to {{ test_cmd.end }} taking {{ test_cmd_duration }} seconds" 77 | ... 78 | -------------------------------------------------------------------------------- /playbooks/tests/puppet-big-test.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: docker_hosts 3 | gather_facts: False 4 | vars: 5 | size: 1 # should be provided via external parameter 6 | forks: "{{ size }}" # in how many forks should playbooks run? 7 | use_only_fresh: true # should we use only containers which were not registered yet? 8 | update_used: true # once we use containers, should we update /root/containers-used-count? 
9 | tasks: 10 | - name: "Deploy docker host side of big puppet test" 11 | template: 12 | src: puppet-big-client.yaml.j2 13 | dest: /root/puppet-big-client.yaml 14 | validate: python -c 'import yaml; print yaml.safe_load(open("%s", "r"))' 15 | tags: always 16 | 17 | - name: "Ansible on docker host configured to ignore host keys" 18 | lineinfile: 19 | regexp: '^\s*host_key_checking\s*=' 20 | line: 'host_key_checking = False' 21 | path: /etc/ansible/ansible.cfg 22 | tags: always 23 | 24 | - import_tasks: includes/prepare_clients_ini.yaml 25 | vars: 26 | size: "{{ size }}" 27 | tags: always 28 | 29 | - name: "Determine marker variable" 30 | set_fact: 31 | marker: "{{ lookup('pipe', 'date -u -Iseconds') }}" 32 | run_once: yes 33 | tags: always 34 | 35 | 36 | - name: "Set variables for single module job" 37 | set_fact: 38 | nickname: single 39 | content_puppet_cv: SatPerfContentViewPuppetFakeModule 40 | content_puppet_module_name: satperftest 41 | tags: SINGLE 42 | 43 | - name: "Set variables for job with bunch of modules" 44 | set_fact: 45 | nickname: bunch 46 | content_puppet_cv: SatPerfContentViewPuppetRealModules 47 | content_puppet_module_name: acl,apache,archive,firewalld,gcc,haproxy,java,limits,motd,nsswitch,ntp,pam,rsyslog,ssh,sysstat,tomcat 48 | tags: BUNCH 49 | 50 | - name: "Determine Puppet environment name for {{ content_puppet_cv }}" 51 | shell: 52 | hammer --output csv -u "{{ sat_user }}" -p "{{ sat_pass }}" environment list | grep "{{ content_puppet_cv }}" | cut -d ',' -f 2 53 | delegate_to: "{{ groups['satellite6']|first }}" 54 | run_once: yes 55 | register: hammer_envs_grepped 56 | tags: always 57 | - name: "Set Puppet environment label variable" 58 | set_fact: 59 | content_puppet_env: "{{ hammer_envs_grepped.stdout_lines[0] }}" 60 | tags: always 61 | 62 | 63 | # Puppet registration 64 | - import_tasks: includes/puppet-big-test-register.yaml 65 | tags: always 66 | 67 | # Puppet deploy 68 | - import_tasks: includes/puppet-big-test-deploy.yaml 69 | tags: always 70 | 71 | 72 | - import_tasks: includes/update_used_containers.yaml 73 | vars: 74 | used_count: "{{ containers_used_count|int + size|int }}" 75 | tags: always 76 | ... 77 | -------------------------------------------------------------------------------- /playbooks/tests/continuous-reg.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: docker_hosts 3 | gather_facts: False 4 | vars: 5 | # size=10 tags=untagged,REG,REM bootstrap_retries=0 grepper='Register' 6 | contreg_size: 3 # how many containers per docker host to use? 
7 | contreg_iter: 100 # how many register&unregister iterations to perform 8 | contreg_file: "/root/container-ips.shuffled.continuous-reg" 9 | contreg_inv: "/root/clients.continuous-reg.ini" 10 | contreg_log: "/root/out-continuous-reg-{{ lookup('pipe', 'date --iso-8601=seconds') }}.log" 11 | tasks: 12 | - name: "Check if we already have {{ contreg_file }} file" 13 | stat: 14 | path: "{{ contreg_file }}" 15 | register: file_stat 16 | - name: "Select hosts to use for continuous registrations" 17 | shell: | 18 | head -n "{{ contreg_size }}" /root/container-ips.shuffled >"{{ contreg_file }}" 19 | cp /root/container-ips.shuffled "{{ contreg_file }}.full" 20 | tail -n "+{{ contreg_size + 1 }}" "{{ contreg_file }}.full" >/root/container-ips.shuffled 21 | cut -d ' ' -f 2 "{{ contreg_file }}" >"{{ contreg_inv }}" 22 | when: "not ( file_stat.stat.isreg | default(false) | bool )" 23 | - name: "Run the loop of {{ contreg_iter }} registration iterations" 24 | shell: | 25 | ansible -u root -i clients.continuous-reg.ini --private-key id_rsa_key all -m shell -a " 26 | set -x; 27 | sleep \$(( \$RANDOM % 60 )); 28 | rpm --quiet -q katello-host-tools || yum -y install katello-host-tools; 29 | rpm --quiet -q zsh && rpm -e zsh; 30 | curl -X DELETE -k -s -u '{{ sat_user }}:{{ sat_pass }}' https://{{ groups['satellite6']|first }}/api/v2/hosts/\$( hostname ); 31 | subscription-manager status; 32 | for i in \$( seq {{ contreg_iter }} ); do 33 | subscription-manager unregister; 34 | subscription-manager status; 35 | subscription-manager register --activationkey {{ content_activationkey }} --org {{ organization }}; 36 | rc=\$?; 37 | if ! [ \"\$rc\" -eq 0 -o \"\$rc\" -eq 2 ]; then 38 | echo \"ERROR: Registration failed with \$rc\" >&2; 39 | exit 1; 40 | fi; 41 | subscription-manager status | grep 'Overall Status: \(Current\|Invalid\)' || exit 2; 42 | subscription-manager refresh || exit 3; 43 | yum -y install zsh || exit 4; 44 | yum -y remove zsh || exit 5; 45 | done &> {{ contreg_log }}; 46 | tail {{ contreg_log }}" 47 | register: contreg_run 48 | - name: "Show output (more output in {{ contreg_log }} in the container)" 49 | debug: 50 | var: contreg_run.stdout_lines 51 | - name: "Show error output (more output in {{ contreg_log }} in the container)" 52 | debug: 53 | var: contreg_run.stderr_lines 54 | ...
55 | -------------------------------------------------------------------------------- /playbooks/tests/downloadtest.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: satellite6 3 | gather_facts: false 4 | tasks: 5 | - name: "Start dnf download job" 6 | ansible.builtin.shell: 7 | cmd: | 8 | hammer job-invocation create --async --description-format 'Run %{command} (%{template_name})' --inputs command='dnf download --downloadonly --downloaddir=\"/var/tmp\" {{ package_name_download_test }}' --job-template "{{ job_template_ssh_default }}" --search-query 'name ~ container' & 9 | sleep 30 10 | 11 | - name: "Get last job ID" 12 | ansible.builtin.shell: 13 | cmd: | 14 | set -o pipefail 15 | 16 | hammer --csv job-invocation list | cut -d ',' -f 1 | grep '^[0-9]*$' | sort -n | tail -n 1 17 | register: last_job_id_cmd 18 | 19 | - ansible.builtin.debug: var=last_job_id_cmd 20 | 21 | - name: "Set last job ID" 22 | ansible.builtin.set_fact: 23 | last_job_id: "{{ last_job_id_cmd.stdout_lines[0] | int }}" 24 | 25 | - name: "Distribute private key" 26 | ansible.builtin.copy: 27 | src: files/wait-for-job.py 28 | dest: /root/wait-for-job.py 29 | mode: "u=rwx,g=rx,o=rx" 30 | force: true 31 | 32 | - name: Install simpleJson 33 | ansible.builtin.pip: 34 | name: simpleJson 35 | 36 | - name: "Wait for job {{ last_job_id }} to finish" 37 | ansible.builtin.shell: 38 | cmd: | 39 | /usr/libexec/platform-python /root/wait-for-job.py \ 40 | "{{ sat_user }}" \ 41 | "{{ sat_pass }}" \ 42 | "https://{{ groups['satellite6'] | first }}" \ 43 | "{{ last_job_id }}" \ 44 | "{{ max_age_task }}" 45 | register: wait_for_job_cmd 46 | ignore_errors: true 47 | until: wait_for_job_cmd is not failed 48 | retries: 5 49 | delay: 60 50 | 51 | - name: "Parse script result" 52 | ansible.builtin.set_fact: 53 | pass_count: "{{ ((wait_for_job_cmd.stdout_lines|last).split())[2] | int }}" 54 | total_count: "{{ ((wait_for_job_cmd.stdout_lines|last).split())[4] | int }}" 55 | start_time: "{{ ((wait_for_job_cmd.stdout_lines|last).split())[6] + ' ' + ((wait_for_job_cmd.stdout_lines | last).split())[7] }}" 56 | end_time: "{{ ((wait_for_job_cmd.stdout_lines|last).split())[9] + ' ' + ((wait_for_job_cmd.stdout_lines | last).split())[10] }}" 57 | test_time: "{{ ((wait_for_job_cmd.stdout_lines|last).split())[13] | int }}" 58 | avg_duration: "{{ ((wait_for_job_cmd.stdout_lines|last).split())[15] | int }}" 59 | 60 | - name: "Show what we have parsed" 61 | ansible.builtin.debug: 62 | msg: "RESULT: pass_count: {{ pass_count }}; total_count: {{ total_count }}; start_time: {{ start_time }}; end_time: {{ end_time }}; test_time: {{ test_time }}; avg_duration: {{ avg_duration }}" 63 | 64 | - name: "Give server some time to rest" 65 | ansible.builtin.pause: 66 | seconds: 30 67 | -------------------------------------------------------------------------------- /playbooks/katello/katello_nightly_installation.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: satellite6 3 | gather_facts: False 4 | vars: 5 | - installer_additional_ops: '' 6 | roles: 7 | - ../common/roles/scalelab-nic-cleanup 8 | - ../common/roles/common 9 | - ../common/roles/remove-home-extend-root 10 | - ../common/roles/enlarge-arp-table 11 | - linux-system-roles.timesync 12 | tasks: 13 | - name: disable & enable repos 14 | shell: "{{ item }}" 15 | with_items: 16 | - subscription-manager repos --disable "*" 17 | - subscription-manager repos --enable rhel-7-server-rpms 18 | - 
subscription-manager repos --enable rhel-7-server-optional-rpms 19 | - subscription-manager repos --enable rhel-7-server-extras-rpms 20 | - yum install -y yum-utils 21 | when: ansible_distribution == "RedHat" 22 | 23 | - name: set up the server for installation 24 | shell: "{{ item }}" 25 | with_items: 26 | - yum -y localinstall https://yum.theforeman.org/releases/nightly/el7/x86_64/foreman-release.rpm 27 | - yum -y localinstall https://fedorapeople.org/groups/katello/releases/yum/nightly/katello/el7/x86_64/katello-repos-latest.rpm 28 | - yum -y localinstall https://yum.puppet.com/puppet6-release-el-7.noarch.rpm 29 | - yum -y localinstall https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm 30 | - yum -y install foreman-release-scl 31 | 32 | - name: make an entry of IP & hostname in /etc/hosts file 33 | lineinfile: 34 | path: /etc/hosts 35 | line: '{{ ansible_default_ipv4.address }} {{ ansible_hostname }}' 36 | create: yes 37 | 38 | - name: install the katello nightly 39 | shell: yum -y install katello 40 | 41 | - name: configure the katello nightly 42 | command: foreman-installer --scenario katello --skip-checks-i-know-better --foreman-initial-admin-username {{ sat_user }} --foreman-initial-admin-password {{ sat_pass }} {{ installer_additional_ops }} 43 | 44 | - name: Put SELinux in permissive mode #workaround for https://projects.theforeman.org/issues/29603 45 | selinux: 46 | policy: targeted 47 | state: permissive 48 | 49 | #- name: reboot the server #workaround for https://projects.theforeman.org/issues/29603 50 | # shell: reboot 51 | 52 | - name: change the 'pool' setting in database.yml file #workaround for https://projects.theforeman.org/issues/29370 53 | replace: 54 | path: /etc/foreman/database.yml 55 | regexp: 'pool: 5$' 56 | replace: 'pool: 30' 57 | 58 | - name: create a file with content #workaround for https://projects.theforeman.org/issues/29370 59 | copy: 60 | dest: /usr/share/foreman/config/initializers/foreman-tasks-db-pool-size.rb 61 | content: | 62 | ForemanTasks.dynflow.config.db_pool_size = 30 63 | 64 | - name: restart the foreman service #workaround for https://projects.theforeman.org/issues/29370 65 | shell: foreman-maintain service restart 66 | 67 | 68 | 69 | -------------------------------------------------------------------------------- /scripts/build-lots-of-packages.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # -*- coding: UTF-8 -*- 3 | 4 | # This script is intended to build lots of packages for installation into 5 | # docker containers so they have a bigger package manifest. Files in packages 6 | # have minimal size, but metadata-wise they should be comparable with normal 7 | # packages. 8 | # 9 | # Requirements: 10 | # 11 | # You need the rpmfluff python library installed (Fedora has a package in its repos) 12 | # or just download rpmfluff.py from https://pagure.io/rpmfluff 13 | # 14 | # Usage: 15 | # 16 | # Run it and then copy it somewhere containers can reach it: 17 | # 18 | # scp $( find .
-name /tmp/\*.x86_64.rpm ) root@<...>:/var/www/html/pub/<...> 19 | # 20 | # Then create repo from these and prepare repofile and add womething like 21 | # this into Dockerfile: 22 | # 23 | # RUN curl http://<...>/pub/lots_of_packages/lots_of_packages.repo -o /etc/yum.repos.d/lots_of_packages.repo \ 24 | # && yum -y install foo \* --disablerepo=\* --enablerepo=lots_of_packages \ 25 | # && rm -f /etc/yum.repos.d/lots_of_packages.repo \ 26 | # && rm -rf /var/cache/yum/* 27 | 28 | import rpmfluff 29 | 30 | # Get some stats to decide on numbers: 31 | # rpm -ql $( rpm -qa | sort -R | head -n 100 ) | wc -l 32 | # rpm -q --provides $( rpm -qa | sort -R | head -n 100 ) | wc -l 33 | # rpm -q --requires $( rpm -qa | sort -R | head -n 100 ) | wc -l 34 | # rpm -qa --qf "%{SOURCERPM}\n" | sort -u | wc -l; rpm -qa | wc -l 35 | 36 | NAME = 'foo' 37 | PACKAGES = 1000 38 | SUBPACKAGES = 1 39 | CHANGELOGS = 50 40 | FILES = 100 * SUBPACKAGES # we can not have files in subpackages in rpmfluff, so move the payload into main package 41 | PROVIDES = 10 42 | REQUIRES = 20 43 | VERSION = "0.1" 44 | 45 | for p in range(PACKAGES): 46 | foo = rpmfluff.SimpleRpmBuild("%s%s" % (NAME, p), VERSION, str(CHANGELOGS)) 47 | foo.add_summary('This is summary for %s%s' % (NAME, p)) 48 | foo.add_description('This is descriptive description for %s%s' % (NAME, p)) 49 | for c in range(CHANGELOGS): 50 | foo.add_changelog_entry('This is entry %s for package %s%s' % (c, NAME, p), VERSION, str(c)) 51 | for f in range(FILES): 52 | foo.add_simple_payload_file_random() 53 | for d in range(PROVIDES): 54 | foo.add_provides("%s%s_provided%s" % (NAME, p, d)) 55 | for s in range(SUBPACKAGES): 56 | for d in range(PROVIDES): 57 | foo.add_requires("%s%s_sub%s_required%s" % (NAME, p, s, d)) 58 | for d in range(REQUIRES - (SUBPACKAGES * PROVIDES)): 59 | foo.add_requires("/bin/bash") 60 | for s in range(SUBPACKAGES): 61 | sub = foo.add_subpackage('sub%s' % s) 62 | sub.add_summary('This is summary for %s%s-sub%s' % (NAME, p, s)) 63 | sub.add_description('This is descriptive description for %s%s-sub%s' % (NAME, p, s)) 64 | for d in range(PROVIDES): 65 | sub.add_provides("%s%s_sub%s_required%s" % (NAME, p, s, d)) 66 | sub.add_requires("%s%s_provided%s" % (NAME, p, d)) 67 | foo.make() 68 | -------------------------------------------------------------------------------- /playbooks/katello/roles/add_katello_repos/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - name: 'Remove katello-repos' 3 | yum: 4 | name: katello-repos 5 | state: absent 6 | 7 | - name: 'Katello Koji repository' 8 | yum_repository: 9 | name: katello-koji 10 | description: "Katello {{ katello_repositories_version }} Koji Repository" 11 | baseurl: "http://koji.katello.org/releases/yum/katello-{{ katello_repositories_version }}/katello/el{{ ansible_distribution_major_version }}/x86_64/" 12 | priority: 1 13 | gpgcheck: 0 14 | 15 | - name: 'Katello Client Koji repository' 16 | yum_repository: 17 | name: katello-client-koji 18 | description: "Katello {{ katello_repositories_version }} Client Koji Repository" 19 | baseurl: "http://koji.katello.org/releases/yum/katello-{{ katello_repositories_version }}/client/el{{ ansible_distribution_major_version }}/x86_64/" 20 | priority: 1 21 | gpgcheck: 0 22 | 23 | - name: 'Candlepin Koji repository' 24 | yum_repository: 25 | name: candlepin-koji 26 | description: "Candlepin {{ katello_repositories_version }} Koji Repository" 27 | baseurl: "http://koji.katello.org/releases/yum/katello-{{ 
katello_repositories_version }}/candlepin/el{{ ansible_distribution_major_version }}/x86_64/" 28 | priority: 1 29 | gpgcheck: 0 30 | 31 | - name: 'Pulp Koji repository' 32 | yum_repository: 33 | name: pulp-koji 34 | description: "Pulp {{ katello_repositories_version }} Koji Repository" 35 | baseurl: "https://repos.fedorapeople.org/repos/pulp/pulp/{{ katello_repositories_pulp_release }}/{{ katello_repositories_pulp_version }}/{{ ansible_distribution_major_version }}Server/x86_64/" 36 | priority: 1 37 | gpgcheck: 0 38 | 39 | - name: 'Katello Client Koji repository' 40 | yum_repository: 41 | name: katello-client-koji 42 | description: "Katello {{ katello_repositories_version }} Client Koji Repository" 43 | baseurl: "http://koji.katello.org/releases/yum/katello-{{ katello_repositories_version }}/client/el{{ ansible_distribution_major_version }}/x86_64/" 44 | priority: 1 45 | gpgcheck: 0 46 | 47 | - name: Foreman Koji repository 48 | yum_repository: 49 | name: foreman-koji 50 | description: "Foreman {{ foreman_repositories_version }} Koji Repository" 51 | baseurl: "{{ foreman_repository_base}}" 52 | priority: 1 53 | gpgcheck: 0 54 | 55 | - name: 'Foreman Plugins Koji repository' 56 | yum_repository: 57 | name: foreman-plugins-koji 58 | description: "Foreman Plugins {{ foreman_repositories_version }} Koji Repository" 59 | baseurl: "{{ foreman_plugin_repository_base }}" 60 | priority: 1 61 | gpgcheck: 0 62 | 63 | - name: 'Puppet 5 Repository' 64 | yum: 65 | name: https://yum.puppetlabs.com/puppet5/puppet5-release-el-{{ ansible_distribution_major_version }}.noarch.rpm 66 | state: present 67 | 68 | - name: Install foreman-release-scl repository 69 | yum: 70 | name: foreman-release-scl 71 | state: latest 72 | -------------------------------------------------------------------------------- /infra/kibana/saved_objects/visualization_7f8d3f60-5e84-11ec-8cd3-5d989c3b9841.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "attributes": { 4 | "description": "", 5 | "kibanaSavedObjectMeta": { 6 | "searchSourceJSON": "{\"query\":{\"query\":\"\",\"language\":\"kuery\"},\"filter\":[],\"indexRefName\":\"kibanaSavedObjectMeta.searchSourceJSON.index\"}" 7 | }, 8 | "title": "Sat AA per result", 9 | "uiStateJSON": "{}", 10 | "version": 1, 11 | "visState": "{\"title\":\"Sat AA per 
result\",\"type\":\"line\",\"aggs\":[{\"id\":\"4\",\"enabled\":true,\"type\":\"terms\",\"params\":{\"field\":\"result.keyword\",\"orderBy\":\"_key\",\"order\":\"desc\",\"size\":5,\"otherBucket\":false,\"otherBucketLabel\":\"Other\",\"missingBucket\":false,\"missingBucketLabel\":\"Missing\"},\"schema\":\"group\"},{\"id\":\"3\",\"enabled\":true,\"type\":\"terms\",\"params\":{\"field\":\"method.keyword\",\"orderBy\":\"_key\",\"order\":\"desc\",\"size\":5,\"otherBucket\":false,\"otherBucketLabel\":\"Other\",\"missingBucket\":false,\"missingBucketLabel\":\"Missing\"},\"schema\":\"group\"},{\"id\":\"2\",\"enabled\":true,\"type\":\"date_histogram\",\"params\":{\"field\":\"uploaded\",\"timeRange\":{\"from\":\"now-30d\",\"to\":\"now\"},\"useNormalizedEsInterval\":true,\"scaleMetricValues\":false,\"interval\":\"d\",\"drop_partials\":false,\"min_doc_count\":0,\"extended_bounds\":{}},\"schema\":\"segment\"},{\"id\":\"5\",\"enabled\":true,\"type\":\"count\",\"params\":{},\"schema\":\"metric\"}],\"params\":{\"type\":\"line\",\"grid\":{\"categoryLines\":false},\"categoryAxes\":[{\"id\":\"CategoryAxis-1\",\"type\":\"category\",\"position\":\"bottom\",\"show\":true,\"style\":{},\"scale\":{\"type\":\"linear\"},\"labels\":{\"show\":true,\"filter\":true,\"truncate\":100},\"title\":{}}],\"valueAxes\":[{\"id\":\"ValueAxis-1\",\"name\":\"LeftAxis-1\",\"type\":\"value\",\"position\":\"left\",\"show\":true,\"style\":{},\"scale\":{\"type\":\"linear\",\"mode\":\"normal\"},\"labels\":{\"show\":true,\"rotate\":0,\"filter\":false,\"truncate\":100},\"title\":{\"text\":\"Count\"}}],\"seriesParams\":[{\"show\":true,\"type\":\"line\",\"mode\":\"normal\",\"data\":{\"id\":\"5\",\"label\":\"Count\"},\"valueAxis\":\"ValueAxis-1\",\"drawLinesBetweenPoints\":true,\"lineWidth\":2,\"interpolate\":\"linear\",\"showCircles\":true}],\"addTooltip\":true,\"addLegend\":true,\"legendPosition\":\"right\",\"times\":[],\"addTimeMarker\":false,\"labels\":{},\"thresholdLine\":{\"show\":false,\"value\":10,\"width\":1,\"style\":\"full\",\"color\":\"#E7664C\"},\"row\":true}}" 12 | }, 13 | "id": "7f8d3f60-5e84-11ec-8cd3-5d989c3b9841", 14 | "migrationVersion": { 15 | "visualization": "7.10.0" 16 | }, 17 | "references": [ 18 | { 19 | "id": "e99fc6b0-5e80-11ec-8cd3-5d989c3b9841", 20 | "name": "kibanaSavedObjectMeta.searchSourceJSON.index", 21 | "type": "index-pattern" 22 | } 23 | ], 24 | "type": "visualization", 25 | "updated_at": "2021-12-16T15:26:11.030Z", 26 | "version": "WzYyOCwxXQ==" 27 | } 28 | ] -------------------------------------------------------------------------------- /playbooks/katello/katello_stable_installation_3.14.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: satellite6 3 | gather_facts: False 4 | vars: 5 | - installer_additional_ops: '' 6 | - run_after_install: '' 7 | roles: 8 | - ../common/roles/scalelab-nic-cleanup 9 | - ../common/roles/common 10 | - ../common/roles/remove-home-extend-root 11 | - ../common/roles/enlarge-arp-table 12 | - linux-system-roles.timesync 13 | tasks: 14 | - name: disable & enable repos 15 | shell: "{{ item }}" 16 | with_items: 17 | - subscription-manager repos --disable "*" 18 | - subscription-manager repos --enable rhel-7-server-rpms 19 | - subscription-manager repos --enable rhel-7-server-optional-rpms 20 | - subscription-manager repos --enable rhel-7-server-extras-rpms 21 | - yum install -y yum-utils 22 | when: ansible_distribution == "RedHat" 23 | 24 | - name: settup the server for installation 25 | shell: "{{ item }}" 26 | with_items: 
27 | - yum -y localinstall https://yum.theforeman.org/releases/1.24/el7/x86_64/foreman-release.rpm 28 | - yum -y localinstall https://fedorapeople.org/groups/katello/releases/yum/3.14/katello/el7/x86_64/katello-repos-latest.rpm 29 | - yum -y localinstall https://yum.puppet.com/puppet6-release-el-7.noarch.rpm 30 | - yum -y localinstall https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm 31 | - yum -y install foreman-release-scl 32 | 33 | - name: make a entry of IP & hostname in /etc/hosts file 34 | lineinfile: 35 | path: /etc/hosts 36 | line: '{{ ansible_default_ipv4.address }} {{ ansible_hostname }}' 37 | create: yes 38 | 39 | - name: install the katello 3.14 40 | shell: yum -y install katello 41 | 42 | - name: configure the katello 3.14 for REX's 43 | shell: "{{ item }}" 44 | with_items: 45 | - yum install epel-release -y 46 | - yum install ansible -y 47 | 48 | - name: construct installer command 49 | set_fact: 50 | installer_command: "foreman-installer --scenario katello --skip-checks-i-know-better --foreman-initial-admin-username {{ sat_user }} --foreman-initial-admin-password {{ sat_pass }} {{ installer_additional_ops }} --enable-foreman-plugin-ansible --enable-foreman-proxy-plugin-ansible --enable-foreman-plugin-remote-execution --enable-foreman-proxy-plugin-remote-execution-ssh" 51 | 52 | - name: show installer command 53 | debug: 54 | var: installer_command 55 | 56 | - name: configure the katello 3.14 57 | command: "{{ installer_command }}" 58 | 59 | - name: change the 'pool' setting in database.yml file #workaround for https://projects.theforeman.org/issues/29370 60 | replace: 61 | path: /etc/foreman/database.yml 62 | regexp: 'pool: 5$' 63 | replace: 'pool: 30' 64 | 65 | - name: create a file with content #workaround for https://projects.theforeman.org/issues/29370 66 | copy: 67 | dest: /usr/share/foreman/config/initializers/foreman-tasks-db-pool-size.rb 68 | content: | 69 | ForemanTasks.dynflow.config.db_pool_size = 30 70 | 71 | - name: run post-install script if provided 72 | shell: "{{ run_after_install }}" 73 | when: run_after_install != '' 74 | -------------------------------------------------------------------------------- /playbooks/satellite/roles/satellite-populate/tasks/main.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # Upload manifest to the Satellite 3 | - name: "Check if manifest is available" 4 | stat: 5 | path=/root/manifest.zip 6 | register: manifesting 7 | - name: "Deploy manifest file" 8 | copy: 9 | src: "{{ sat_manifest }}" 10 | dest: /root/manifest.zip 11 | force: yes 12 | when: "manifesting.stat.exists is not defined or manifesting.stat.exists == false" 13 | register: copying 14 | - name: "Upload manifest" 15 | command: 16 | hammer --username '{{ sat_user }}' --password '{{ sat_pass }}' subscription upload --organization '{{ organization }}' --file /root/manifest.zip 17 | register: uploading 18 | # rc==70 is the new manifest data are same as those already present. 19 | failed_when: not (uploading.rc == 70 or uploading.rc == 0) 20 | until: (uploading.rc == 70) or (uploading.rc == 0) 21 | retries: 5 22 | delay: 10 23 | when: "manifesting.stat.exists is defined or manifesting.stat.exists == true or copying.changed" 24 | 25 | # TODO: We want to be sure manifest is in the Sat before we start trying 26 | # to enable repos. 
There have to be some better way, but for now this 27 | # should suffice 28 | - name: "Show manifest is there" 29 | command: 30 | hammer --username '{{ sat_user }}' --password '{{ sat_pass }}' subscription manifest-history --organization '{{ organization }}' 31 | register: manifest_history 32 | - name: "Check that manifest is there" 33 | assert: 34 | that: 35 | - "'SUCCESS' in manifest_history.stdout" 36 | - "'file imported successfully' in manifest_history.stdout" 37 | # Reload manifest as othervise it fails frequently when listing repos 38 | - name: "Refresh manifest" 39 | command: 40 | hammer --username '{{ sat_user }}' --password '{{ sat_pass }}' subscription refresh-manifest --organization '{{ organization }}' 41 | 42 | # Enable repositories 43 | - name: "List repositories we already have (not to attempt to enable what we already have)" 44 | command: 45 | hammer --csv --username '{{ sat_user }}' --password '{{ sat_pass }}' repository list --organization '{{ organization }}' 46 | register: repository_list 47 | - include_tasks: try_enable_repo.yaml 48 | when: 'repository_list.stdout.find("," + item.repo + ",") == -1' 49 | with_items: 50 | "{{ sat_repos }}" 51 | 52 | # Start async of the repos 53 | - name: "Start (asynchronous) sync of repos we have just enabled" 54 | command: 55 | hammer --username '{{ sat_user }}' --password '{{ sat_pass }}' repository synchronize --product '{{ item.product }}' --name '{{ item.repo }}' --organization '{{ organization }}' --async 56 | with_items: 57 | "{{ sat_repos }}" 58 | when: "sat_repos_sync == 'async'" 59 | 60 | # Start sync of the repos 61 | - name: "Start (synchronous) sync of repos we have just enabled" 62 | command: 63 | hammer --username '{{ sat_user }}' --password '{{ sat_pass }}' repository synchronize --product '{{ item.product }}' --name '{{ item.repo }}' --organization '{{ organization }}' 64 | with_items: 65 | "{{ sat_repos }}" 66 | when: "sat_repos_sync == 'sync'" 67 | ... 
68 | -------------------------------------------------------------------------------- /playbooks/tests/puppet-single-setup.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: satellite6 3 | gather_facts: False 4 | vars: 5 | content_puppet_product: SatPerfPuppetProduct 6 | content_puppet_repo: SatPerfPuppetFakeModuleRepo 7 | content_puppet_module_author: satperf 8 | content_puppet_module_name: satperftest 9 | content_puppet_module_file: /tmp/puppet-satperftest.txt 10 | content_puppet_module_file_content: Some important sentence 11 | content_puppet_cv: SatPerfContentViewPuppetFakeModule 12 | content_puppet_module_dir: "/root/{{ content_puppet_module_name }}" 13 | tasks: 14 | - name: "Make sure puppet module build dir is empty" 15 | file: 16 | path: "{{ content_puppet_module_name }}" 17 | state: absent 18 | - name: "Generate puppet module template" 19 | command: 20 | "puppet module generate {{ content_puppet_module_author }}-{{ content_puppet_module_name }} --skip-interview" 21 | - name: "Create puppet module content" 22 | template: 23 | src: files/init.pp 24 | dest: "{{ content_puppet_module_name }}/manifests/init.pp" 25 | - name: "Build our puppet module" 26 | command: 27 | puppet module build "{{ content_puppet_module_name }}" 28 | 29 | - name: "Create Puppet product" 30 | command: 31 | hammer --username "{{ sat_user }}" --password "{{ sat_pass }}" product create --label "{{ content_puppet_product }}" --name "{{ content_puppet_product }}" --organization "{{ organization }}" 32 | register: cmd 33 | failed_when: cmd.rc != 0 and 'Name has already been taken for a product in this organization.' not in cmd.stderr 34 | - name: "Create puppet repository" 35 | command: 36 | hammer --username "{{ sat_user }}" --password "{{ sat_pass }}" repository create --content-type puppet --label "{{ content_puppet_repo }}" --name "{{ content_puppet_repo }}" --organization "{{ organization }}" --product "{{ content_puppet_product }}" 37 | register: cmd 38 | failed_when: cmd.rc != 0 and 'Name has already been taken for this product.' not in cmd.stderr 39 | - name: "Upload our puppet module to the repository" 40 | command: 41 | hammer --username "{{ sat_user }}" --password "{{ sat_pass }}" repository upload-content --name "{{ content_puppet_repo }}" --path "{{ content_puppet_module_name }}/pkg/{{ content_puppet_module_author }}-{{ content_puppet_module_name }}-0.1.0.tar.gz" --product "{{ content_puppet_product }}" --organization "{{ organization }}" 42 | register: upload 43 | until: "{{ upload.rc }} == 0" 44 | retries: 2 45 | delay: 10 46 | 47 | - name: "Create content view" 48 | command: 49 | hammer -u "{{ sat_user }}" -p "{{ sat_pass }}" content-view create --name "{{ content_puppet_cv }}" --organization "{{ organization }}" 50 | - name: "Add Puppet module to the content view" 51 | command: 52 | hammer -u "{{ sat_user }}" -p "{{ sat_pass }}" content-view puppet-module add --organization-id 1 --content-view "{{ content_puppet_cv }}" --name "{{ content_puppet_module_name }}" --author "{{ content_puppet_module_author }}" 53 | 54 | - name: "Publish and promote content view without async" 55 | command: 56 | hammer -u "{{ sat_user }}" -p "{{ sat_pass }}" content-view publish --name "{{ content_puppet_cv }}" --organization "{{ organization }}" 57 | ... 
58 | -------------------------------------------------------------------------------- /conf/satperf.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | # satperf project settings 3 | satperf_private_key: conf/id_rsa 4 | client_private_key: conf/id_rsa 5 | 6 | # Variables needed for host configuration 7 | configure_plain_network: True 8 | remove_home_partition: True 9 | 10 | # Variables needed by linux-system-roles.timesync Ansible role 11 | timesync_ntp_servers: 12 | - hostname: "clock.redhat.com" 13 | iburst: yes 14 | prefer: yes 15 | trust: yes 16 | 17 | # Required firewall rules to be applied 18 | satellite_firewall: 19 | - service: "RH-Satellite-6" 20 | state: enabled 21 | capsule_firewall: 22 | - service: "RH-Satellite-6-capsule" 23 | state: enabled 24 | 25 | # Partition an additional disk where all the Satellite/capsule data will be stored 26 | satellite_storage_pools: [] 27 | capsule_storage_pools: [] 28 | # Partition sizes recommended for {{ sat_version }} 29 | # Don't forget to keep the info updated 30 | #satellite_storage_pools: 31 | # - name: satellite_data 32 | # disks: 33 | # - sdb 34 | # volumes: 35 | # - name: opt 36 | # size: 3G 37 | # mount_point: /opt 38 | # - name: postgresql 39 | # size: 20G 40 | # mount_point: /var/opt/rh/rh-postgresql12/lib/pgsql 41 | # - name: pulp 42 | # size: 300G 43 | # mount_point: /var/lib/pulp 44 | # - name: puppetlabs 45 | # size: 500M 46 | # mount_point: /opt/puppetlabs 47 | # - name: qpidd 48 | # size: 1G 49 | # mount_point: /var/lib/qpidd 50 | #capsule_storage_pools: 51 | # - name: capsule_data 52 | # disks: 53 | # - sdb 54 | # volumes: 55 | # - name: opt 56 | # size: 500M 57 | # mount_point: /opt 58 | # - name: postgresql 59 | # size: 10G 60 | # mount_point: /var/opt/rh/rh-postgresql12/lib/pgsql 61 | # - name: pulp 62 | # size: 300G 63 | # mount_point: /var/lib/pulp 64 | 65 | # Containers setup 66 | containers_host_registration_options: {} 67 | containers_host_additional_repos: [] 68 | containers_image: rhel8-ubi-init-big_outdated 69 | 70 | # Satellite setup 71 | sat_version: "6.10" 72 | sat_user: admin 73 | sat_pass: changeme 74 | sat_email: root@localhost 75 | sat_org: Default Organization 76 | sat_orglabel: Default_Organization 77 | sat_orgid: 1 78 | sat_location: Default Location 79 | sat_repo_file: ../../../../../conf/sat610.repo 80 | satellite_registration_options: {} 81 | satellite_additional_repos: [] 82 | 83 | # Capsules setup 84 | capsule_registration_options: {} 85 | capsule_additional_repos: [] 86 | 87 | # VMs provisioning 88 | vms_vg: default 89 | vms_kss_os: http://repos.example.com/RHEL-7/7.4/Server/x86_64/os/ 90 | vms_ssh_key: "{{ lookup('file', '../../conf/id_rsa.pub') }}" 91 | vms_ks: rhel7-vm.ks 92 | vms_static_private_hosts: true 93 | 94 | vms: 95 | kvm1.example.com: 96 | - name: kvm1-vm1 97 | cpu_count: 4 98 | mem_size: 16000 99 | disk: "pool={{ vms_vg }},size=100,cache=none" 100 | nic_1g_bridge: br1 101 | nic_1g_mac: RANDOM 102 | nic_10g_bridge: br0 103 | nic_10g_mac: RANDOM 104 | ks: rhel72-vm.ks 105 | ip: 172.1.1.2 106 | netmask: 255.0.0.0 107 | rootpw: password 108 | extra_kernel_args: "" 109 | ... 
110 | -------------------------------------------------------------------------------- /playbooks/katello/katello_stable_installation.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | - hosts: satellite6 3 | gather_facts: False 4 | vars: 5 | - installer_additional_ops: '' 6 | - run_after_install: '' 7 | roles: 8 | - ../common/roles/scalelab-nic-cleanup 9 | - ../common/roles/common 10 | - ../common/roles/remove-home-extend-root 11 | - ../common/roles/enlarge-arp-table 12 | - linux-system-roles.timesync 13 | tasks: 14 | - name: disable & enable repos 15 | command: "{{ item }}" 16 | with_items: 17 | - subscription-manager repos --disable "*" 18 | - subscription-manager repos --enable rhel-7-server-rpms 19 | - subscription-manager repos --enable rhel-7-server-optional-rpms 20 | - subscription-manager repos --enable rhel-7-server-extras-rpms 21 | - yum install -y yum-utils 22 | when: ansible_distribution == "RedHat" 23 | 24 | - name: set up the server for installation 25 | command: "{{ item }}" 26 | with_items: 27 | - yum -y localinstall https://yum.theforeman.org/releases/2.0/el7/x86_64/foreman-release.rpm 28 | - yum -y localinstall https://fedorapeople.org/groups/katello/releases/yum/3.15/katello/el7/x86_64/katello-repos-latest.rpm 29 | - yum -y localinstall https://yum.puppet.com/puppet6-release-el-7.noarch.rpm 30 | - yum -y localinstall https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm 31 | - yum -y install foreman-release-scl 32 | 33 | - name: make an entry of IP & hostname in /etc/hosts file 34 | lineinfile: 35 | path: /etc/hosts 36 | line: '{{ ansible_default_ipv4.address }} {{ ansible_hostname }}' 37 | create: yes 38 | 39 | - name: query repos we have 40 | command: yum repolist 41 | register: yum_repolist_cmd 42 | - name: show repos we have 43 | debug: 44 | var: yum_repolist_cmd.stdout_lines 45 | 46 | - name: install the katello 3.15 47 | command: yum -y install katello 48 | 49 | - name: configure the katello 3.15 for REX 50 | shell: "{{ item }}" 51 | with_items: 52 | - yum install epel-release -y 53 | - yum install ansible -y 54 | 55 | - name: construct installer command 56 | set_fact: 57 | installer_command: "foreman-installer --scenario katello --skip-checks-i-know-better --foreman-initial-admin-username {{ sat_user }} --foreman-initial-admin-password {{ sat_pass }} {{ installer_additional_ops }} --enable-foreman-plugin-ansible --enable-foreman-proxy-plugin-ansible --enable-foreman-plugin-remote-execution --enable-foreman-proxy-plugin-remote-execution-ssh" 58 | 59 | - name: show installer command 60 | debug: 61 | var: installer_command 62 | 63 | - name: configure the katello 3.15 64 | command: "{{ installer_command }}" 65 | 66 | - name: change the 'pool' setting in database.yml file #workaround for https://projects.theforeman.org/issues/29370 67 | replace: 68 | path: /etc/foreman/database.yml 69 | regexp: 'pool: 5$' 70 | replace: 'pool: 30' 71 | 72 | - name: create a file with content #workaround for https://projects.theforeman.org/issues/29370 73 | copy: 74 | dest: /usr/share/foreman/config/initializers/foreman-tasks-db-pool-size.rb 75 | content: | 76 | ForemanTasks.dynflow.config.db_pool_size = 30 77 | 78 | - name: run post-install script if provided 79 | shell: "{{ run_after_install }}" 80 | when: run_after_install != '' 81 | 82 | -------------------------------------------------------------------------------- /docs/extra/capsule_vm.xml:
-------------------------------------------------------------------------------- [The XML markup of this file was lost during extraction; only text values survive, rendered as empty numbered lines with stray "
" entities. It appears to be a libvirt domain definition for the capsule VM with, in order of appearance: name capsule01, UUID 9b9a94fb-391e-4713-b0b1-e30b710f9308, memory/currentMemory 16000000, 8 vCPUs, OS type hvm, CPU model Nehalem, on_poweroff destroy, on_reboot/on_crash restart, and emulator /usr/libexec/qemu-kvm. The disk, network interface, and other device definitions are not recoverable.]
87 | 88 | 89 | 90 | -------------------------------------------------------------------------------- /scripts/run-puppet-workload-recorder.sh: -------------------------------------------------------------------------------- 1 | #!/bin/sh 2 | 3 | RECORDER_URL="$( echo "$1" | sed 's|/*$||' )" # URL of a recorder server, e.g. http://recorder.example.com:5000/ 4 | LOGS_DIR_P="$2" # Directory with logs from `./run-puppet-workload.sh` 5 | LOGS_DIR_B="$3" # Directory with logs from `./run-bench.sh` 6 | 7 | # FIXME: we are determining this only once, in second dir it might come from different satellite, but eventually we will merge these two I hope 8 | # Determine satellite's hostname 9 | sat_hostname=$( tail -n 1 $LOGS_DIR_P/info-hostname.log ) 10 | # Determine satellite's "satellite" package version 11 | sat_ver=$( grep '^satellite-[0-9]' $LOGS_DIR_P/info-rpm-qa.log ) 12 | 13 | # Find all invocations of PuppetOne and PuppetBunch workload 14 | for grepper in "PuppetOne" "PuppetBunch"; do 15 | for line in $( grep -n "/[0-9]\+-$grepper.log," "$LOGS_DIR_P/measurement.log" | cut -d ':' -f 1 ); do 16 | # Sum of the exit codes of commands before us 17 | score=$( head -n $( expr $line - 1 ) $LOGS_DIR_P/measurement.log | cut -d ',' -f 3 | paste -sd+ | bc ) 18 | # Number of containers involved 19 | number=$( basename $( head -n $line $LOGS_DIR_P/measurement.log | tail -n 1 | cut -d ',' -f 2 ) | cut -d '-' -f 1 ) 20 | 21 | # Data 22 | tmp=$( mktemp ) 23 | ./reg-average.py RegisterPuppet $LOGS_DIR_P/$number-$grepper.log 2>/dev/null | tail -n 1 >$tmp 24 | dataA1=$( cut -d ' ' -f 8 $tmp ) 25 | dataA2=$( cut -d ' ' -f 6 $tmp ) 26 | ./reg-average.py SetupPuppet $LOGS_DIR_P/$number-$grepper.log 2>/dev/null | tail -n 1 >$tmp 27 | dataB1=$( cut -d ' ' -f 8 $tmp ) 28 | dataB2=$( cut -d ' ' -f 6 $tmp ) 29 | ./reg-average.py PickupPuppet $LOGS_DIR_P/$number-$grepper.log 2>/dev/null | tail -n 1 >$tmp 30 | dataC1=$( cut -d ' ' -f 8 $tmp ) 31 | dataC2=$( cut -d ' ' -f 6 $tmp ) 32 | rm -f $tmp 33 | 34 | curl -X PUT "$RECORDER_URL/Sat6ContPerf/1/1/$grepper/$sat_hostname/$sat_ver/$score/$number/${dataA1:--}/${dataA2:--}/${dataB1:--}/${dataB2:--}/${dataC1:--}/${dataC2:--}" 35 | done 36 | done 37 | 38 | function put_average() { 39 | action="$1" 40 | grepper="$2" 41 | sum=0 42 | count=0 43 | for line in $( grep -n "$grepper" $LOGS_DIR_B/measurement.log | cut -d ":" -f 1 ); do 44 | row=$( head -n $line $LOGS_DIR_B/measurement.log | tail -n 1 ) 45 | score=$( head -n $line $LOGS_DIR_P/measurement.log | cut -d ',' -f 3 | paste -sd+ | bc ) 46 | beginning=$( echo "$row" | cut -d ',' -f '4' ) 47 | finish=$( echo "$row" | cut -d ',' -f '5' ) 48 | let sum+=$( expr $finish - $beginning ) 49 | let count+=1 50 | done 51 | average=$( echo "scale=3; $sum / $count" | bc ) 52 | curl -X PUT "$RECORDER_URL/Sat6ContPerf/1/1/$action/$sat_hostname/$sat_ver/$score/$average" 53 | } 54 | 55 | put_average "ManifestUpload" "01-manifest-upload-[0-9]\+.log" 56 | put_average "SyncRHEL7immediate" "12-repo-sync-rhel7.log" 57 | put_average "SyncRHEL6ondemand" "12-repo-sync-rhel6.log" 58 | put_average "SyncRHEL7Optionalondemand" "12-repo-sync-rhel7optional.log" 59 | put_average "PublishBigCV" "21-cv-all-publish.log" 60 | put_average "PromoteBigCV" "23-cv-all-promote-[0-9]\+.log" 61 | put_average "PublishSmallerFilteredCV" "33-cv-filtered-publish.log" 62 | put_average "RegisterBunchOfContainers" "44-register-[0-9]\+.log" # TODO: Also record how many passed/failed 63 | put_average "ReXDateOnAll" "52-rex-date.log" # TODO: Make sure we indicate on how much 
containers this ran 64 | put_average "ReXRHSMUpdateOnAll" "53-rex-sm-facts-update.log" 65 | --------------------------------------------------------------------------------