├── .github └── workflows │ └── ci.yml ├── .gitignore ├── .gitmodules ├── .pack_exclude ├── CODING_STYLE ├── LICENSE ├── MAINTAINERS ├── README.rst ├── cfg_file_definition_convention.rst ├── contributor_reviewer_maintainer_guidelines.rst ├── libguestfs ├── cfg │ └── build.cfg ├── control ├── deps │ └── tarball │ │ ├── Augeas.tgz │ │ ├── file_dir.tgz │ │ ├── fs_mount.tgz │ │ ├── hivex.tgz │ │ ├── inotify.tgz │ │ ├── misc.tgz │ │ └── selinux.tgz └── tests │ ├── cfg │ ├── guestfish_augeas.cfg │ ├── guestfish_block_dev.cfg │ ├── guestfish_file_dir.cfg │ ├── guestfish_fs_attr_ops.cfg │ ├── guestfish_fs_mount.cfg │ ├── guestfish_fs_swap.cfg │ ├── guestfish_lvm.cfg │ ├── guestfish_misc.cfg │ ├── guestfs_add.cfg │ ├── guestfs_block_operations.cfg │ ├── guestfs_file_operations.cfg │ ├── guestfs_inspect_operations.cfg │ ├── guestfs_list_operations.cfg │ ├── guestfs_operated_disk.cfg │ ├── guestfs_part_operations.cfg │ ├── guestfs_volume_operations.cfg │ ├── guestmount.cfg │ ├── virt_cat.cfg │ ├── virt_edit.cfg │ ├── virt_file_operations.cfg │ ├── virt_inspect_operations.cfg │ ├── virt_list_operations.cfg │ ├── virt_part_operations.cfg │ ├── virt_sysprep.cfg │ ├── virt_sysprep_opt.cfg │ ├── virt_volume_operations.cfg │ └── virt_win_reg.cfg │ ├── guestfish_augeas.py │ ├── guestfish_block_dev.py │ ├── guestfish_file_dir.py │ ├── guestfish_fs_attr_ops.py │ ├── guestfish_fs_mount.py │ ├── guestfish_fs_swap.py │ ├── guestfish_lvm.py │ ├── guestfish_misc.py │ ├── guestfs_add.py │ ├── guestfs_block_operations.py │ ├── guestfs_file_operations.py │ ├── guestfs_inspect_operations.py │ ├── guestfs_list_operations.py │ ├── guestfs_operated_disk.py │ ├── guestfs_part_operations.py │ ├── guestfs_volume_operations.py │ ├── guestmount.py │ ├── virt_cat.py │ ├── virt_edit.py │ ├── virt_file_operations.py │ ├── virt_inspect_operations.py │ ├── virt_list_operations.py │ ├── virt_part_operations.py │ ├── virt_sysprep.py │ ├── virt_sysprep_opt.py │ ├── virt_volume_operations.py │ └── 
virt_win_reg.py ├── libvirt ├── cfg │ ├── build.cfg │ ├── default_tests │ └── tests-example.cfg ├── control └── tests │ ├── cfg │ ├── backingchain │ │ ├── blockcommand.cfg │ │ ├── blockcommit │ │ │ ├── blockcommit_all_block_chain.cfg │ │ │ ├── blockcommit_base_image_exist_backingfile.cfg │ │ │ ├── blockcommit_conventional_chain.cfg │ │ │ ├── blockcommit_relative_path.cfg │ │ │ ├── blockcommit_with_async_option.cfg │ │ │ ├── blockcommit_with_bandwith.cfg │ │ │ ├── blockcommit_with_delete_option.cfg │ │ │ ├── blockcommit_with_keep_overlay.cfg │ │ │ ├── blockcommit_with_shallow_option.cfg │ │ │ └── check_allocation_watermark_during_blockcommit.cfg │ │ ├── blockcommit_basic_function.cfg │ │ ├── blockcopy.cfg │ │ ├── blockcopy │ │ │ ├── blockcopy_with_async_option.cfg │ │ │ ├── blockcopy_with_backingfile.cfg │ │ │ ├── blockcopy_with_bandwidth.cfg │ │ │ ├── blockcopy_with_blockdev_option.cfg │ │ │ ├── blockcopy_with_conventional_chain.cfg │ │ │ ├── blockcopy_with_different_dest_xml.cfg │ │ │ ├── blockcopy_with_different_qcow2_properties.cfg │ │ │ ├── blockcopy_with_disk_driver_attributes.cfg │ │ │ ├── blockcopy_with_granularity_buf_size_option.cfg │ │ │ ├── blockcopy_with_syncwrites_option.cfg │ │ │ ├── blockcopy_with_xml_option_to_luks_slice_image.cfg │ │ │ └── blockcopy_with_zero_length_disk.cfg │ │ ├── blockcopy_options.cfg │ │ ├── blockjob │ │ │ ├── blockjob_pivot_after_irregular_operations.cfg │ │ │ ├── blockjob_with_async_option.cfg │ │ │ ├── blockjob_with_bandwidth_option.cfg │ │ │ └── blockjob_with_raw_option.cfg │ │ ├── blockjob_options.cfg │ │ ├── blockpull │ │ │ ├── blockpull_base_image_exist_backingfile.cfg │ │ │ ├── blockpull_conventional_chain.cfg │ │ │ ├── blockpull_relative_path.cfg │ │ │ ├── blockpull_with_async.cfg │ │ │ └── blockpull_with_bandwidth.cfg │ │ ├── blockresize.cfg │ │ ├── domblkthreshold.cfg │ │ ├── event_checking_test │ │ │ └── commit_pull_copy_event.cfg │ │ ├── hotplug_test │ │ │ └── hot_un_plug.cfg │ │ ├── lifecycle │ │ │ └── 
block_operation_after_lifecycle.cfg │ │ ├── lifecycle_test │ │ │ └── check_mirror_with_restart_libvirtd.cfg │ │ ├── negative_scenario │ │ │ ├── blockcommit_invalid_top_base.cfg │ │ │ ├── blockcopy_with_invalid_destination.cfg │ │ │ ├── blockjob_with_invalid_operation.cfg │ │ │ ├── blockpull_with_invalid_base.cfg │ │ │ ├── do_concurrent_operation.cfg │ │ │ └── interrupt_blockcopy.cfg │ │ ├── repeatability_test │ │ │ └── commit_pull_copy_after_snap.cfg │ │ ├── virsh_domblk │ │ │ └── domblkthreshold_with_backingchain_element.cfg │ │ ├── virtual_disks_blockcopy_options.cfg │ │ ├── virtual_disks_relative_blockcommit.cfg │ │ ├── virtual_disks_snapshot_blockpull.cfg │ │ └── with_disk_attributes_test │ │ │ ├── commit_pull_with_disk_driver_attributes.cfg │ │ │ └── commit_pull_with_disk_source_attributes.cfg │ ├── bios │ │ ├── boot_integration.cfg │ │ ├── boot_order_ovmf.cfg │ │ ├── boot_order_seabios.cfg │ │ ├── virsh_boot.cfg │ │ ├── virsh_boot_reset_nvram.cfg │ │ ├── virsh_boot_sysinfo.cfg │ │ ├── virsh_boot_tseg.cfg │ │ └── vm_boot_nvram_source.cfg │ ├── channel │ │ └── channel_functional.cfg │ ├── chardev │ │ ├── connection_and_operation │ │ │ ├── data_transfer.cfg │ │ │ └── file_append_option.cfg │ │ ├── log │ │ │ └── chardev_with_log_file.cfg │ │ └── virsh_console_operation │ │ │ ├── prevent_multiple_console.cfg │ │ │ └── release_console.cfg │ ├── controller │ │ ├── controller_functional.cfg │ │ ├── multiple_controller_ppc.cfg │ │ ├── pci_controller_memreserve.cfg │ │ ├── pcibridge.cfg │ │ ├── pcie_controller_hotplug_option.cfg │ │ └── pcie_root_port_controller.cfg │ ├── cpu │ │ ├── aarch64_cpu_sve.cfg │ │ ├── aarch64_gic_version.cfg │ │ ├── diagnose_data.cfg │ │ ├── diagnose_spinlock_yield_forward.cfg │ │ ├── guestpin.cfg │ │ ├── iothread.cfg │ │ ├── lifecycle_time.cfg │ │ ├── max_vcpus.cfg │ │ ├── multi_vms_with_stress.cfg │ │ ├── powerpc_hmi.cfg │ │ ├── ppc_cpu_mode.cfg │ │ ├── ppccpucompat.cfg │ │ ├── setvcpu.cfg │ │ ├── topology.cfg │ │ ├── vcpu_affinity.cfg │ │ 
├── vcpu_cache.cfg │ │ ├── vcpu_cve.cfg │ │ ├── vcpu_feature.cfg │ │ ├── vcpu_hotpluggable.cfg │ │ ├── vcpu_max_topology.cfg │ │ ├── vcpu_metrics.cfg │ │ ├── vcpu_misc.cfg │ │ ├── vcpu_nested.cfg │ │ ├── vcpu_sched_core.cfg │ │ ├── vcpupin.cfg │ │ ├── virsh_cpu_compare_xml.cfg │ │ └── vm_features.cfg │ ├── daemon │ │ ├── check_daemon_after_remove_pkgs.cfg │ │ ├── check_daemon_default_status.cfg │ │ ├── check_socket_files_with_services.cfg │ │ ├── check_users.cfg │ │ ├── check_virsh_connection_switch_between_monolithic_and_modular.cfg │ │ ├── conf_file │ │ │ ├── libvirtd_conf │ │ │ │ ├── check_negative_parameter.cfg │ │ │ │ ├── host_uuid.cfg │ │ │ │ ├── prefix_num.cfg │ │ │ │ ├── processing_controls.cfg │ │ │ │ ├── set_audit_logging.cfg │ │ │ │ ├── unix_sock.cfg │ │ │ │ └── virt_admin_logging.cfg │ │ │ ├── qemu_conf │ │ │ │ ├── auto_dump.cfg │ │ │ │ ├── clear_emulator_capabilities.cfg │ │ │ │ ├── seccomp_sandbox.cfg │ │ │ │ ├── set_process_name.cfg │ │ │ │ └── set_virtlogd.cfg │ │ │ ├── sysconfig_libvirt_guests │ │ │ │ └── libvirt_guests.cfg │ │ │ ├── sysconfig_libvirtd │ │ │ │ └── libvirtd_config.cfg │ │ │ └── systemd_config │ │ │ │ └── systemd_config.cfg │ │ ├── crash_regression.cfg │ │ ├── daemon_functional.cfg │ │ ├── enable_disable_daemon_socket_services.cfg │ │ ├── init_scripts.cfg │ │ ├── kill_qemu.cfg │ │ ├── kill_started.cfg │ │ ├── kill_starting.cfg │ │ ├── libvirt_guests_service.cfg │ │ ├── libvirtd │ │ │ ├── libvirtd.cfg │ │ │ └── libvirtd_multi_conn.cfg │ │ ├── qemu_monitor_socket_creation_delay.cfg │ │ └── restart_consist.cfg │ ├── domain_life_cycle │ │ └── create_destroy_domain.cfg │ ├── embedded_qemu │ │ └── embedded_qemu.cfg │ ├── event │ │ └── virsh_event.cfg │ ├── features │ │ └── ras.cfg │ ├── gpu │ │ ├── hotplug_gpu.cfg │ │ ├── lifecycle_vm_with_gpu.cfg │ │ ├── start_vm_with_gpu.cfg │ │ ├── start_vm_with_gpu_managed_disabled.cfg │ │ ├── vm_cuda_sanity.cfg │ │ ├── vm_mig_sanity.cfg │ │ └── vm_nvidia_gpu_driver.cfg │ ├── graphics │ │ └── 
graphics_functional.cfg │ ├── guest_agent │ │ ├── agent_lifecycle.cfg │ │ ├── domhostname.cfg │ │ ├── guest_agent.cfg │ │ └── unix_source_path │ │ │ └── agent_auto_generated_unix_source_path_session_mode.cfg │ ├── guest_kernel_debugging │ │ ├── nmi_test.cfg │ │ └── virsh_dump.cfg │ ├── guest_os_booting │ │ ├── boot_menu │ │ │ └── bootmenu.cfg │ │ ├── boot_order │ │ │ ├── boot_from_cdrom_device.cfg │ │ │ ├── boot_from_disk_device.cfg │ │ │ ├── boot_from_usb_device.cfg │ │ │ ├── boot_from_virtiofs_device.cfg │ │ │ ├── boot_order_setting_negative.cfg │ │ │ ├── boot_with_multiple_boot_dev.cfg │ │ │ ├── boot_with_multiple_boot_order.cfg │ │ │ └── hotplug_device_with_boot_order.cfg │ │ ├── direct_kernel_boot │ │ │ └── direct_kernel_boot.cfg │ │ ├── firmware_configuration │ │ │ ├── os_acpi.cfg │ │ │ ├── smbios_mode.cfg │ │ │ └── sysinfo_fwcfg.cfg │ │ ├── lifecycle │ │ │ ├── lifecycle_boot.cfg │ │ │ └── lifecycle_reset_nvram.cfg │ │ ├── migration │ │ │ └── migration_boot.cfg │ │ ├── negative │ │ │ ├── boot_without_bootable_devices.cfg │ │ │ └── update_device_boot_order.cfg │ │ ├── os_configuration │ │ │ └── without_default_os_attributes.cfg │ │ ├── ovmf_firmware │ │ │ ├── ovmf_backed_nvram.cfg │ │ │ ├── ovmf_firmware_feature.cfg │ │ │ ├── ovmf_loader.cfg │ │ │ ├── ovmf_nvram.cfg │ │ │ ├── ovmf_seclabel_in_nvram.cfg │ │ │ └── ovmf_smm.cfg │ │ ├── seabios_firmware │ │ │ ├── seabios_loader.cfg │ │ │ └── seabios_rebootimeout.cfg │ │ └── useserial │ │ │ └── useserial.cfg │ ├── guest_resource_control │ │ ├── control_cgroup.cfg │ │ └── virsh_blkiotune.cfg │ ├── host_hypervisor │ │ ├── capabilities_output.cfg │ │ └── domcapabilities_output.cfg │ ├── hotplug_serial.cfg │ ├── in_place_upgrade_guest.cfg │ ├── incremental_backup │ │ ├── incremental_backup_backing_chain.cfg │ │ ├── incremental_backup_checkpoint_cmd.cfg │ │ ├── incremental_backup_event_monitor.cfg │ │ ├── incremental_backup_migration.cfg │ │ ├── incremental_backup_multidisk.cfg │ │ ├── incremental_backup_pull_mode.cfg │ 
│ └── incremental_backup_push_mode.cfg │ ├── iommu │ │ └── iommu_device.cfg │ ├── kernel_panic.cfg │ ├── lease_device.cfg │ ├── libvirt_bench │ │ ├── libvirt_bench_domstate_switch_by_groups.cfg │ │ ├── libvirt_bench_domstate_switch_in_loop.cfg │ │ ├── libvirt_bench_domstate_switch_with_iozone.cfg │ │ ├── libvirt_bench_domstate_switch_with_unixbench.cfg │ │ ├── libvirt_bench_dump_with_netperf.cfg │ │ ├── libvirt_bench_dump_with_unixbench.cfg │ │ ├── libvirt_bench_serial_hotplug.cfg │ │ ├── libvirt_bench_ttcp_from_guest_to_host.cfg │ │ ├── libvirt_bench_usb_hotplug.cfg │ │ └── libvirt_bench_vcpu_hotplug.cfg │ ├── libvirt_cputune.cfg │ ├── libvirt_hooks.cfg │ ├── libvirt_hugepage.cfg │ ├── libvirt_mem.cfg │ ├── libvirt_network_bandwidth.cfg │ ├── libvirt_package.cfg │ ├── libvirt_qemu_cmdline.cfg │ ├── libvirt_scsi.cfg │ ├── libvirt_scsi_hostdev.cfg │ ├── libvirt_vcpu_plug_unplug.cfg │ ├── libvirtd_start.cfg │ ├── limit.cfg │ ├── lxc.xml │ ├── lxc │ │ └── lxc_life_cycle.cfg │ ├── macvtap.cfg │ ├── memory │ │ ├── allocpages.cfg │ │ ├── gmap.cfg │ │ ├── hmat.cfg │ │ ├── memballoon.cfg │ │ ├── memory_allocation │ │ │ ├── define_value_unit.cfg │ │ │ ├── lifecycle_with_memory_allocation.cfg │ │ │ ├── memory_allocation_config_modification.cfg │ │ │ └── memory_invalid_value.cfg │ │ ├── memory_attach_device.cfg │ │ ├── memory_backing │ │ │ ├── discard_memory_content_setting.cfg │ │ │ ├── hugepage_mount_path.cfg │ │ │ ├── hugepage_nodeset.cfg │ │ │ ├── lifecycle_for_hugepage.cfg │ │ │ ├── memory_access_mode.cfg │ │ │ ├── memory_source_and_allocation.cfg │ │ │ └── page_locking_and_shared_pages.cfg │ │ ├── memory_balloon │ │ │ ├── guest_without_mem_balloon.cfg │ │ │ ├── mem_balloon_autodeflate.cfg │ │ │ └── period_config_of_memory_balloon.cfg │ │ ├── memory_devices │ │ │ ├── change_virtio_mem_request_size.cfg │ │ │ ├── dimm_memory_hot_unplug.cfg │ │ │ ├── dimm_memory_hotplug.cfg │ │ │ ├── dimm_memory_lifecycle.cfg │ │ │ ├── dimm_memory_with_access_and_discard.cfg │ │ │ ├── 
dimm_memory_with_auto_placement_numatune.cfg │ │ │ ├── dimm_memory_with_memory_alloccation_and_numa.cfg │ │ │ ├── invalid_dimm_memory_device_config.cfg │ │ │ ├── invalid_nvdimm_memory_device_config.cfg │ │ │ ├── invalid_virtio_mem_config.cfg │ │ │ ├── lifecycle_for_file_backed_nvdimm_memory.cfg │ │ │ ├── lifecycle_of_mixed_memory_devices.cfg │ │ │ ├── nvdimm_memory_turn_to_dram.cfg │ │ │ ├── virtio_mem_access_and_discard.cfg │ │ │ ├── virtio_mem_auto_placement.cfg │ │ │ ├── virtio_mem_coldplug_and_unplug.cfg │ │ │ ├── virtio_mem_device_lifecycle.cfg │ │ │ ├── virtio_mem_dynamic_slots.cfg │ │ │ ├── virtio_mem_hot_unplug.cfg │ │ │ ├── virtio_mem_hotplug.cfg │ │ │ ├── virtio_mem_with_memory_allocation_and_numa.cfg │ │ │ ├── virtio_mem_with_memory_backing_type.cfg │ │ │ └── virtio_memory_with_numa_node_tuning.cfg │ │ ├── memory_discard.cfg │ │ ├── memory_hotplug.cfg │ │ ├── memory_misc.cfg │ │ ├── memory_tuning │ │ │ └── memory_tuning_settings.cfg │ │ ├── memory_update_device.cfg │ │ ├── nvdimm.cfg │ │ ├── secure_dump.cfg │ │ └── virsh_setmem.cfg │ ├── migration │ │ ├── abort_precopy_migration │ │ │ └── abort_by_domjobabort_on_target.cfg │ │ ├── async_job │ │ │ ├── abort_migration_with_wrong_api_flag.cfg │ │ │ └── migration_domjobinfo.cfg │ │ ├── async_ops │ │ │ ├── destroy_vm_during_finishphase.cfg │ │ │ ├── destroy_vm_during_performphase.cfg │ │ │ ├── migrate_vm_again_during_migration.cfg │ │ │ ├── pause_resume_vm_during_migration.cfg │ │ │ └── query_info_during_migration.cfg │ │ ├── destructive_operations_around_live_migration │ │ │ ├── kill_qemu_during_finishphase.cfg │ │ │ ├── kill_qemu_during_performphase.cfg │ │ │ ├── kill_virtiofsd_during_performphase.cfg │ │ │ └── migration_kill_libvirt_daemon.cfg │ │ ├── guest_lifecycle_operations_during_migration │ │ │ └── migration_poweroff_vm.cfg │ │ ├── live_migration.cfg │ │ ├── memory_copy_mode │ │ │ ├── postcopy.cfg │ │ │ └── precopy.cfg │ │ ├── migrate_bandwidth.cfg │ │ ├── migrate_ceph.cfg │ │ ├── migrate_event.cfg │ 
│ ├── migrate_gluster.cfg │ │ ├── migrate_graphics.cfg │ │ ├── migrate_mem.cfg │ │ ├── migrate_network.cfg │ │ ├── migrate_options_shared.cfg │ │ ├── migrate_over_unix.cfg │ │ ├── migrate_service_control.cfg │ │ ├── migrate_storage.cfg │ │ ├── migrate_vm.cfg │ │ ├── migrate_vm_in_various_status │ │ │ └── migrate_paused_vm.cfg │ │ ├── migrate_with_legacy_guest.cfg │ │ ├── migrate_with_panic_device.cfg │ │ ├── migrate_with_various_hostname.cfg │ │ ├── migrate_with_virtual_devices.cfg │ │ ├── migration_cmd │ │ │ ├── setmaxdowntime_and_getmaxdowntime.cfg │ │ │ └── setspeed_and_getspeed.cfg │ │ ├── migration_misc │ │ │ ├── canonical_paths_in_shared_filesystems.cfg │ │ │ ├── migration_with_cpu_mode.cfg │ │ │ ├── migration_with_host_hostname.cfg │ │ │ ├── migration_with_host_uuid.cfg │ │ │ ├── migration_with_special_cpu.cfg │ │ │ ├── non_canonical_paths_in_shared_filesystems.cfg │ │ │ └── qemu_err_incoming_migration.cfg │ │ ├── migration_performance_tuning │ │ │ ├── migration_maxdowntime.cfg │ │ │ ├── migration_memory_compression.cfg │ │ │ ├── migration_parallel_connections.cfg │ │ │ ├── migration_precopy_bandwidth.cfg │ │ │ ├── migration_timeout_action.cfg │ │ │ ├── migration_vm_cpu_auto_converge.cfg │ │ │ ├── migration_zerocopy.cfg │ │ │ ├── migration_zerocopy_abort.cfg │ │ │ ├── migration_zerocopy_unsupported_feature_combinations.cfg │ │ │ └── parallel_migration_set_connection_num_without_enabing_parallel_capability.cfg │ │ ├── migration_resource_limit │ │ │ ├── migration_postcopy_bandwidth.cfg │ │ │ └── migration_precopy_and_postcopy_bandwidth.cfg │ │ ├── migration_uri │ │ │ ├── migration_desturi.cfg │ │ │ ├── migration_network_data_transport_tcp.cfg │ │ │ ├── migration_network_data_transport_tcp_listen_address.cfg │ │ │ ├── migration_network_data_transport_tcp_migrateuri.cfg │ │ │ ├── migration_network_data_transport_tcp_migration_address.cfg │ │ │ ├── migration_network_data_transport_tcp_migration_port_reuse.cfg │ │ │ ├── migration_network_data_transport_tls.cfg │ │ 
│ ├── migration_network_data_transport_tls_migrate_tls_force.cfg │ │ │ ├── migration_network_data_transport_tls_wrong_cert_configuration.cfg │ │ │ ├── migration_network_data_transport_tunnelled.cfg │ │ │ ├── migration_network_data_transport_unix_proxy.cfg │ │ │ ├── tcp_migration_port_allocation_port_occupied_by_other_app.cfg │ │ │ ├── tls_migrate_tls_x509_verify_on_src.cfg │ │ │ └── tls_migrate_tls_x509_verify_on_target.cfg │ │ ├── migration_with_numa_topology.cfg │ │ ├── migration_with_virtiofs │ │ │ ├── migration_with_externally_launched_virtiofs_dev.cfg │ │ │ └── migration_with_internally_launched_virtiofs_dev.cfg │ │ ├── migration_with_vtpm │ │ │ ├── migration_with_external_tpm.cfg │ │ │ ├── migration_with_shared_tpm.cfg │ │ │ ├── migration_with_vtpm_dev.cfg │ │ │ └── migration_with_vtpm_state_on_block_dev.cfg │ │ ├── migration_xml_update.cfg │ │ ├── non_live_migration │ │ │ └── non_live_migration.cfg │ │ ├── offline_migration │ │ │ └── offline_migration.cfg │ │ ├── p2p_keepalive │ │ │ ├── customized_keepalive.cfg │ │ │ ├── default_keepalive.cfg │ │ │ ├── src_and_dest_keepalive_disabled.cfg │ │ │ └── src_keepalive_disabled.cfg │ │ ├── pause_postcopy_migration_and_recover │ │ │ ├── no_paused_during_recover_migration.cfg │ │ │ ├── pause_and_disruptive_and_recover.cfg │ │ │ ├── pause_and_io_error_and_recover.cfg │ │ │ ├── pause_and_recover_and_disruptive.cfg │ │ │ ├── pause_and_recover_twice.cfg │ │ │ ├── pause_by_domjobabort_and_recover.cfg │ │ │ ├── pause_by_network_and_recover.cfg │ │ │ ├── pause_by_proxy_and_recover.cfg │ │ │ ├── unattended_migration.cfg │ │ │ └── unattended_migration_and_disruptive.cfg │ │ └── vm_configuration_and_status │ │ │ ├── migration_suspend_target_vm.cfg │ │ │ ├── persist_target_vm_and_undefine_src_vm.cfg │ │ │ └── persist_target_vm_and_undefine_src_vm_and_abort.cfg │ ├── migration_with_copy_storage │ │ ├── async_job │ │ │ ├── abort_job.cfg │ │ │ └── query_domain_job_info.cfg │ │ ├── autoskipped_disks.cfg │ │ ├── 
disk_io_load_in_vm.cfg │ │ ├── disk_type_coverage │ │ │ └── block_disk.cfg │ │ ├── migrate_disks.cfg │ │ ├── migrate_vm_disk_on_shared_storage.cfg │ │ ├── migration_bandwidth_limit.cfg │ │ ├── migration_retain_sparsity.cfg │ │ ├── migration_vm_has_no_disk.cfg │ │ ├── migration_with_backingchain.cfg │ │ ├── migration_with_vhostvdpa.cfg │ │ ├── network_data_transport │ │ │ ├── tcp_disks_uri.cfg │ │ │ ├── tcp_listen_address.cfg │ │ │ ├── tcp_migrateuri_and_disks_port.cfg │ │ │ ├── tcp_port_reuse.cfg │ │ │ ├── tls.cfg │ │ │ ├── tls_destination.cfg │ │ │ ├── tls_wrong_cert_configurations.cfg │ │ │ └── unix_proxy.cfg │ │ ├── performance_tuning │ │ │ └── copy_storage_synchronous_writes.cfg │ │ ├── precreate_none_target_disk.cfg │ │ ├── target_image_larger_than_source.cfg │ │ └── target_image_smaller_than_source.cfg │ ├── multifunction.cfg │ ├── multiqueue.cfg │ ├── multivm_loadstress.cfg │ ├── multivm_stress │ │ ├── multivm_cpustress.cfg │ │ └── multivm_iostress.cfg │ ├── npiv │ │ ├── npiv_concurrent.cfg │ │ ├── npiv_hostdev_passthrough.cfg │ │ ├── npiv_image_from_pool.cfg │ │ ├── npiv_nodedev_create_destroy.cfg │ │ ├── npiv_pool_regression.cfg │ │ ├── npiv_pool_vol.cfg │ │ ├── npiv_restart_libvirtd.cfg │ │ ├── npiv_snapshot.cfg │ │ └── npiv_virtual_disk.cfg │ ├── numa │ │ ├── guest_numa.cfg │ │ ├── guest_numa_hmat │ │ │ └── hmat_info.cfg │ │ ├── guest_numa_node_tuning │ │ │ ├── auto_memory_incompatible_guest_binding.cfg │ │ │ ├── auto_memory_nodeset_placement.cfg │ │ │ ├── auto_memory_placement_numad_fail.cfg │ │ │ ├── change_numa_tuning.cfg │ │ │ ├── host_guest_mixed_memory_binding.cfg │ │ │ ├── invalid_nodeset_of_numa_memory_binding.cfg │ │ │ ├── memory_binding_setting.cfg │ │ │ ├── memory_binding_with_emulator_thread.cfg │ │ │ ├── numa_mem_binding_with_offline_cpu.cfg │ │ │ └── specific_numa_memory_bind_hugepage.cfg │ │ ├── guest_numa_topology │ │ │ ├── change_vcpu_pin.cfg │ │ │ ├── numa_topology_with_cpu_topology.cfg │ │ │ ├── numa_topology_with_hugepage.cfg │ │ │ 
├── numa_topology_with_numa_distance.cfg │ │ │ └── various_numa_topology_settings.cfg │ │ ├── host_numa │ │ │ ├── host_numa_info.cfg │ │ │ └── host_numa_ksm_parameters.cfg │ │ ├── numa_capabilities.cfg │ │ ├── numa_config_with_auto_placement.cfg │ │ ├── numa_memAccess.cfg │ │ ├── numa_memory.cfg │ │ ├── numa_memory_migrate.cfg │ │ ├── numa_memory_spread.cfg │ │ ├── numa_node_memory_bind.cfg │ │ ├── numa_node_tuning │ │ │ └── auto_mem_placement_with_incompatible_host_nodeset.cfg │ │ ├── numa_nodeset.cfg │ │ ├── numa_numad.cfg │ │ ├── numa_numanode_cpu_info.cfg │ │ ├── numa_numatune_cpu.cfg │ │ ├── numa_preferred_undefine.cfg │ │ └── numad_vcpupin.cfg │ ├── nwfilter │ │ ├── filter_aready_present_binding.cfg │ │ ├── nwfilter_binding_create.cfg │ │ ├── nwfilter_binding_delete.cfg │ │ ├── nwfilter_binding_dumpxml.cfg │ │ ├── nwfilter_binding_list.cfg │ │ ├── nwfilter_daemon_restart.cfg │ │ ├── nwfilter_edit_uuid.cfg │ │ ├── nwfilter_update_lock.cfg │ │ ├── nwfilter_update_vm_running.cfg │ │ ├── nwfilter_vm_attach.cfg │ │ ├── nwfilter_vm_start.cfg │ │ └── vm_destroy_with_nwfilter.cfg │ ├── papr_hpt.cfg │ ├── passthrough │ │ ├── ap │ │ │ ├── libvirt_ap_passthrough.cfg │ │ │ ├── libvirt_ap_passthrough_autostart.cfg │ │ │ └── libvirt_ap_passthrough_hotplug.cfg │ │ ├── ccw │ │ │ ├── libvirt_ccw_passthrough.cfg │ │ │ └── libvirt_ccw_passthrough_read_write.cfg │ │ ├── pci │ │ │ ├── ism_pci_passthrough.cfg │ │ │ ├── libvirt_pci_passthrough.cfg │ │ │ ├── libvirt_pci_passthrough_hotplug.cfg │ │ │ └── vfio.cfg │ │ └── robustness │ │ │ └── passthrough_robustness.cfg │ ├── perf_kvm.cfg │ ├── ppc_device.cfg │ ├── remote_access │ │ ├── remote_tls_multiple_certs.cfg │ │ ├── remote_tls_priority.cfg │ │ ├── remote_with_ssh.cfg │ │ ├── remote_with_tcp.cfg │ │ ├── remote_with_tls.cfg │ │ ├── remote_with_unix.cfg │ │ └── virsh_non_root_polkit.cfg │ ├── remove_guest.cfg │ ├── resource_abnormal.cfg │ ├── save_and_restore │ │ ├── abort_managedsave.cfg │ │ ├── abort_save.cfg │ │ ├── 
managedsave_remove.cfg │ │ ├── restore_from_local_file.cfg │ │ ├── restore_from_nfs_file.cfg │ │ ├── restore_from_unqualified_file.cfg │ │ ├── save_image_define.cfg │ │ ├── save_image_dumpxml.cfg │ │ ├── save_to_block.cfg │ │ ├── save_to_nfs.cfg │ │ ├── save_with_formats.cfg │ │ └── save_with_options.cfg │ ├── scalability │ │ └── define_create_vm_with_more_vcpus.cfg │ ├── scsi │ │ ├── scsi_command_test_hostdev.cfg │ │ ├── scsi_controller_driver_plug_unplug.cfg │ │ ├── scsi_device.cfg │ │ └── scsi_disk_attributes.cfg │ ├── secure_execution │ │ └── confirm_environment.cfg │ ├── security │ │ ├── rng │ │ │ ├── libvirt_rng.cfg │ │ │ └── virtio_rng.cfg │ │ └── virt_what_cvm.cfg │ ├── serial │ │ ├── serial_functional.cfg │ │ └── serial_pty_log.cfg │ ├── smt.cfg │ ├── snapshot │ │ ├── check_delete_and_revert_snap_event_in_multiple_branch.cfg │ │ ├── delete_disk_and_memory_snapshot.cfg │ │ ├── delete_disk_only_snapshot.cfg │ │ ├── delete_external_after_reverting_internal.cfg │ │ ├── delete_external_snap_with_references.cfg │ │ ├── delete_snapshot_after_disk_attached.cfg │ │ ├── delete_snapshot_metadata.cfg │ │ ├── memory_snapshot_delete.cfg │ │ ├── revert_disk_external_snap.cfg │ │ ├── revert_memory_only_snap.cfg │ │ ├── revert_snap_based_on_state.cfg │ │ ├── revert_snap_for_guest_with_genid.cfg │ │ ├── revert_snap_with_flags.cfg │ │ ├── revert_snapshot_after_xml_updated.cfg │ │ └── snapshot_list_with_options.cfg │ ├── sriov │ │ ├── capabilities │ │ │ └── sriov_capabilities_iommu_support.cfg │ │ ├── failover │ │ │ ├── sriov_failover_abort_migration.cfg │ │ │ ├── sriov_failover_at_dt_hostdev.cfg │ │ │ ├── sriov_failover_lifecycle.cfg │ │ │ └── sriov_failover_migration.cfg │ │ ├── locked_memory_check │ │ │ └── sriov_with_hotplug_memory.cfg │ │ ├── network │ │ │ ├── sriov_check_net_info_by_network_lifecycle_cmds.cfg │ │ │ ├── sriov_check_network_connections.cfg │ │ │ └── sriov_define_or_start_network_with_pf_addr.cfg │ │ ├── nodedev │ │ │ ├── sriov_nodedev_non_driver.cfg │ │ │ 
├── sriov_nodedev_reattach_detach.cfg │ │ │ └── sriov_reattach_detach_nodedev_in_use.cfg │ │ ├── plug_unplug │ │ │ ├── sriov_attach_detach_device.cfg │ │ │ ├── sriov_attach_detach_device_after_restarting_service.cfg │ │ │ ├── sriov_attach_detach_device_special_situations.cfg │ │ │ ├── sriov_attach_detach_device_with_flags.cfg │ │ │ ├── sriov_attach_detach_device_with_unsupported_settings.cfg │ │ │ ├── sriov_attach_detach_interface.cfg │ │ │ ├── sriov_attach_detach_interface_check_connections.cfg │ │ │ ├── sriov_attach_detach_interface_from_network.cfg │ │ │ ├── sriov_attach_detach_interface_special_situations.cfg │ │ │ ├── sriov_attach_detach_interface_with_flags.cfg │ │ │ ├── sriov_attach_interface_to_vm_with_vf.cfg │ │ │ └── sriov_attach_released_hostdev.cfg │ │ ├── scalability │ │ │ ├── sriov_scalability_max_vf.cfg │ │ │ └── sriov_scalability_repeated_at_dt.cfg │ │ ├── update_device │ │ │ └── sriov_update_device.cfg │ │ ├── vIOMMU │ │ │ ├── attach_iommu_device.cfg │ │ │ ├── hotplug_device_with_iommu_enabled.cfg │ │ │ ├── intel_iommu_aw_bits.cfg │ │ │ ├── intel_iommu_virtio_device_with_ats.cfg │ │ │ ├── intel_iommu_with_dma_translation.cfg │ │ │ ├── intel_iommu_without_enabling_caching_mode.cfg │ │ │ ├── intel_iommu_without_ioapic.cfg │ │ │ ├── iommu_alias.cfg │ │ │ ├── iommu_device_lifecycle.cfg │ │ │ ├── iommu_device_settings.cfg │ │ │ ├── migration_iommu_device.cfg │ │ │ ├── viommu_netperf.cfg │ │ │ ├── viommu_transfer_file.cfg │ │ │ ├── viommu_unload_driver.cfg │ │ │ └── virtio_iommu_with_addtional_attributes.cfg │ │ └── vm_lifecycle │ │ │ ├── direct_passthrough_vm_lifecycle_start_destroy.cfg │ │ │ ├── sriov_vm_lifecycle_exclusive_check_offline_domain.cfg │ │ │ ├── sriov_vm_lifecycle_exclusive_check_running_domain.cfg │ │ │ ├── sriov_vm_lifecycle_iommu_at_dt_hostdev.cfg │ │ │ ├── sriov_vm_lifecycle_managedsave.cfg │ │ │ ├── sriov_vm_lifecycle_reboot.cfg │ │ │ ├── sriov_vm_lifecycle_start_destroy.cfg │ │ │ ├── sriov_vm_lifecycle_start_negative.cfg │ │ │ ├── 
sriov_vm_lifecycle_suspend_resume.cfg │ │ │ └── sriov_vm_lifecycle_unmanaged.cfg │ ├── storage │ │ ├── virsh_pool.cfg │ │ └── virsh_pool_autostart.cfg │ ├── storage_discard.cfg │ ├── svirt │ │ ├── dac │ │ │ ├── dac_seclabel_overall_domain.cfg │ │ │ └── dac_seclabel_per_device.cfg │ │ ├── dac_nfs_disk.cfg │ │ ├── dac_nfs_save_restore.cfg │ │ ├── dac_per_disk_hotplug.cfg │ │ ├── dac_start_destroy.cfg │ │ ├── dac_vm_per_image_start.cfg │ │ ├── default_dac_check.cfg │ │ ├── label_restore_rules │ │ │ ├── label_restore_rules_disk_access_modes.cfg │ │ │ └── label_restore_rules_on_failed_start.cfg │ │ ├── libvirt_keywrap.cfg │ │ ├── qemu_conf │ │ │ ├── svirt_qemu_namespace.cfg │ │ │ └── svirt_qemu_security_confined.cfg │ │ ├── selinux │ │ │ ├── selinux_seclabel_overall_domain.cfg │ │ │ └── selinux_seclabel_per_device.cfg │ │ ├── selinux_relabel.cfg │ │ ├── shared_storage │ │ │ └── svirt_nfs_disk.cfg │ │ ├── svirt_attach_disk.cfg │ │ ├── svirt_save_restore.cfg │ │ ├── svirt_start_destroy.cfg │ │ ├── svirt_undefine_define.cfg │ │ ├── svirt_virt_clone.cfg │ │ ├── svirt_virt_install.cfg │ │ └── umask_value │ │ │ └── svirt_umask_files_accessed_by_qemu.cfg │ ├── timer_management.cfg │ ├── usb │ │ ├── libvirt_usb_hotplug_controller.cfg │ │ ├── libvirt_usb_hotplug_device.cfg │ │ ├── usb_device.cfg │ │ └── usb_passthrough.cfg │ ├── virsh_cmd │ │ ├── domain │ │ │ ├── virsh_attach_detach_disk.cfg │ │ │ ├── virsh_attach_detach_disk_lxc.cfg │ │ │ ├── virsh_attach_detach_disk_matrix.cfg │ │ │ ├── virsh_attach_detach_interface.cfg │ │ │ ├── virsh_attach_detach_interface_matrix.cfg │ │ │ ├── virsh_attach_device.cfg │ │ │ ├── virsh_attach_device_matrix.cfg │ │ │ ├── virsh_attach_passthrough_no_bus.cfg │ │ │ ├── virsh_autostart.cfg │ │ │ ├── virsh_blkdeviotune.cfg │ │ │ ├── virsh_blockcommit.cfg │ │ │ ├── virsh_blockcopy.cfg │ │ │ ├── virsh_blockcopy_xml.cfg │ │ │ ├── virsh_blockjob.cfg │ │ │ ├── virsh_blockpull.cfg │ │ │ ├── virsh_blockresize.cfg │ │ │ ├── virsh_change_media.cfg │ │ │ ├── 
virsh_change_media_matrix.cfg │ │ │ ├── virsh_console.cfg │ │ │ ├── virsh_cpu_baseline.cfg │ │ │ ├── virsh_cpu_compare.cfg │ │ │ ├── virsh_cpu_stats.cfg │ │ │ ├── virsh_cpu_xml.cfg │ │ │ ├── virsh_create.cfg │ │ │ ├── virsh_create_lxc.cfg │ │ │ ├── virsh_define.cfg │ │ │ ├── virsh_desc.cfg │ │ │ ├── virsh_destroy.cfg │ │ │ ├── virsh_detach_device.cfg │ │ │ ├── virsh_detach_device_alias.cfg │ │ │ ├── virsh_detach_serial_device_alias.cfg │ │ │ ├── virsh_domblkerror.cfg │ │ │ ├── virsh_domblklist.cfg │ │ │ ├── virsh_domblkthreshold.cfg │ │ │ ├── virsh_domcontrol.cfg │ │ │ ├── virsh_domdirtyrate_calc.cfg │ │ │ ├── virsh_domdisplay.cfg │ │ │ ├── virsh_domfsfreeze.cfg │ │ │ ├── virsh_domfsfreeze_domfsthaw.cfg │ │ │ ├── virsh_domfsinfo.cfg │ │ │ ├── virsh_domfsthaw.cfg │ │ │ ├── virsh_domfstrim.cfg │ │ │ ├── virsh_domid.cfg │ │ │ ├── virsh_domif_setlink_getlink.cfg │ │ │ ├── virsh_domifaddr.cfg │ │ │ ├── virsh_domiflist.cfg │ │ │ ├── virsh_domiftune.cfg │ │ │ ├── virsh_domjobabort.cfg │ │ │ ├── virsh_domjobinfo.cfg │ │ │ ├── virsh_domname.cfg │ │ │ ├── virsh_dompmsuspend.cfg │ │ │ ├── virsh_domrename.cfg │ │ │ ├── virsh_domtime.cfg │ │ │ ├── virsh_domuuid.cfg │ │ │ ├── virsh_domxml_from_native.cfg │ │ │ ├── virsh_domxml_to_native.cfg │ │ │ ├── virsh_dumpxml.cfg │ │ │ ├── virsh_edit.cfg │ │ │ ├── virsh_emulatorpin.cfg │ │ │ ├── virsh_emulatorpin_mix.cfg │ │ │ ├── virsh_guestinfo.cfg │ │ │ ├── virsh_guestvcpus.cfg │ │ │ ├── virsh_hypervisor_cpu_baseline.cfg │ │ │ ├── virsh_hypervisor_cpu_compare.cfg │ │ │ ├── virsh_iothreadadd.cfg │ │ │ ├── virsh_iothreaddel.cfg │ │ │ ├── virsh_iothreadinfo.cfg │ │ │ ├── virsh_iothreadpin.cfg │ │ │ ├── virsh_managedsave.cfg │ │ │ ├── virsh_managedsave_extra.cfg │ │ │ ├── virsh_managedsave_restore.cfg │ │ │ ├── virsh_managedsave_special_name.cfg │ │ │ ├── virsh_managedsave_undefine.cfg │ │ │ ├── virsh_memtune.cfg │ │ │ ├── virsh_metadata.cfg │ │ │ ├── virsh_migrate.cfg │ │ │ ├── virsh_migrate_compcache.cfg │ │ │ ├── 
virsh_migrate_copy_storage.cfg │ │ │ ├── virsh_migrate_multi_vms.cfg │ │ │ ├── virsh_migrate_option_mix.cfg │ │ │ ├── virsh_migrate_set_get_speed.cfg │ │ │ ├── virsh_migrate_setmaxdowntime.cfg │ │ │ ├── virsh_migrate_stress.cfg │ │ │ ├── virsh_migrate_virtio_scsi.cfg │ │ │ ├── virsh_migration.cfg │ │ │ ├── virsh_numatune.cfg │ │ │ ├── virsh_qemu_agent_command.cfg │ │ │ ├── virsh_qemu_agent_command_fs.cfg │ │ │ ├── virsh_qemu_attach.cfg │ │ │ ├── virsh_qemu_monitor_blockjob.cfg │ │ │ ├── virsh_qemu_monitor_command.cfg │ │ │ ├── virsh_reboot.cfg │ │ │ ├── virsh_reset.cfg │ │ │ ├── virsh_restore.cfg │ │ │ ├── virsh_resume.cfg │ │ │ ├── virsh_save.cfg │ │ │ ├── virsh_save_image_define.cfg │ │ │ ├── virsh_save_image_edit.cfg │ │ │ ├── virsh_schedinfo_qemu_posix.cfg │ │ │ ├── virsh_schedinfo_xen_credit.cfg │ │ │ ├── virsh_screenshot.cfg │ │ │ ├── virsh_sendkey.cfg │ │ │ ├── virsh_set_get_user_sshkeys.cfg │ │ │ ├── virsh_set_user_password.cfg │ │ │ ├── virsh_setmaxmem.cfg │ │ │ ├── virsh_setvcpu.cfg │ │ │ ├── virsh_setvcpus.cfg │ │ │ ├── virsh_shutdown.cfg │ │ │ ├── virsh_sosreport.cfg │ │ │ ├── virsh_start.cfg │ │ │ ├── virsh_suspend.cfg │ │ │ ├── virsh_ttyconsole.cfg │ │ │ ├── virsh_undefine.cfg │ │ │ ├── virsh_update_device.cfg │ │ │ ├── virsh_update_device_matrix.cfg │ │ │ ├── virsh_vcpucount.cfg │ │ │ ├── virsh_vcpuinfo.cfg │ │ │ ├── virsh_vcpupin.cfg │ │ │ └── virsh_vncdisplay.cfg │ │ ├── filter │ │ │ ├── virsh_nwfilter_define.cfg │ │ │ ├── virsh_nwfilter_dumpxml.cfg │ │ │ ├── virsh_nwfilter_edit.cfg │ │ │ ├── virsh_nwfilter_list.cfg │ │ │ └── virsh_nwfilter_undefine.cfg │ │ ├── host │ │ │ ├── virsh_capabilities.cfg │ │ │ ├── virsh_cpu_models.cfg │ │ │ ├── virsh_deprecate_api.cfg │ │ │ ├── virsh_domcapabilities.cfg │ │ │ ├── virsh_freecell.cfg │ │ │ ├── virsh_freepages.cfg │ │ │ ├── virsh_hostname.cfg │ │ │ ├── virsh_maxvcpus.cfg │ │ │ ├── virsh_node_memtune.cfg │ │ │ ├── virsh_nodecpumap.cfg │ │ │ ├── virsh_nodecpustats.cfg │ │ │ ├── virsh_nodeinfo.cfg │ │ │ ├── 
virsh_nodememstats.cfg │ │ │ ├── virsh_nodesuspend.cfg │ │ │ ├── virsh_sysinfo.cfg │ │ │ ├── virsh_uri.cfg │ │ │ └── virsh_version.cfg │ │ ├── interface │ │ │ ├── virsh_iface.cfg │ │ │ ├── virsh_iface_bridge.cfg │ │ │ ├── virsh_iface_edit.cfg │ │ │ ├── virsh_iface_list.cfg │ │ │ └── virsh_iface_trans.cfg │ │ ├── monitor │ │ │ ├── virsh_backing_chain_domblkinfo.cfg │ │ │ ├── virsh_domblkinfo.cfg │ │ │ ├── virsh_domblkstat.cfg │ │ │ ├── virsh_domifstat.cfg │ │ │ ├── virsh_dominfo.cfg │ │ │ ├── virsh_dommemstat.cfg │ │ │ ├── virsh_domstate.cfg │ │ │ ├── virsh_domstats.cfg │ │ │ ├── virsh_list.cfg │ │ │ └── virsh_perf.cfg │ │ ├── network │ │ │ ├── virsh_net_autostart.cfg │ │ │ ├── virsh_net_create.cfg │ │ │ ├── virsh_net_define_undefine.cfg │ │ │ ├── virsh_net_destroy.cfg │ │ │ ├── virsh_net_dhcp_leases.cfg │ │ │ ├── virsh_net_dumpxml.cfg │ │ │ ├── virsh_net_edit.cfg │ │ │ ├── virsh_net_event.cfg │ │ │ ├── virsh_net_info.cfg │ │ │ ├── virsh_net_list.cfg │ │ │ ├── virsh_net_name.cfg │ │ │ ├── virsh_net_start.cfg │ │ │ ├── virsh_net_update.cfg │ │ │ └── virsh_net_uuid.cfg │ │ ├── nodedev │ │ │ ├── crypto_nodedev_create_destroy.cfg │ │ │ ├── virsh_nodedev_create_destroy.cfg │ │ │ ├── virsh_nodedev_detach_reattach.cfg │ │ │ ├── virsh_nodedev_dumpxml.cfg │ │ │ ├── virsh_nodedev_dumpxml_chain.cfg │ │ │ ├── virsh_nodedev_event.cfg │ │ │ ├── virsh_nodedev_list.cfg │ │ │ ├── virsh_nodedev_persistence_mdev.cfg │ │ │ └── virsh_nodedev_reset.cfg │ │ ├── pool │ │ │ ├── virsh_find_storage_pool_sources.cfg │ │ │ ├── virsh_find_storage_pool_sources_as.cfg │ │ │ ├── virsh_pool_acl.cfg │ │ │ ├── virsh_pool_auth.cfg │ │ │ ├── virsh_pool_capabilities.cfg │ │ │ ├── virsh_pool_create.cfg │ │ │ ├── virsh_pool_create_as.cfg │ │ │ └── virsh_pool_edit.cfg │ │ ├── secret │ │ │ ├── virsh_secret_define_undefine.cfg │ │ │ ├── virsh_secret_dumpxml.cfg │ │ │ ├── virsh_secret_list.cfg │ │ │ └── virsh_secret_set_get.cfg │ │ ├── snapshot │ │ │ ├── virsh_snapshot.cfg │ │ │ ├── 
virsh_snapshot_create_as.cfg │ │ │ ├── virsh_snapshot_disk.cfg │ │ │ ├── virsh_snapshot_dumpxml.cfg │ │ │ ├── virsh_snapshot_edit.cfg │ │ │ ├── virsh_snapshot_mode.cfg │ │ │ └── virsh_snapshot_par_cur.cfg │ │ ├── virsh_connect.cfg │ │ ├── virsh_itself.cfg │ │ ├── virsh_qemu_cmdline_core.cfg │ │ └── volume │ │ │ ├── virsh_vol_clone_wipe.cfg │ │ │ ├── virsh_vol_create.cfg │ │ │ ├── virsh_vol_create_from.cfg │ │ │ ├── virsh_vol_download_upload.cfg │ │ │ ├── virsh_vol_resize.cfg │ │ │ ├── virsh_volume.cfg │ │ │ ├── virsh_volume_application.cfg │ │ │ └── vol_concurrent.cfg │ ├── virt_admin │ │ ├── management │ │ │ ├── virt_admin_client_disconnect.cfg │ │ │ ├── virt_admin_server_clients_set.cfg │ │ │ ├── virt_admin_server_threadpool_set.cfg │ │ │ └── virt_admin_server_update_tls.cfg │ │ ├── monitor │ │ │ ├── virt_admin_srv_clients_info.cfg │ │ │ ├── virt_admin_srv_list.cfg │ │ │ └── virt_admin_srv_threadpool_info.cfg │ │ └── virt_admin_itself.cfg │ ├── virt_cmd │ │ ├── virt_clone.cfg │ │ ├── virt_top.cfg │ │ ├── virt_what.cfg │ │ └── virt_xml_validate.cfg │ ├── virtio │ │ └── virtio_page_per_vq.cfg │ ├── virtio_transitional │ │ ├── virtio_transitional_blk.cfg │ │ ├── virtio_transitional_blk_negative.cfg │ │ ├── virtio_transitional_mem_balloon.cfg │ │ ├── virtio_transitional_nic.cfg │ │ ├── virtio_transitional_rng.cfg │ │ ├── virtio_transitional_serial.cfg │ │ └── virtio_transitional_vsock.cfg │ ├── virtiofs │ │ ├── virtiofs.cfg │ │ └── virtiofs_unprivileged.cfg │ ├── virtual_device │ │ ├── input_devices.cfg │ │ ├── input_devices_plug_unplug.cfg │ │ ├── sound_device.cfg │ │ ├── tpm_device.cfg │ │ ├── video_devices.cfg │ │ ├── vsock.cfg │ │ └── watchdog.cfg │ ├── virtual_disks │ │ ├── at_dt_iscsi_disk.cfg │ │ ├── startup_policy.cfg │ │ ├── vhostvdpa_block_backend_type │ │ │ ├── blockcopy_vhostvdpa_backend_disk.cfg │ │ │ ├── define_start_vm_with_multi_vhostvdpa_backend_disks.cfg │ │ │ ├── define_start_vm_with_vhostvdpa_backend_disk.cfg │ │ │ ├── 
define_start_vms_with_same_vhostvdpa_backend_disk.cfg │ │ │ ├── hotplug_vhostvdpa_backend_disk.cfg │ │ │ ├── nodedev_vhostvdpa_disk.cfg │ │ │ └── vm_lifecycle_vhostvdpa_backend_disk.cfg │ │ ├── virtual_disks_alias.cfg │ │ ├── virtual_disks_audit_log_disk.cfg │ │ ├── virtual_disks_backingstore_disk.cfg │ │ ├── virtual_disks_blockresize.cfg │ │ ├── virtual_disks_ccw_addr.cfg │ │ ├── virtual_disks_cdrom_device.cfg │ │ ├── virtual_disks_ceph.cfg │ │ ├── virtual_disks_dasd.cfg │ │ ├── virtual_disks_datastore.cfg │ │ ├── virtual_disks_device_mapper.cfg │ │ ├── virtual_disks_discard_granularity.cfg │ │ ├── virtual_disks_discard_no_unref.cfg │ │ ├── virtual_disks_encryption.cfg │ │ ├── virtual_disks_filedescriptor.cfg │ │ ├── virtual_disks_geometry.cfg │ │ ├── virtual_disks_gluster.cfg │ │ ├── virtual_disks_https.cfg │ │ ├── virtual_disks_io_tuning.cfg │ │ ├── virtual_disks_iothread.cfg │ │ ├── virtual_disks_iothreads_queue.cfg │ │ ├── virtual_disks_iscsi.cfg │ │ ├── virtual_disks_luks.cfg │ │ ├── virtual_disks_metadatacache.cfg │ │ ├── virtual_disks_multiattributes.cfg │ │ ├── virtual_disks_multidisks.cfg │ │ ├── virtual_disks_multipath.cfg │ │ ├── virtual_disks_multivms.cfg │ │ ├── virtual_disks_nbd.cfg │ │ ├── virtual_disks_nvme.cfg │ │ ├── virtual_disks_optional_startuppolicy.cfg │ │ ├── virtual_disks_rerror_policy.cfg │ │ ├── virtual_disks_rotation_rate.cfg │ │ ├── virtual_disks_scsi3_persistent_reservation.cfg │ │ ├── virtual_disks_slice_operation.cfg │ │ ├── virtual_disks_snapshot_blockresize.cfg │ │ ├── virtual_disks_ssh.cfg │ │ ├── virtual_disks_transient_disk.cfg │ │ ├── virtual_disks_usb.cfg │ │ ├── virtual_disks_usb_startuppolicy.cfg │ │ └── virtual_disks_vhostuser.cfg │ ├── virtual_interface │ │ ├── hotplug_mem.cfg │ │ ├── interface_update_device_negative.cfg │ │ ├── interface_update_device_offline_domain.cfg │ │ ├── interface_update_device_running_domain.cfg │ │ └── vdpa_attach_duplicated_device.cfg │ ├── virtual_network │ │ ├── address │ │ │ └── 
virtual_network_address_tftp.cfg │ │ ├── attach_detach_device │ │ │ ├── attach_iface_with_boot_order.cfg │ │ │ ├── attach_mtu_malformed.cfg │ │ │ └── attach_user_type_iface.cfg │ │ ├── connectivity │ │ │ ├── connectivity_check_bridge_interface.cfg │ │ │ ├── connectivity_check_bridge_interface_unprivileged.cfg │ │ │ ├── connectivity_check_direct_interface.cfg │ │ │ ├── connectivity_check_ethernet_interface.cfg │ │ │ ├── connectivity_check_mcast_interface.cfg │ │ │ ├── connectivity_check_network_interface.cfg │ │ │ ├── connectivity_check_tcp_tunnel_interface.cfg │ │ │ ├── connectivity_check_udp_tunnel_interface.cfg │ │ │ ├── connectivity_check_user_interface.cfg │ │ │ ├── connectivity_check_vdpa_interface.cfg │ │ │ └── netperf_nat_interface.cfg │ │ ├── domifaddr.cfg │ │ ├── driver │ │ │ ├── check_vhost_cpu_affinity_with_emulatorpin.cfg │ │ │ └── rx_tx_queue_size.cfg │ │ ├── elements_and_attributes │ │ │ ├── attribute_port_isolated.cfg │ │ │ ├── element_coalesce.cfg │ │ │ ├── element_mac_specific_addr.cfg │ │ │ ├── element_model.cfg │ │ │ └── element_sndbuf.cfg │ │ ├── hotplug │ │ │ ├── attach_detach_device │ │ │ │ ├── hotplug_hotunplug_vdpa_interface.cfg │ │ │ │ └── rollback_vdpafd_on_hotplug_failure.cfg │ │ │ └── attach_detach_interface │ │ │ │ └── attach_interface_with_model.cfg │ │ ├── iface_attach_detach.cfg │ │ ├── iface_bridge.cfg │ │ ├── iface_coalesce.cfg │ │ ├── iface_hotplug.cfg │ │ ├── iface_network.cfg │ │ ├── iface_nss.cfg │ │ ├── iface_options.cfg │ │ ├── iface_ovs.cfg │ │ ├── iface_rename.cfg │ │ ├── iface_stat.cfg │ │ ├── iface_target.cfg │ │ ├── iface_unprivileged.cfg │ │ ├── iface_update.cfg │ │ ├── lifecycle │ │ │ ├── lifecycle_vdpa_interface.cfg │ │ │ └── restart_service.cfg │ │ ├── link_state │ │ │ └── link_state_model_type.cfg │ │ ├── locked_memory_vdpa │ │ │ ├── hotplug_mem_to_vm_with_multiple_vdpa_interfaces.cfg │ │ │ ├── hotplug_mem_to_vm_with_vdpa.cfg │ │ │ ├── mem_lock_limit_multiple_mixed_interfaces.cfg │ │ │ └── 
mem_lock_limit_multiple_vdpa_interfaces.cfg │ │ ├── mtu.cfg │ │ ├── network │ │ │ ├── elements_and_attributes │ │ │ │ └── network_static_route.cfg │ │ │ └── net_update_dns.cfg │ │ ├── network_misc.cfg │ │ ├── nodedev │ │ │ └── nodedev_vdpa_interface.cfg │ │ ├── passt │ │ │ ├── passt_attach_detach.cfg │ │ │ ├── passt_connectivity_between_2vms.cfg │ │ │ ├── passt_function.cfg │ │ │ ├── passt_lifecycle.cfg │ │ │ ├── passt_negative_setting.cfg │ │ │ ├── passt_reconnect.cfg │ │ │ └── passt_transfer_file.cfg │ │ ├── qos │ │ │ ├── check_actual_network_throughput.cfg │ │ │ ├── check_actual_network_throughput_direct.cfg │ │ │ ├── check_bandwidth_by_domiftune.cfg │ │ │ ├── check_qos_floor.cfg │ │ │ └── test_bandwidth_boundry.cfg │ │ ├── resolve_vm_hostname_by_resolvectl.cfg │ │ ├── start_vm_with_duplicate_target_dev_name.cfg │ │ ├── update_device │ │ │ ├── unsupported_live_update_add.cfg │ │ │ ├── unsupported_live_update_alter.cfg │ │ │ ├── unsupported_live_update_delete.cfg │ │ │ ├── update_device_coalesce.cfg │ │ │ ├── update_driver_non_virtio.cfg │ │ │ ├── update_iface_link_state.cfg │ │ │ ├── update_iface_portgroup.cfg │ │ │ ├── update_iface_qos.cfg │ │ │ ├── update_iface_qos_invalid.cfg │ │ │ ├── update_iface_source.cfg │ │ │ ├── update_iface_trustGuestRxFilters.cfg │ │ │ ├── update_iface_type_live.cfg │ │ │ ├── update_iface_with_identifier.cfg │ │ │ ├── update_iface_with_options_active.cfg │ │ │ ├── update_iface_with_options_inactive.cfg │ │ │ ├── update_iface_with_unchangable.cfg │ │ │ └── update_port_isolated.cfg │ │ └── virtual_network_multivms.cfg │ ├── vm_boot_with_kernel_param.cfg │ ├── vm_create_destroy_concurrently.cfg │ └── vm_start_destroy_repeatedly.cfg │ ├── deps │ ├── cap_mix.xml │ ├── capabilities.xml │ ├── capabilities_s390x.xml │ ├── capability_cpu.xml │ ├── caps_skylake_server_cascadelake_server.xml │ ├── cpu.xml │ ├── cpu_s390x.xml │ ├── cve_2023_3750.sh │ ├── domcapabilities.xml │ ├── domcapabilities_s390x.xml │ ├── 
domcaps_skylake_client_cascadelake_server.xml │ ├── domcaps_skylake_server_cascadelake_server.xml │ ├── hook_qemu_restore.py │ ├── negative_domcapabilities_s390x.xml │ └── qemu_wrapper.py │ └── src │ ├── __init__.py │ ├── backingchain │ ├── blockcommand.py │ ├── blockcommit │ │ ├── blockcommit_all_block_chain.py │ │ ├── blockcommit_base_image_exist_backingfile.py │ │ ├── blockcommit_conventional_chain.py │ │ ├── blockcommit_relative_path.py │ │ ├── blockcommit_with_async_option.py │ │ ├── blockcommit_with_bandwith.py │ │ ├── blockcommit_with_delete_option.py │ │ ├── blockcommit_with_keep_overlay.py │ │ ├── blockcommit_with_shallow_option.py │ │ └── check_allocation_watermark_during_blockcommit.py │ ├── blockcommit_basic_function.py │ ├── blockcopy.py │ ├── blockcopy │ │ ├── blockcopy_with_async_option.py │ │ ├── blockcopy_with_backingfile.py │ │ ├── blockcopy_with_bandwidth.py │ │ ├── blockcopy_with_blockdev_option.py │ │ ├── blockcopy_with_conventional_chain.py │ │ ├── blockcopy_with_different_dest_xml.py │ │ ├── blockcopy_with_different_qcow2_properties.py │ │ ├── blockcopy_with_disk_driver_attributes.py │ │ ├── blockcopy_with_granularity_buf_size_option.py │ │ ├── blockcopy_with_syncwrites_option.py │ │ ├── blockcopy_with_xml_option_to_luks_slice_image.py │ │ └── blockcopy_with_zero_length_disk.py │ ├── blockcopy_options.py │ ├── blockjob │ │ ├── blockjob_pivot_after_irregular_operations.py │ │ ├── blockjob_with_async_option.py │ │ ├── blockjob_with_bandwidth_option.py │ │ └── blockjob_with_raw_option.py │ ├── blockjob_options.py │ ├── blockpull │ │ ├── blockpull_base_image_exist_backingfile.py │ │ ├── blockpull_conventional_chain.py │ │ ├── blockpull_relative_path.py │ │ ├── blockpull_with_async.py │ │ └── blockpull_with_bandwidth.py │ ├── blockresize.py │ ├── domblkthreshold.py │ ├── event_checking_test │ │ └── commit_pull_copy_event.py │ ├── hotplug_test │ │ └── hot_un_plug.py │ ├── lifecycle │ │ └── block_operation_after_lifecycle.py │ ├── lifecycle_test │ │ 
└── check_mirror_with_restart_libvirtd.py │ ├── negative_scenario │ │ ├── blockcommit_invalid_top_base.py │ │ ├── blockcopy_with_invalid_destination.py │ │ ├── blockjob_with_invalid_operation.py │ │ ├── blockpull_with_invalid_base.py │ │ ├── do_concurrent_operation.py │ │ └── interrupt_blockcopy.py │ ├── repeatability_test │ │ └── commit_pull_copy_after_snap.py │ ├── virsh_domblk │ │ └── domblkthreshold_with_backingchain_element.py │ ├── virtual_disks_blockcopy_options.py │ ├── virtual_disks_relative_blockcommit.py │ ├── virtual_disks_snapshot_blockpull.py │ └── with_disk_attributes_test │ │ ├── commit_pull_with_disk_driver_attributes.py │ │ └── commit_pull_with_disk_source_attributes.py │ ├── bios │ ├── boot_integration.py │ ├── boot_order_ovmf.py │ ├── boot_order_seabios.py │ ├── virsh_boot.py │ ├── virsh_boot_reset_nvram.py │ ├── virsh_boot_sysinfo.py │ ├── virsh_boot_tseg.py │ └── vm_boot_nvram_source.py │ ├── channel │ └── channel_functional.py │ ├── chardev │ ├── connection_and_operation │ │ ├── data_transfer.py │ │ └── file_append_option.py │ ├── log │ │ └── chardev_with_log_file.py │ └── virsh_console_operation │ │ ├── prevent_multiple_console.py │ │ └── release_console.py │ ├── controller │ ├── controller_functional.py │ ├── multiple_controller_ppc.py │ ├── pci_controller_memreserve.py │ ├── pcibridge.py │ ├── pcie_controller_hotplug_option.py │ └── pcie_root_port_controller.py │ ├── cpu │ ├── aarch64_cpu_sve.py │ ├── aarch64_gic_version.py │ ├── diagnose_data.py │ ├── diagnose_spinlock_yield_forward.py │ ├── guestpin.py │ ├── iothread.py │ ├── lifecycle_time.py │ ├── max_vcpus.py │ ├── multi_vms_with_stress.py │ ├── powerpc_hmi.py │ ├── ppc_cpu_mode.py │ ├── ppccpucompat.py │ ├── setvcpu.py │ ├── topology.py │ ├── vcpu_affinity.py │ ├── vcpu_cache.py │ ├── vcpu_cve.py │ ├── vcpu_feature.py │ ├── vcpu_hotpluggable.py │ ├── vcpu_max_topology.py │ ├── vcpu_metrics.py │ ├── vcpu_misc.py │ ├── vcpu_nested.py │ ├── vcpu_sched_core.py │ ├── vcpupin.py │ ├── 
virsh_cpu_compare_xml.py │ └── vm_features.py │ ├── daemon │ ├── check_daemon_after_remove_pkgs.py │ ├── check_daemon_default_status.py │ ├── check_socket_files_with_services.py │ ├── check_users.py │ ├── check_virsh_connection_switch_between_monolithic_and_modular.py │ ├── conf_file │ │ ├── libvirtd_conf │ │ │ ├── check_negative_parameter.py │ │ │ ├── host_uuid.py │ │ │ ├── prefix_num.py │ │ │ ├── processing_controls.py │ │ │ ├── set_audit_logging.py │ │ │ ├── unix_sock.py │ │ │ └── virt_admin_logging.py │ │ ├── qemu_conf │ │ │ ├── auto_dump.py │ │ │ ├── clear_emulator_capabilities.py │ │ │ ├── seccomp_sandbox.py │ │ │ ├── set_process_name.py │ │ │ └── set_virtlogd.py │ │ ├── sysconfig_libvirt_guests │ │ │ └── libvirt_guests.py │ │ ├── sysconfig_libvirtd │ │ │ └── libvirtd_config.py │ │ └── systemd_config │ │ │ └── systemd_config.py │ ├── crash_regression.py │ ├── daemon_functional.py │ ├── enable_disable_daemon_socket_services.py │ ├── init_scripts.py │ ├── kill_qemu.py │ ├── kill_started.py │ ├── kill_starting.py │ ├── libvirt_guests_service.py │ ├── libvirtd │ │ ├── libvirtd.py │ │ └── libvirtd_multi_conn.py │ ├── qemu_monitor_socket_creation_delay.py │ └── restart_consist.py │ ├── domain_life_cycle │ └── create_destroy_domain.py │ ├── embedded_qemu │ └── embedded_qemu.py │ ├── event │ └── virsh_event.py │ ├── features │ └── ras.py │ ├── gpu │ ├── hotplug_gpu.py │ ├── lifecycle_vm_with_gpu.py │ ├── start_vm_with_gpu.py │ ├── start_vm_with_gpu_managed_disabled.py │ ├── vm_cuda_sanity.py │ ├── vm_mig_sanity.py │ └── vm_nvidia_gpu_driver.py │ ├── graphics │ └── graphics_functional.py │ ├── guest_agent │ ├── agent_lifecycle.py │ ├── domhostname.py │ ├── guest_agent.py │ └── unix_source_path │ │ └── agent_auto_generated_unix_source_path_session_mode.py │ ├── guest_kernel_debugging │ ├── nmi_test.py │ └── virsh_dump.py │ ├── guest_os_booting │ ├── boot_menu │ │ └── bootmenu.py │ ├── boot_order │ │ ├── boot_from_cdrom_device.py │ │ ├── boot_from_disk_device.py │ │ ├── 
boot_from_usb_device.py │ │ ├── boot_from_virtiofs_device.py │ │ ├── boot_order_setting_negative.py │ │ ├── boot_with_multiple_boot_dev.py │ │ ├── boot_with_multiple_boot_order.py │ │ └── hotplug_device_with_boot_order.py │ ├── direct_kernel_boot │ │ └── direct_kernel_boot.py │ ├── firmware_configuration │ │ ├── os_acpi.py │ │ ├── smbios_mode.py │ │ └── sysinfo_fwcfg.py │ ├── lifecycle │ │ ├── lifecycle_boot.py │ │ └── lifecycle_reset_nvram.py │ ├── migration │ │ └── migration_boot.py │ ├── negative │ │ ├── boot_without_bootable_devices.py │ │ └── update_device_boot_order.py │ ├── os_configuration │ │ └── without_default_os_attributes.py │ ├── ovmf_firmware │ │ ├── ovmf_backed_nvram.py │ │ ├── ovmf_firmware_feature.py │ │ ├── ovmf_loader.py │ │ ├── ovmf_nvram.py │ │ ├── ovmf_seclabel_in_nvram.py │ │ └── ovmf_smm.py │ ├── seabios_firmware │ │ ├── seabios_loader.py │ │ └── seabios_rebootimeout.py │ └── useserial │ │ └── useserial.py │ ├── guest_resource_control │ ├── control_cgroup.py │ └── virsh_blkiotune.py │ ├── host_hypervisor │ ├── capabilities_output.py │ └── domcapabilities_output.py │ ├── hotplug_serial.py │ ├── in_place_upgrade_guest.py │ ├── incremental_backup │ ├── incremental_backup_backing_chain.py │ ├── incremental_backup_checkpoint_cmd.py │ ├── incremental_backup_event_monitor.py │ ├── incremental_backup_migration.py │ ├── incremental_backup_multidisk.py │ ├── incremental_backup_pull_mode.py │ └── incremental_backup_push_mode.py │ ├── iommu │ └── iommu_device.py │ ├── kernel_panic.py │ ├── lease_device.py │ ├── libvirt_bench │ ├── libvirt_bench_domstate_switch_by_groups.py │ ├── libvirt_bench_domstate_switch_in_loop.py │ ├── libvirt_bench_domstate_switch_with_iozone.py │ ├── libvirt_bench_domstate_switch_with_unixbench.py │ ├── libvirt_bench_dump_with_netperf.py │ ├── libvirt_bench_dump_with_unixbench.py │ ├── libvirt_bench_serial_hotplug.py │ ├── libvirt_bench_ttcp_from_guest_to_host.py │ ├── libvirt_bench_usb_hotplug.py │ └── 
libvirt_bench_vcpu_hotplug.py │ ├── libvirt_cputune.py │ ├── libvirt_hooks.py │ ├── libvirt_hugepage.py │ ├── libvirt_mem.py │ ├── libvirt_network_bandwidth.py │ ├── libvirt_package.py │ ├── libvirt_qemu_cmdline.py │ ├── libvirt_scsi.py │ ├── libvirt_scsi_hostdev.py │ ├── libvirt_vcpu_plug_unplug.py │ ├── libvirtd_start.py │ ├── limit.py │ ├── lxc │ └── lxc_life_cycle.py │ ├── macvtap.py │ ├── memory │ ├── allocpages.py │ ├── gmap.py │ ├── hmat.py │ ├── memballoon.py │ ├── memory_allocation │ │ ├── define_value_unit.py │ │ ├── lifecycle_with_memory_allocation.py │ │ ├── memory_allocation_config_modification.py │ │ └── memory_invalid_value.py │ ├── memory_attach_device.py │ ├── memory_backing │ │ ├── discard_memory_content_setting.py │ │ ├── hugepage_mount_path.py │ │ ├── hugepage_nodeset.py │ │ ├── lifecycle_for_hugepage.py │ │ ├── memory_access_mode.py │ │ ├── memory_source_and_allocation.py │ │ └── page_locking_and_shared_pages.py │ ├── memory_balloon │ │ ├── guest_without_mem_balloon.py │ │ ├── mem_balloon_autodeflate.py │ │ └── period_config_of_memory_balloon.py │ ├── memory_devices │ │ ├── change_virtio_mem_request_size.py │ │ ├── dimm_memory_hot_unplug.py │ │ ├── dimm_memory_hotplug.py │ │ ├── dimm_memory_lifecycle.py │ │ ├── dimm_memory_with_access_and_discard.py │ │ ├── dimm_memory_with_auto_placement_numatune.py │ │ ├── dimm_memory_with_memory_alloccation_and_numa.py │ │ ├── invalid_dimm_memory_device_config.py │ │ ├── invalid_nvdimm_memory_device_config.py │ │ ├── invalid_virtio_mem_config.py │ │ ├── lifecycle_for_file_backed_nvdimm_memory.py │ │ ├── lifecycle_of_mixed_memory_devices.py │ │ ├── nvdimm_memory_turn_to_dram.py │ │ ├── virtio_mem_access_and_discard.py │ │ ├── virtio_mem_auto_placement.py │ │ ├── virtio_mem_coldplug_and_unplug.py │ │ ├── virtio_mem_device_lifecycle.py │ │ ├── virtio_mem_dynamic_slots.py │ │ ├── virtio_mem_hot_unplug.py │ │ ├── virtio_mem_hotplug.py │ │ ├── virtio_mem_with_memory_allocation_and_numa.py │ │ ├── 
virtio_mem_with_memory_backing_type.py │ │ └── virtio_memory_with_numa_node_tuning.py │ ├── memory_discard.py │ ├── memory_hotplug.py │ ├── memory_misc.py │ ├── memory_tuning │ │ └── memory_tuning_settings.py │ ├── memory_update_device.py │ ├── nvdimm.py │ ├── secure_dump.py │ └── virsh_setmem.py │ ├── migration │ ├── abort_precopy_migration │ │ └── abort_by_domjobabort_on_target.py │ ├── async_job │ │ ├── async_job.py │ │ └── migration_domjobinfo.py │ ├── async_ops │ │ ├── destroy_vm_during_finishphase.py │ │ ├── destroy_vm_during_performphase.py │ │ ├── migrate_vm_again_during_migration.py │ │ ├── pause_resume_vm_during_migration.py │ │ └── query_info_during_migration.py │ ├── destructive_operations_around_live_migration │ │ ├── kill_qemu_during_finishphase.py │ │ ├── kill_qemu_during_performphase.py │ │ ├── kill_virtiofsd_during_performphase.py │ │ └── migration_kill_libvirt_daemon.py │ ├── guest_lifecycle_operations_during_migration │ │ └── migration_poweroff_vm.py │ ├── live_migration.py │ ├── memory_copy_mode │ │ └── memory_copy_mode.py │ ├── migrate_bandwidth.py │ ├── migrate_ceph.py │ ├── migrate_event.py │ ├── migrate_gluster.py │ ├── migrate_graphics.py │ ├── migrate_mem.py │ ├── migrate_network.py │ ├── migrate_options_shared.py │ ├── migrate_over_unix.py │ ├── migrate_service_control.py │ ├── migrate_storage.py │ ├── migrate_vm.py │ ├── migrate_vm_in_various_status │ │ └── migrate_paused_vm.py │ ├── migrate_with_legacy_guest.py │ ├── migrate_with_panic_device.py │ ├── migrate_with_various_hostname.py │ ├── migrate_with_virtual_devices.py │ ├── migration_cmd │ │ ├── setmaxdowntime_and_getmaxdowntime.py │ │ └── setspeed_and_getspeed.py │ ├── migration_misc │ │ ├── canonical_paths_in_shared_filesystems.py │ │ ├── migration_with_cpu_mode.py │ │ ├── migration_with_host_hostname.py │ │ ├── migration_with_host_uuid.py │ │ ├── migration_with_special_cpu.py │ │ ├── non_canonical_paths_in_shared_filesystems.py │ │ └── qemu_err_incoming_migration.py │ ├── 
migration_performance_tuning │ │ ├── migration_maxdowntime.py │ │ ├── migration_performance_tuning.py │ │ ├── migration_precopy_bandwidth.py │ │ └── migration_zerocopy.py │ ├── migration_resource_limit │ │ └── migration_bandwidth.py │ ├── migration_uri │ │ ├── migration_desturi.py │ │ ├── migration_network_data_transport.py │ │ ├── migration_network_data_transport_tcp.py │ │ ├── migration_network_data_transport_tls.py │ │ ├── tcp_migration_port_allocation_port_occupied_by_other_app.py │ │ └── tls_migrate_tls_x509_verify.py │ ├── migration_with_numa_topology.py │ ├── migration_with_virtiofs │ │ ├── migration_with_externally_launched_virtiofs_dev.py │ │ └── migration_with_internally_launched_virtiofs_dev.py │ ├── migration_with_vtpm │ │ ├── migration_with_external_tpm.py │ │ ├── migration_with_shared_tpm.py │ │ ├── migration_with_vtpm_dev.py │ │ └── migration_with_vtpm_state_on_block_dev.py │ ├── migration_xml_update.py │ ├── non_live_migration │ │ └── non_live_migration.py │ ├── offline_migration │ │ └── offline_migration.py │ ├── p2p_keepalive │ │ └── p2p_keepalive.py │ ├── pause_postcopy_migration_and_recover │ │ ├── no_paused_during_recover_migration.py │ │ ├── pause_and_disruptive_and_recover.py │ │ ├── pause_and_io_error_and_recover.py │ │ ├── pause_and_recover_and_disruptive.py │ │ ├── pause_and_recover_twice.py │ │ ├── pause_by_domjobabort_and_recover.py │ │ ├── pause_by_network_and_recover.py │ │ ├── pause_by_proxy_and_recover.py │ │ ├── unattended_migration.py │ │ └── unattended_migration_and_disruptive.py │ └── vm_configuration_and_status │ │ ├── migration_vm_configuration_and_status.py │ │ ├── persist_target_vm_and_undefine_src_vm.py │ │ └── persist_target_vm_and_undefine_src_vm_and_abort.py │ ├── migration_with_copy_storage │ ├── async_job │ │ ├── abort_job.py │ │ └── query_domain_job_info.py │ ├── autoskipped_disks.py │ ├── disk_io_load_in_vm.py │ ├── disk_type_coverage │ │ └── block_disk.py │ ├── migrate_disks.py │ ├── 
migrate_vm_disk_on_shared_storage.py │ ├── migration_bandwidth_limit.py │ ├── migration_retain_sparsity.py │ ├── migration_vm_has_no_disk.py │ ├── migration_with_backingchain.py │ ├── migration_with_vhostvdpa.py │ ├── network_data_transport │ │ ├── tcp.py │ │ ├── tcp_migrateuri_and_disks_port.py │ │ ├── tcp_port_reuse.py │ │ ├── tls.py │ │ └── unix_proxy.py │ ├── performance_tuning │ │ └── copy_storage_synchronous_writes.py │ ├── precreate_none_target_disk.py │ ├── target_image_larger_than_source.py │ └── target_image_smaller_than_source.py │ ├── multifunction.py │ ├── multiqueue.py │ ├── multivm_loadstress.py │ ├── multivm_stress │ └── multivm_stress.py │ ├── npiv │ ├── npiv_concurrent.py │ ├── npiv_hostdev_passthrough.py │ ├── npiv_image_from_pool.py │ ├── npiv_nodedev_create_destroy.py │ ├── npiv_pool_regression.py │ ├── npiv_pool_vol.py │ ├── npiv_restart_libvirtd.py │ ├── npiv_snapshot.py │ └── npiv_virtual_disk.py │ ├── numa │ ├── guest_numa.py │ ├── guest_numa_hmat │ │ └── hmat_info.py │ ├── guest_numa_node_tuning │ │ ├── auto_memory_incompatible_guest_binding.py │ │ ├── auto_memory_nodeset_placement.py │ │ ├── auto_memory_placement_numad_fail.py │ │ ├── change_numa_tuning.py │ │ ├── host_guest_mixed_memory_binding.py │ │ ├── invalid_nodeset_of_numa_memory_binding.py │ │ ├── memory_binding_setting.py │ │ ├── memory_binding_with_emulator_thread.py │ │ ├── numa_mem_binding_with_offline_cpu.py │ │ └── specific_numa_memory_bind_hugepage.py │ ├── guest_numa_topology │ │ ├── change_vcpu_pin.py │ │ ├── numa_topology_with_cpu_topology.py │ │ ├── numa_topology_with_hugepage.py │ │ ├── numa_topology_with_numa_distance.py │ │ └── various_numa_topology_settings.py │ ├── host_numa │ │ ├── host_numa_info.py │ │ └── host_numa_ksm_parameters.py │ ├── numa_capabilities.py │ ├── numa_config_with_auto_placement.py │ ├── numa_memAccess.py │ ├── numa_memory.py │ ├── numa_memory_migrate.py │ ├── numa_memory_spread.py │ ├── numa_node_memory_bind.py │ ├── numa_node_tuning │ │ └── 
auto_mem_placement_with_incompatible_host_nodeset.py │ ├── numa_nodeset.py │ ├── numa_numad.py │ ├── numa_numanode_cpu_info.py │ ├── numa_numatune_cpu.py │ ├── numa_preferred_undefine.py │ └── numad_vcpupin.py │ ├── nwfilter │ ├── filter_aready_present_binding.py │ ├── nwfilter_binding_create.py │ ├── nwfilter_binding_delete.py │ ├── nwfilter_binding_dumpxml.py │ ├── nwfilter_binding_list.py │ ├── nwfilter_daemon_restart.py │ ├── nwfilter_edit_uuid.py │ ├── nwfilter_update_lock.py │ ├── nwfilter_update_vm_running.py │ ├── nwfilter_vm_attach.py │ ├── nwfilter_vm_start.py │ └── vm_destroy_with_nwfilter.py │ ├── papr_hpt.py │ ├── passthrough │ ├── ap │ │ ├── libvirt_ap_passthrough.py │ │ ├── libvirt_ap_passthrough_autostart.py │ │ └── libvirt_ap_passthrough_hotplug.py │ ├── ccw │ │ ├── libvirt_ccw_passthrough.py │ │ └── libvirt_ccw_passthrough_read_write.py │ ├── pci │ │ ├── ism_pci_passthrough.py │ │ ├── libvirt_pci_passthrough.py │ │ ├── libvirt_pci_passthrough_hotplug.py │ │ └── vfio.py │ └── robustness │ │ └── passthrough_robustness.py │ ├── perf_kvm.py │ ├── ppc_device.py │ ├── remote_access │ ├── remote_access.py │ ├── remote_tls_multiple_certs.py │ ├── remote_tls_priority.py │ └── virsh_non_root_polkit.py │ ├── remove_guest.py │ ├── resource_abnormal.py │ ├── save_and_restore │ ├── abort_managedsave.py │ ├── abort_save.py │ ├── managedsave_remove.py │ ├── restore_from_local_file.py │ ├── restore_from_nfs_file.py │ ├── restore_from_unqualified_file.py │ ├── save_image_define.py │ ├── save_image_dumpxml.py │ ├── save_to_block.py │ ├── save_to_nfs.py │ ├── save_with_formats.py │ └── save_with_options.py │ ├── scalability │ └── define_create_vm_with_more_vcpus.py │ ├── scsi │ ├── scsi_command_test_hostdev.py │ ├── scsi_controller_driver_plug_unplug.py │ ├── scsi_device.py │ └── scsi_disk_attributes.py │ ├── secure_execution │ └── confirm_environment.py │ ├── security │ ├── rng │ │ ├── libvirt_rng.py │ │ └── virtio_rng.py │ └── virt_what_cvm.py │ ├── serial │ ├── 
serial_functional.py │ └── serial_pty_log.py │ ├── smt.py │ ├── snapshot │ ├── check_delete_and_revert_snap_event_in_multiple_branch.py │ ├── delete_disk_and_memory_snapshot.py │ ├── delete_disk_only_snapshot.py │ ├── delete_external_after_reverting_internal.py │ ├── delete_external_snap_with_references.py │ ├── delete_snapshot_after_disk_attached.py │ ├── delete_snapshot_metadata.py │ ├── memory_snapshot_delete.py │ ├── revert_disk_external_snap.py │ ├── revert_memory_only_snap.py │ ├── revert_snap_based_on_state.py │ ├── revert_snap_for_guest_with_genid.py │ ├── revert_snap_with_flags.py │ ├── revert_snapshot_after_xml_updated.py │ └── snapshot_list_with_options.py │ ├── sriov │ ├── capabilities │ │ └── sriov_capabilities_iommu_support.py │ ├── failover │ │ ├── sriov_failover_at_dt_hostdev.py │ │ ├── sriov_failover_lifecycle.py │ │ └── sriov_failover_migration.py │ ├── locked_memory_check │ │ └── sriov_with_hotplug_memory.py │ ├── network │ │ ├── sriov_check_net_info_by_network_lifecycle_cmds.py │ │ ├── sriov_check_network_connections.py │ │ └── sriov_define_or_start_network_with_pf_addr.py │ ├── nodedev │ │ ├── sriov_nodedev_non_driver.py │ │ ├── sriov_nodedev_reattach_detach.py │ │ └── sriov_reattach_detach_nodedev_in_use.py │ ├── plug_unplug │ │ ├── sriov_attach_detach_device.py │ │ ├── sriov_attach_detach_device_after_restarting_service.py │ │ ├── sriov_attach_detach_device_special_situations.py │ │ ├── sriov_attach_detach_device_with_flags.py │ │ ├── sriov_attach_detach_device_with_unsupported_settings.py │ │ ├── sriov_attach_detach_interface.py │ │ ├── sriov_attach_detach_interface_check_connections.py │ │ ├── sriov_attach_detach_interface_from_network.py │ │ ├── sriov_attach_detach_interface_special_situations.py │ │ ├── sriov_attach_detach_interface_with_flags.py │ │ ├── sriov_attach_interface_to_vm_with_vf.py │ │ └── sriov_attach_released_hostdev.py │ ├── scalability │ │ ├── sriov_scalability_max_vfs.py │ │ └── sriov_scalability_repeated_at_dt.py │ ├── 
update_device │ │ └── sriov_update_device.py │ ├── vIOMMU │ │ ├── attach_iommu_device.py │ │ ├── hotplug_device_with_iommu_enabled.py │ │ ├── intel_iommu_aw_bits.py │ │ ├── intel_iommu_with_dma_translation.py │ │ ├── intel_iommu_without_enabling_caching_mode.py │ │ ├── intel_iommu_without_ioapic.py │ │ ├── iommu_alias.py │ │ ├── iommu_device_lifecycle.py │ │ ├── iommu_device_settings.py │ │ ├── migration_iommu_device.py │ │ ├── viommu_netperf.py │ │ ├── viommu_transfer_file.py │ │ ├── viommu_unload_driver.py │ │ └── virtio_iommu_with_addtional_attributes.py │ └── vm_lifecycle │ │ ├── direct_passthrough_vm_lifecycle_start_destroy.py │ │ ├── sriov_vm_lifecycle_exclusive_check_offline_domain.py │ │ ├── sriov_vm_lifecycle_exclusive_check_running_domain.py │ │ ├── sriov_vm_lifecycle_iommu_at_dt_hostdev.py │ │ ├── sriov_vm_lifecycle_managedsave.py │ │ ├── sriov_vm_lifecycle_reboot.py │ │ ├── sriov_vm_lifecycle_start_destroy.py │ │ ├── sriov_vm_lifecycle_start_negative.py │ │ ├── sriov_vm_lifecycle_suspend_resume.py │ │ └── sriov_vm_lifecycle_unmanaged.py │ ├── storage │ ├── virsh_pool.py │ └── virsh_pool_autostart.py │ ├── storage_discard.py │ ├── svirt │ ├── dac │ │ ├── dac_seclabel_overall_domain.py │ │ └── dac_seclabel_per_device.py │ ├── dac_nfs_disk.py │ ├── dac_nfs_save_restore.py │ ├── dac_per_disk_hotplug.py │ ├── dac_start_destroy.py │ ├── dac_vm_per_image_start.py │ ├── default_dac_check.py │ ├── label_restore_rules │ │ ├── label_restore_rules_disk_access_modes.py │ │ └── label_restore_rules_on_failed_start.py │ ├── libvirt_keywrap.py │ ├── qemu_conf │ │ ├── svirt_qemu_namespace.py │ │ └── svirt_qemu_security_confined.py │ ├── selinux │ │ ├── selinux_seclabel_overall_domain.py │ │ └── selinux_seclabel_per_device.py │ ├── selinux_relabel.py │ ├── shared_storage │ │ └── svirt_nfs_disk.py │ ├── svirt_attach_disk.py │ ├── svirt_save_restore.py │ ├── svirt_start_destroy.py │ ├── svirt_undefine_define.py │ ├── svirt_virt_clone.py │ ├── svirt_virt_install.py │ └── 
umask_value │ │ └── svirt_umask_files_accessed_by_qemu.py │ ├── timer_management.py │ ├── usb │ ├── libvirt_usb_hotplug_controller.py │ ├── libvirt_usb_hotplug_device.py │ ├── usb_device.py │ └── usb_passthrough.py │ ├── virsh_cmd │ ├── domain │ │ ├── __init__.py │ │ ├── virsh_attach_detach_disk.py │ │ ├── virsh_attach_detach_disk_lxc.py │ │ ├── virsh_attach_detach_disk_matrix.py │ │ ├── virsh_attach_detach_interface.py │ │ ├── virsh_attach_detach_interface_matrix.py │ │ ├── virsh_attach_device.py │ │ ├── virsh_attach_device_matrix.py │ │ ├── virsh_attach_passthrough_no_bus.py │ │ ├── virsh_autostart.py │ │ ├── virsh_blkdeviotune.py │ │ ├── virsh_blockcommit.py │ │ ├── virsh_blockcopy.py │ │ ├── virsh_blockcopy_xml.py │ │ ├── virsh_blockjob.py │ │ ├── virsh_blockpull.py │ │ ├── virsh_blockresize.py │ │ ├── virsh_change_media.py │ │ ├── virsh_change_media_matrix.py │ │ ├── virsh_console.py │ │ ├── virsh_cpu_baseline.py │ │ ├── virsh_cpu_compare.py │ │ ├── virsh_cpu_stats.py │ │ ├── virsh_cpu_xml.py │ │ ├── virsh_create.py │ │ ├── virsh_create_lxc.py │ │ ├── virsh_define.py │ │ ├── virsh_desc.py │ │ ├── virsh_destroy.py │ │ ├── virsh_detach_device.py │ │ ├── virsh_detach_device_alias.py │ │ ├── virsh_detach_serial_device_alias.py │ │ ├── virsh_domblkerror.py │ │ ├── virsh_domblklist.py │ │ ├── virsh_domblkthreshold.py │ │ ├── virsh_domcontrol.py │ │ ├── virsh_domdirtyrate_calc.py │ │ ├── virsh_domdisplay.py │ │ ├── virsh_domfsfreeze.py │ │ ├── virsh_domfsfreeze_domfsthaw.py │ │ ├── virsh_domfsinfo.py │ │ ├── virsh_domfsthaw.py │ │ ├── virsh_domfstrim.py │ │ ├── virsh_domid.py │ │ ├── virsh_domif_setlink_getlink.py │ │ ├── virsh_domifaddr.py │ │ ├── virsh_domiflist.py │ │ ├── virsh_domiftune.py │ │ ├── virsh_domjobabort.py │ │ ├── virsh_domjobinfo.py │ │ ├── virsh_domname.py │ │ ├── virsh_dompmsuspend.py │ │ ├── virsh_domrename.py │ │ ├── virsh_domtime.py │ │ ├── virsh_domuuid.py │ │ ├── virsh_domxml_from_native.py │ │ ├── virsh_domxml_to_native.py │ │ ├── 
virsh_dumpxml.py │ │ ├── virsh_edit.py │ │ ├── virsh_emulatorpin.py │ │ ├── virsh_emulatorpin_mix.py │ │ ├── virsh_guestinfo.py │ │ ├── virsh_guestvcpus.py │ │ ├── virsh_hypervisor_cpu_baseline.py │ │ ├── virsh_hypervisor_cpu_compare.py │ │ ├── virsh_iothreadadd.py │ │ ├── virsh_iothreaddel.py │ │ ├── virsh_iothreadinfo.py │ │ ├── virsh_iothreadpin.py │ │ ├── virsh_managedsave.py │ │ ├── virsh_managedsave_extra.py │ │ ├── virsh_managedsave_restore.py │ │ ├── virsh_managedsave_special_name.py │ │ ├── virsh_managedsave_undefine.py │ │ ├── virsh_memtune.py │ │ ├── virsh_metadata.py │ │ ├── virsh_migrate.py │ │ ├── virsh_migrate_compcache.py │ │ ├── virsh_migrate_copy_storage.py │ │ ├── virsh_migrate_multi_vms.py │ │ ├── virsh_migrate_option_mix.py │ │ ├── virsh_migrate_set_get_speed.py │ │ ├── virsh_migrate_setmaxdowntime.py │ │ ├── virsh_migrate_stress.py │ │ ├── virsh_migrate_virtio_scsi.py │ │ ├── virsh_migration.py │ │ ├── virsh_numatune.py │ │ ├── virsh_qemu_agent_command.py │ │ ├── virsh_qemu_agent_command_fs.py │ │ ├── virsh_qemu_attach.py │ │ ├── virsh_qemu_monitor_blockjob.py │ │ ├── virsh_qemu_monitor_command.py │ │ ├── virsh_reboot.py │ │ ├── virsh_reset.py │ │ ├── virsh_restore.py │ │ ├── virsh_resume.py │ │ ├── virsh_save.py │ │ ├── virsh_save_image_define.py │ │ ├── virsh_save_image_edit.py │ │ ├── virsh_schedinfo_qemu_posix.py │ │ ├── virsh_schedinfo_xen_credit.py │ │ ├── virsh_screenshot.py │ │ ├── virsh_sendkey.py │ │ ├── virsh_set_get_user_sshkeys.py │ │ ├── virsh_set_user_password.py │ │ ├── virsh_setmaxmem.py │ │ ├── virsh_setvcpu.py │ │ ├── virsh_setvcpus.py │ │ ├── virsh_shutdown.py │ │ ├── virsh_sosreport.py │ │ ├── virsh_start.py │ │ ├── virsh_suspend.py │ │ ├── virsh_ttyconsole.py │ │ ├── virsh_undefine.py │ │ ├── virsh_update_device.py │ │ ├── virsh_update_device_matrix.py │ │ ├── virsh_vcpucount.py │ │ ├── virsh_vcpuinfo.py │ │ ├── virsh_vcpupin.py │ │ └── virsh_vncdisplay.py │ ├── filter │ │ ├── __init__.py │ │ ├── virsh_nwfilter_define.py 
│ │ ├── virsh_nwfilter_dumpxml.py │ │ ├── virsh_nwfilter_edit.py │ │ ├── virsh_nwfilter_list.py │ │ └── virsh_nwfilter_undefine.py │ ├── host │ │ ├── __init__.py │ │ ├── virsh_capabilities.py │ │ ├── virsh_cpu_models.py │ │ ├── virsh_deprecate_api.py │ │ ├── virsh_domcapabilities.py │ │ ├── virsh_freecell.py │ │ ├── virsh_freepages.py │ │ ├── virsh_hostname.py │ │ ├── virsh_maxvcpus.py │ │ ├── virsh_node_memtune.py │ │ ├── virsh_nodecpumap.py │ │ ├── virsh_nodecpustats.py │ │ ├── virsh_nodeinfo.py │ │ ├── virsh_nodememstats.py │ │ ├── virsh_nodesuspend.py │ │ ├── virsh_sysinfo.py │ │ ├── virsh_uri.py │ │ └── virsh_version.py │ ├── interface │ │ ├── __init__.py │ │ ├── virsh_iface.py │ │ ├── virsh_iface_bridge.py │ │ ├── virsh_iface_edit.py │ │ ├── virsh_iface_list.py │ │ └── virsh_iface_trans.py │ ├── monitor │ │ ├── __init__.py │ │ ├── virsh_backing_chain_domblkinfo.py │ │ ├── virsh_domblkinfo.py │ │ ├── virsh_domblkstat.py │ │ ├── virsh_domifstat.py │ │ ├── virsh_dominfo.py │ │ ├── virsh_dommemstat.py │ │ ├── virsh_domstate.py │ │ ├── virsh_domstats.py │ │ ├── virsh_list.py │ │ └── virsh_perf.py │ ├── network │ │ ├── __init__.py │ │ ├── virsh_net_autostart.py │ │ ├── virsh_net_create.py │ │ ├── virsh_net_define_undefine.py │ │ ├── virsh_net_destroy.py │ │ ├── virsh_net_dhcp_leases.py │ │ ├── virsh_net_dumpxml.py │ │ ├── virsh_net_edit.py │ │ ├── virsh_net_event.py │ │ ├── virsh_net_info.py │ │ ├── virsh_net_list.py │ │ ├── virsh_net_name.py │ │ ├── virsh_net_start.py │ │ ├── virsh_net_update.py │ │ └── virsh_net_uuid.py │ ├── nodedev │ │ ├── __init__.py │ │ ├── crypto_nodedev_create_destroy.py │ │ ├── virsh_nodedev_create_destroy.py │ │ ├── virsh_nodedev_detach_reattach.py │ │ ├── virsh_nodedev_dumpxml.py │ │ ├── virsh_nodedev_dumpxml_chain.py │ │ ├── virsh_nodedev_event.py │ │ ├── virsh_nodedev_list.py │ │ ├── virsh_nodedev_persistence_mdev.py │ │ └── virsh_nodedev_reset.py │ ├── pool │ │ ├── __init__.py │ │ ├── virsh_find_storage_pool_sources.py │ │ ├── 
virsh_find_storage_pool_sources_as.py │ │ ├── virsh_pool_acl.py │ │ ├── virsh_pool_auth.py │ │ ├── virsh_pool_capabilities.py │ │ ├── virsh_pool_create.py │ │ ├── virsh_pool_create_as.py │ │ └── virsh_pool_edit.py │ ├── secret │ │ ├── __init__.py │ │ ├── virsh_secret_define_undefine.py │ │ ├── virsh_secret_dumpxml.py │ │ ├── virsh_secret_list.py │ │ └── virsh_secret_set_get.py │ ├── snapshot │ │ ├── __init__.py │ │ ├── virsh_snapshot.py │ │ ├── virsh_snapshot_create_as.py │ │ ├── virsh_snapshot_disk.py │ │ ├── virsh_snapshot_dumpxml.py │ │ ├── virsh_snapshot_edit.py │ │ ├── virsh_snapshot_mode.py │ │ └── virsh_snapshot_par_cur.py │ ├── virsh_connect.py │ ├── virsh_itself.py │ ├── virsh_qemu_cmdline_core.py │ └── volume │ │ ├── __init__.py │ │ ├── virsh_vol_clone_wipe.py │ │ ├── virsh_vol_create.py │ │ ├── virsh_vol_create_from.py │ │ ├── virsh_vol_download_upload.py │ │ ├── virsh_vol_resize.py │ │ ├── virsh_volume.py │ │ ├── virsh_volume_application.py │ │ └── vol_concurrent.py │ ├── virt_admin │ ├── management │ │ ├── virt_admin_client_disconnect.py │ │ ├── virt_admin_server_clients_set.py │ │ ├── virt_admin_server_threadpool_set.py │ │ └── virt_admin_server_update_tls.py │ ├── monitor │ │ ├── virt_admin_srv_clients_info.py │ │ ├── virt_admin_srv_list.py │ │ └── virt_admin_srv_threadpool_info.py │ └── virt_admin_itself.py │ ├── virt_cmd │ ├── virt_clone.py │ ├── virt_top.py │ ├── virt_what.py │ └── virt_xml_validate.py │ ├── virtio │ └── virtio_page_per_vq.py │ ├── virtio_transitional │ ├── __init__.py │ ├── virtio_transitional_base.py │ ├── virtio_transitional_blk.py │ ├── virtio_transitional_blk_negative.py │ ├── virtio_transitional_mem_balloon.py │ ├── virtio_transitional_nic.py │ ├── virtio_transitional_rng.py │ ├── virtio_transitional_serial.py │ └── virtio_transitional_vsock.py │ ├── virtiofs │ ├── virtiofs.py │ └── virtiofs_unprivileged.py │ ├── virtual_device │ ├── input_devices.py │ ├── input_devices_plug_unplug.py │ ├── sound_device.py │ ├── 
tpm_device.py │ ├── video_devices.py │ ├── vsock.py │ └── watchdog.py │ ├── virtual_disks │ ├── at_dt_iscsi_disk.py │ ├── startup_policy.py │ ├── vhostvdpa_block_backend_type │ │ ├── blockcopy_vhostvdpa_backend_disk.py │ │ ├── define_start_vm_with_multi_vhostvdpa_backend_disks.py │ │ ├── define_start_vm_with_vhostvdpa_backend_disk.py │ │ ├── define_start_vms_with_same_vhostvdpa_backend_disk.py │ │ ├── hotplug_vhostvdpa_backend_disk.py │ │ ├── nodedev_vhostvdpa_disk.py │ │ └── vm_lifecycle_vhostvdpa_backend_disk.py │ ├── virtual_disks_alias.py │ ├── virtual_disks_audit_log_disk.py │ ├── virtual_disks_backingstore_disk.py │ ├── virtual_disks_blockresize.py │ ├── virtual_disks_ccw_addr.py │ ├── virtual_disks_cdrom_device.py │ ├── virtual_disks_ceph.py │ ├── virtual_disks_dasd.py │ ├── virtual_disks_datastore.py │ ├── virtual_disks_device_mapper.py │ ├── virtual_disks_discard_granularity.py │ ├── virtual_disks_discard_no_unref.py │ ├── virtual_disks_encryption.py │ ├── virtual_disks_filedescriptor.py │ ├── virtual_disks_geometry.py │ ├── virtual_disks_gluster.py │ ├── virtual_disks_https.py │ ├── virtual_disks_io_tuning.py │ ├── virtual_disks_iothread.py │ ├── virtual_disks_iothreads_queue.py │ ├── virtual_disks_iscsi.py │ ├── virtual_disks_luks.py │ ├── virtual_disks_metadatacache.py │ ├── virtual_disks_multiattributes.py │ ├── virtual_disks_multidisks.py │ ├── virtual_disks_multipath.py │ ├── virtual_disks_multivms.py │ ├── virtual_disks_nbd.py │ ├── virtual_disks_nvme.py │ ├── virtual_disks_optional_startuppolicy.py │ ├── virtual_disks_rerror_policy.py │ ├── virtual_disks_rotation_rate.py │ ├── virtual_disks_scsi3_persistent_reservation.py │ ├── virtual_disks_slice_operation.py │ ├── virtual_disks_snapshot_blockresize.py │ ├── virtual_disks_ssh.py │ ├── virtual_disks_transient_disk.py │ ├── virtual_disks_usb.py │ ├── virtual_disks_usb_startuppolicy.py │ └── virtual_disks_vhostuser.py │ ├── virtual_interface │ ├── hotplug_mem.py │ ├── 
interface_update_device_negative.py │ ├── interface_update_device_offline_domain.py │ ├── interface_update_device_running_domain.py │ └── vdpa_attach_duplicated_devices.py │ ├── virtual_network │ ├── address │ │ └── virtual_network_address_tftp.py │ ├── attach_detach_device │ │ ├── attach_iface_with_boot_order.py │ │ ├── attach_mtu_malformed.py │ │ └── attach_user_type_iface.py │ ├── connectivity │ │ ├── connectivity_check_bridge_interface.py │ │ ├── connectivity_check_bridge_interface_unprivileged.py │ │ ├── connectivity_check_direct_interface.py │ │ ├── connectivity_check_ethernet_interface.py │ │ ├── connectivity_check_mcast_interface.py │ │ ├── connectivity_check_network_interface.py │ │ ├── connectivity_check_tcp_tunnel_interface.py │ │ ├── connectivity_check_udp_tunnel_interface.py │ │ ├── connectivity_check_user_interface.py │ │ ├── connectivity_check_vdpa_interface.py │ │ └── netperf_nat_interface.py │ ├── domifaddr.py │ ├── driver │ │ ├── check_vhost_cpu_affinity_with_emulatorpin.py │ │ └── rx_tx_queue_size.py │ ├── elements_and_attributes │ │ ├── attribute_port_isolated.py │ │ ├── element_coalesce.py │ │ ├── element_mac_specific_addr.py │ │ ├── element_model.py │ │ └── element_sndbuf.py │ ├── hotplug │ │ ├── attach_detach_device │ │ │ ├── hotplug_hotunplug_vdpa_interface.py │ │ │ └── rollback_vdpafd_on_hotplug_failure.py │ │ └── attach_detach_interface │ │ │ └── attach_interface_with_model.py │ ├── iface_attach_detach.py │ ├── iface_bridge.py │ ├── iface_coalesce.py │ ├── iface_hotplug.py │ ├── iface_network.py │ ├── iface_nss.py │ ├── iface_options.py │ ├── iface_ovs.py │ ├── iface_rename.py │ ├── iface_stat.py │ ├── iface_target.py │ ├── iface_unprivileged.py │ ├── iface_update.py │ ├── lifecycle │ │ ├── lifecycle_vdpa_interface.py │ │ └── restart_service.py │ ├── link_state │ │ └── link_state_model_type.py │ ├── locked_memory_vdpa │ │ ├── hotplug_mem_to_vm_with_multiple_vdpa_interfaces.py │ │ ├── hotplug_mem_to_vm_with_vdpa.py │ │ ├── 
mem_lock_limit_multiple_mixed_interfaces.py │ │ └── mem_lock_limit_multiple_vdpa_interfaces.py │ ├── mtu.py │ ├── network │ │ ├── elements_and_attributes │ │ │ └── network_static_route.py │ │ └── net_update_dns.py │ ├── network_misc.py │ ├── nodedev │ │ └── nodedev_vdpa_interface.py │ ├── passt │ │ ├── passt_attach_detach.py │ │ ├── passt_connectivity_between_2vms.py │ │ ├── passt_function.py │ │ ├── passt_lifecycle.py │ │ ├── passt_negative_setting.py │ │ ├── passt_reconnect.py │ │ └── passt_transfer_file.py │ ├── qos │ │ ├── check_actual_network_throughput.py │ │ ├── check_actual_network_throughput_direct.py │ │ ├── check_bandwidth_by_domiftune.py │ │ ├── check_qos_floor.py │ │ └── test_bandwidth_boundry.py │ ├── resolve_vm_hostname_by_resolvectl.py │ ├── start_vm_with_duplicate_target_dev_name.py │ ├── update_device │ │ ├── unsupported_live_update.py │ │ ├── update_device_coalesce.py │ │ ├── update_driver_non_virtio.py │ │ ├── update_iface_link_state.py │ │ ├── update_iface_portgroup.py │ │ ├── update_iface_qos.py │ │ ├── update_iface_qos_invalid.py │ │ ├── update_iface_source.py │ │ ├── update_iface_trustGuestRxFilters.py │ │ ├── update_iface_type_live.py │ │ ├── update_iface_with_identifier.py │ │ ├── update_iface_with_options.py │ │ ├── update_iface_with_unchangable.py │ │ └── update_port_isolated.py │ └── virtual_network_multivms.py │ ├── vm_boot_with_kernel_param.py │ ├── vm_create_destroy_concurrently.py │ └── vm_start_destroy_repeatedly.py ├── lvsb ├── control └── tests │ ├── cfg │ ├── lvsb_complex_options.cfg │ ├── lvsb_date.cfg │ ├── lvsb_network_options.cfg │ └── lvsb_security_options.cfg │ └── src │ ├── lvsb_complex_options.py │ ├── lvsb_date.py │ ├── lvsb_network_options.py │ └── lvsb_security_options.py ├── provider ├── __init__.py ├── backingchain │ ├── blockcommand_base.py │ └── check_functions.py ├── bootc_image_builder │ ├── aws_utils.py │ └── bootc_image_build_utils.py ├── chardev │ ├── chardev_base.py │ └── check_points.py ├── cpu.py ├── gpu │ 
├── __init__.py │ └── gpu_base.py ├── guest_os_booting │ └── guest_os_booting_base.py ├── interface │ ├── check_points.py │ ├── interface_base.py │ └── vdpa_base.py ├── libvirt_version.py ├── libvirtd │ └── libvirtd_base.py ├── memory │ └── memory_base.py ├── migration │ ├── base_steps.py │ ├── migration_base.py │ └── migration_vtpm.py ├── numa │ └── numa_base.py ├── save │ └── save_base.py ├── security │ └── security_base.py ├── snapshot │ └── snapshot_base.py ├── sriov │ ├── __init__.py │ ├── check_points.py │ └── sriov_base.py ├── usb │ └── usb_base.py ├── v2v_vmcheck_helper.py ├── vfio │ ├── __init__.py │ ├── ap.py │ ├── ccw.py │ └── mdev_handlers.py ├── viommu │ ├── __init__.py │ └── viommu_base.py ├── virtio_rng │ └── check_points.py ├── virtual_disk │ └── disk_base.py └── virtual_network │ ├── network_base.py │ ├── passt.py │ └── tftpboot.py ├── requirements-travis.txt ├── requirements.txt ├── spell.ignore ├── tp-libvirt_review_comment_summary.rst ├── v2v ├── cfg │ └── build.cfg └── tests │ ├── cfg │ ├── convert_from_file.cfg │ ├── convert_vm_to_libvirt.cfg │ ├── convert_vm_to_ovirt.cfg │ ├── function_test_esx.cfg │ ├── function_test_xen.cfg │ ├── libnbd │ │ ├── libnbd.cfg │ │ └── nbdfuse.cfg │ ├── nbdkit │ │ └── nbdkit.cfg │ ├── specific_kvm.cfg │ └── v2v_options.cfg │ └── src │ ├── convert_from_file.py │ ├── convert_vm_to_libvirt.py │ ├── convert_vm_to_ovirt.py │ ├── function_test_esx.py │ ├── function_test_xen.py │ ├── install.py │ ├── libnbd │ ├── libnbd.py │ └── nbdfuse.py │ ├── nbdkit │ └── nbdkit.py │ ├── specific_kvm.py │ └── v2v_options.py └── virttools ├── README.md └── tests ├── cfg ├── bootc_image_builder │ ├── bootc_disk_image_build.cfg │ └── bootc_disk_image_install.cfg └── virt_install │ ├── blk_installation.cfg │ ├── hostdev_mdev.cfg │ ├── kernel_cmdline.cfg │ ├── pxe_installation.cfg │ └── vfio_installation.cfg └── src ├── bootc_image_builder ├── bootc_disk_image_build.py └── bootc_disk_image_install.py └── virt_install ├── 
blk_installation.py ├── hostdev_mdev.py ├── kernel_cmdline.py ├── pxe_installation.py └── vfio_installation.py /.gitmodules: -------------------------------------------------------------------------------- 1 | [submodule "tools/codespell"] 2 | path = tools/codespell 3 | url = git://github.com/lucasdemarchi/codespell.git 4 | -------------------------------------------------------------------------------- /.pack_exclude: -------------------------------------------------------------------------------- 1 | ./.git 2 | ./tmp/* 3 | ./shared/data/* 4 | ./logs/* 5 | *.pyc 6 | -------------------------------------------------------------------------------- /MAINTAINERS: -------------------------------------------------------------------------------- 1 | tp-libvirt maintainers 2 | ====================== 3 | 4 | The intention of this file is not to establish who owns what portions of the 5 | code base, but to provide a set of names that developers can consult when they 6 | have a question about a particular subset and also to provide a set of names 7 | you can look for on github for any pull requests you might want to get approved. 8 | 9 | In general, if you have a question, you should send an email to the Virt Test 10 | development mailing list and not any specific individual privately. 
11 | 12 | L: Avocado-devel 13 | M: Guannan Wayne Sun 14 | M: Satheesh Rajendran 15 | M: Balamuruhan S 16 | M: Kyla Zhang 17 | M: Chunfu Wen 18 | -------------------------------------------------------------------------------- /contributor_reviewer_maintainer_guidelines.rst: -------------------------------------------------------------------------------- 1 | Please follow the same best practices as avocado-vt: 2 | https://github.com/avocado-framework/avocado-vt/blob/master/PR_Review_And_Contribute_Practices.txt 3 | -------------------------------------------------------------------------------- /libguestfs/cfg/build.cfg: -------------------------------------------------------------------------------- 1 | # Copy this file to build.cfg and edit it. 2 | 3 | 4 | variants: 5 | - build: 6 | type = install 7 | # TODO: add auto build for libguestfs-tools. 8 | # Just for reminder here. :) 9 | 10 | 11 | # Note: please comment out the 'no build' line to enable the build test 12 | # no build 13 | -------------------------------------------------------------------------------- /libguestfs/deps/tarball/Augeas.tgz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autotest/tp-libvirt/32d38ca57741263e2f35c05096af12e61db4bc1a/libguestfs/deps/tarball/Augeas.tgz -------------------------------------------------------------------------------- /libguestfs/deps/tarball/file_dir.tgz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autotest/tp-libvirt/32d38ca57741263e2f35c05096af12e61db4bc1a/libguestfs/deps/tarball/file_dir.tgz -------------------------------------------------------------------------------- /libguestfs/deps/tarball/fs_mount.tgz: --------------------------------------------------------------------------------
https://raw.githubusercontent.com/autotest/tp-libvirt/32d38ca57741263e2f35c05096af12e61db4bc1a/libguestfs/deps/tarball/fs_mount.tgz -------------------------------------------------------------------------------- /libguestfs/deps/tarball/hivex.tgz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autotest/tp-libvirt/32d38ca57741263e2f35c05096af12e61db4bc1a/libguestfs/deps/tarball/hivex.tgz -------------------------------------------------------------------------------- /libguestfs/deps/tarball/inotify.tgz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autotest/tp-libvirt/32d38ca57741263e2f35c05096af12e61db4bc1a/libguestfs/deps/tarball/inotify.tgz -------------------------------------------------------------------------------- /libguestfs/deps/tarball/misc.tgz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autotest/tp-libvirt/32d38ca57741263e2f35c05096af12e61db4bc1a/libguestfs/deps/tarball/misc.tgz -------------------------------------------------------------------------------- /libguestfs/deps/tarball/selinux.tgz: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autotest/tp-libvirt/32d38ca57741263e2f35c05096af12e61db4bc1a/libguestfs/deps/tarball/selinux.tgz -------------------------------------------------------------------------------- /libguestfs/tests/cfg/guestfs_block_operations.cfg: -------------------------------------------------------------------------------- 1 | - guestfs_block_operations: 2 | type = guestfs_block_operations 3 | start_vm = "no" 4 | # Define a vm with new name for easier cleanup 5 | gf_updated_new_vm = "${main_vm}_gftemp" 6 | gf_updated_target_dev = "vdb" 7 | gf_additional_device = "/dev/${gf_updated_target_dev}" 8 | gf_updated_device_size = "512M" 9 | gf_mountpoint = "/mnt" 
10 | variants: 11 | - blockdev_info: 12 | gf_block_operation = "blockdev_info" 13 | - blockdev_ro: 14 | gf_block_operation = "blockdev_ro" 15 | - blockdev_rw: 16 | gf_block_operation = "blockdev_rw" 17 | -------------------------------------------------------------------------------- /libguestfs/tests/cfg/guestfs_file_operations.cfg: -------------------------------------------------------------------------------- 1 | - guestfs_file_operations: 2 | type = guestfs_file_operations 3 | start_vm = "no" 4 | variants: 5 | - tar_in: 6 | gf_file_operation = "tar_in" 7 | gf_temp_file = "/tmp/test_tar_in" 8 | - tar_out: 9 | gf_file_operation = "tar_out" 10 | gf_temp_file = "/tmp/test_tar_out" 11 | - copy_in: 12 | gf_file_operation = "copy_in" 13 | gf_temp_file = "/tmp/test_copy_in" 14 | - copy_out: 15 | gf_file_operation = "copy_out" 16 | gf_temp_file = "/tmp/test_copy_out" 17 | -------------------------------------------------------------------------------- /libguestfs/tests/cfg/guestfs_inspect_operations.cfg: -------------------------------------------------------------------------------- 1 | - guestfs_inspect_operations: 2 | type = guestfs_inspect_operations 3 | start_vm = "no" 4 | variants: 5 | - inspect_get: 6 | gf_inspect_operation = "inspect_get" 7 | -------------------------------------------------------------------------------- /libguestfs/tests/cfg/guestfs_list_operations.cfg: -------------------------------------------------------------------------------- 1 | - guestfs_list_operations: 2 | type = guestfs_list_operations 3 | start_vm = "no" 4 | inspector = True 5 | variants: 6 | - list_with_mount: 7 | list_operation = "list_with_mount" 8 | - list_without_mount: 9 | list_operation = "list_without_mount" 10 | - list_without_launch: 11 | list_operation = "list_without_launch" 12 | - list_with_inspector: 13 | list_operation = "list_with_inspector" 14 | -------------------------------------------------------------------------------- 
/libguestfs/tests/cfg/guestfs_operated_disk.cfg: -------------------------------------------------------------------------------- 1 | - guestfs_operated_disk: 2 | type = guestfs_operated_disk 3 | start_vm = "no" 4 | # Disk operations may take a long time 5 | # Reset it according to your needs 6 | timeout = 360 7 | variants: 8 | - cloned_vm: 9 | disk_operation = "cloned_vm" 10 | cloned_mac = "CREATED" 11 | new_filesystem_path = "" 12 | - sparsified_vm: 13 | disk_operation = "sparsified_vm" 14 | - resized_vm: 15 | disk_operation = "resized_vm" 16 | resize_part_num = "1" 17 | # This is the expanded size of the resized partition 18 | resized_size = "+1G" 19 | # This is the expansion size of the whole disk 20 | increased_size = "+10G" 21 | -------------------------------------------------------------------------------- /libguestfs/tests/cfg/guestfs_part_operations.cfg: -------------------------------------------------------------------------------- 1 | - guestfs_part_operations: 2 | type = guestfs_part_operations 3 | start_vm = "no" 4 | # Define a vm with new name for easier cleanup 5 | gf_updated_new_vm = "${main_vm}_gftemp" 6 | gf_updated_target_dev = "vdb" 7 | gf_additional_device = "/dev/${gf_updated_target_dev}" 8 | gf_updated_device_size = "512M" 9 | gf_mountpoint = "/mnt" 10 | variants: 11 | - formatted_part: 12 | gf_part_operation = "formatted_part" 13 | - unformatted_part: 14 | gf_part_operation = "unformatted_part" 15 | - formatted_disk: 16 | gf_part_operation = "formatted_disk" 17 | - partition_info: 18 | gf_part_operation = "partition_info" 19 | - fscked_partition: 20 | gf_part_operation = "fscked_partition" 21 | -------------------------------------------------------------------------------- /libguestfs/tests/cfg/guestmount.cfg: -------------------------------------------------------------------------------- 1 | - guestmount: 2 | type = guestmount 3 | start_vm = "no" 4 | gm_inspector = "no" 5 | gm_mount = "no" 6 | gm_readonly = "no" 7 | gm_tempfile = "/home/gm_tmp" 8 |
gm_mountpoint = "/mountpoint" 9 | variants: 10 | - normal_test: 11 | status_error = "no" 12 | variants: 13 | - with_inspector: 14 | gm_inspector = "yes" 15 | - with_mount: 16 | gm_mount = "yes" 17 | - error_test: 18 | status_error = "yes" 19 | variants: 20 | - with_readonly: 21 | gm_readonly = "yes" 22 | - without_mount_filesystems: 23 | - with_vm_running: 24 | start_vm = "yes" 25 | gm_inspector = "yes" 26 | - not_exist_vm: 27 | gm_vm_ref = "NOT_EXIST_VM" 28 | -------------------------------------------------------------------------------- /libguestfs/tests/cfg/virt_file_operations.cfg: -------------------------------------------------------------------------------- 1 | - virt_file_operations: 2 | type = virt_file_operations 3 | start_vm = "no" 4 | variants: 5 | - tar_in: 6 | vt_file_operation = "virt_tar_in" 7 | vt_temp_file = "/tmp/test_virt_tar_in" 8 | - tar_out: 9 | vt_file_operation = "virt_tar_out" 10 | vt_temp_file = "/tmp/test_virt_tar_out" 11 | - copy_in: 12 | vt_file_operation = "virt_copy_in" 13 | vt_temp_file = "/tmp/test_virt_copy_in" 14 | - copy_out: 15 | vt_file_operation = "virt_copy_out" 16 | vt_temp_file = "/tmp/test_virt_copy_out" 17 | -------------------------------------------------------------------------------- /libguestfs/tests/cfg/virt_inspect_operations.cfg: -------------------------------------------------------------------------------- 1 | - virt_inspect_operations: 2 | type = virt_inspect_operations 3 | start_vm = "no" 4 | is_redhat = "no" 5 | variants: 6 | - inspect_get: 7 | vt_inspect_operation = "inspect_get" 8 | -------------------------------------------------------------------------------- /libguestfs/tests/cfg/virt_list_operations.cfg: -------------------------------------------------------------------------------- 1 | - virt_list_operations: 2 | type = virt_list_operations 3 | start_vm = "no" 4 | -------------------------------------------------------------------------------- /libguestfs/tests/cfg/virt_part_operations.cfg: 
-------------------------------------------------------------------------------- 1 | - virt_part_operations: 2 | type = virt_part_operations 3 | start_vm = "no" 4 | # Define a vm with new name for easier cleanup 5 | # For historical reasons, they have the prefix 'gf'. :D 6 | # It should be fixed in the future. 7 | gf_updated_new_vm = "${main_vm}_vttemp" 8 | gf_updated_target_dev = "vdb" 9 | gf_additional_device = "/dev/${gf_updated_target_dev}" 10 | gf_updated_device_size = "512M" 11 | gm_inspector = "no" 12 | vt_mountpoint = "/mnt" 13 | variants: 14 | - formatted_part: 15 | vt_part_operation = "formatted_part" 16 | - unformatted_part: 17 | vt_part_operation = "unformatted_part" 18 | -------------------------------------------------------------------------------- /libguestfs/tests/cfg/virt_sysprep.cfg: -------------------------------------------------------------------------------- 1 | - virt_sysprep: 2 | type = virt_sysprep 3 | sysprep_hostname = "sysprep" 4 | start_vm = "no" 5 | kill_vm = "yes" 6 | kill_vm_before_start = "yes" 7 | # Specify this parameter for other filesystem types (ext3, ext2) if necessary 8 | sysprep_file_system = "ext4" 9 | variants: 10 | - guest_target: 11 | sysprep_target = "guest" 12 | - image_target: 13 | sysprep_target = "image" 14 | variants: 15 | - virt_clone: 16 | sysprep_type = "clone" 17 | - virt_resize: 18 | sysprep_type = "resize" 19 | - virt_sparsify: 20 | only qcow2 21 | sysprep_type = "sparsify" 22 | -------------------------------------------------------------------------------- /libguestfs/tests/cfg/virt_volume_operations.cfg: -------------------------------------------------------------------------------- 1 | - virt_volume_operations: 2 | type = virt_volume_operations 3 | start_vm = "no" 4 | # Define a vm with new name for easier cleanup 5 | gf_updated_new_vm = "${main_vm}_vttemp" 6 | gf_updated_target_dev = "vdb" 7 | gf_additional_device = "/dev/${gf_updated_target_dev}" 8 | gf_updated_device_size = "512M" 9 | gm_inspector = "no" 10 |
vt_mountpoint = "/mnt" 11 | variants: 12 | - created_volume_group: 13 | vt_volume_operation = "created_volume_group" 14 | - created_volume: 15 | vt_volume_operation = "created_volume" 16 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/backingchain/blockcommit/blockcommit_with_async_option.cfg: -------------------------------------------------------------------------------- 1 | - backingchain.blockcommit.async_option: 2 | type = blockcommit_with_async_option 3 | start_vm = "yes" 4 | target_disk = "vda" 5 | snap_num = 4 6 | variants case: 7 | - async: 8 | base_image_suffix = 1 9 | top_option = " --top ${target_disk}" 10 | expected_chain = "1>base" 11 | commit_options = "--active --wait --verbose --async" 12 | - async_timeout: 13 | base_image_suffix = 1 14 | expected_chain = "4>3>2>1>base" 15 | commit_options = " --active --wait --verbose --bytes 1 --async --timeout 1" 16 | event_cmd = " qemu-monitor-event %s --loop" 17 | expected_job = "BLOCK_JOB_CANCELLED" 18 | variants: 19 | - file_disk: 20 | disk_type = "file" 21 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/backingchain/blockcommit/blockcommit_with_keep_overlay.cfg: -------------------------------------------------------------------------------- 1 | - backingchain.blockcommit.keep_overlay: 2 | type = blockcommit_with_keep_overlay 3 | start_vm = "yes" 4 | commit_options = " --keep-overlay" 5 | target_disk = "vdb" 6 | disk_type = "file" 7 | snap_num = 4 8 | disk_dict = {"type_name":"${disk_type}", "target":{"dev": "${target_disk}", "bus": "virtio"}, "driver": {"name": "qemu", "type":"qcow2"}} 9 | variants: 10 | - positive_test: 11 | variants test_scenario: 12 | - active: 13 | base_image_suffix = 3 14 | expected_chain = "4>3>2>1>base" 15 | - negative_test: 16 | variants test_scenario: 17 | - inactive: 18 | top_image_suffix = 3 19 | err_msg = "invalid argument: active commit requested" 20 | 
-------------------------------------------------------------------------------- /libvirt/tests/cfg/backingchain/blockcommit_basic_function.cfg: -------------------------------------------------------------------------------- 1 | - backingchain.blockcommit.basic_function: 2 | type = blockcommit_basic_function 3 | start_vm = 'no' 4 | status_error = 'no' 5 | variants: 6 | - block_disk: 7 | disk_type = 'block' 8 | driver_type = 'raw' 9 | variants: 10 | - commit_tb: 11 | case_name = 'commit_top_to_base' 12 | check_func = 'base_top' 13 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/backingchain/blockcopy.cfg: -------------------------------------------------------------------------------- 1 | - backingchain.blockcopy: 2 | type = blockcopy 3 | variants: 4 | - positive_test: 5 | variants case: 6 | - reuse_external: 7 | start_vm = 'yes' 8 | - custom_cluster_size: 9 | func_supported_since_libvirt_ver = (6, 10, 1) 10 | unsupported_err_msg = "This libvirt version doesn't support feature of custom cluster size" 11 | start_vm = 'yes' 12 | image_format = 'qcow2' 13 | image_size = '100M' 14 | image_cluster_size = '1024' 15 | source_image_name = 'source_image' 16 | target_image_name = 'target_image' 17 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/backingchain/blockcopy/blockcopy_with_async_option.cfg: -------------------------------------------------------------------------------- 1 | - backingchain.blockcopy.async_option: 2 | type = blockcopy_with_async_option 3 | start_vm = "yes" 4 | target_disk = "vda" 5 | variants case: 6 | - async: 7 | blockcopy_options = " --wait --verbose --async --transient-job --pivot" 8 | expected_chain = "copy_file" 9 | - async_timeout: 10 | blockcopy_options = " --wait --verbose --async --timeout 1 --bytes 1 --transient-job" 11 | event_cmd = " qemu-monitor-event {} --loop" 12 | expected_job = "BLOCK_JOB_CANCELLED" 13 | 
-------------------------------------------------------------------------------- /libvirt/tests/cfg/backingchain/blockcopy/blockcopy_with_disk_driver_attributes.cfg: -------------------------------------------------------------------------------- 1 | - backingchain.blockcopy.disk.driver_attr: 2 | type = blockcopy_with_disk_driver_attributes 3 | target_disk = "vda" 4 | variants: 5 | - metadata_cache: 6 | func_supported_since_libvirt_ver = (7, 0, 0) 7 | image_path = "/tmp/backingchain_copy.img" 8 | unit = "bytes" 9 | max_size = 1024 10 | disk_dict = {"type_name":"file", "target":{"dev": "${target_disk}", "bus": "virtio"}, "driver_metadatacache":{"max_size":${max_size}, "max_size_unit":"${unit}"}, "driver": {"name": "qemu", "type":"qcow2"}, 'source': {'attrs': {'file': '${image_path}','index':'3'}}} 11 | blockcopy_option = " --xml {} --transient-job --wait --verbose --pivot" 12 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/backingchain/blockcopy/blockcopy_with_syncwrites_option.cfg: -------------------------------------------------------------------------------- 1 | - backingchain.blockcopy.synchronous_writes: 2 | type = blockcopy_with_syncwrites_option 3 | variants: 4 | - positive_test: 5 | func_supported_since_libvirt_ver = (8, 0, 0) 6 | func_supported_since_qemu_kvm_ver = (3, 0, 0) 7 | target_disk = "vdb" 8 | disk_type = "file" 9 | disk_dict = {"type_name":"${disk_type}", "target":{"dev": "${target_disk}", "bus": "virtio"}, "driver": {"name": "qemu", "type":"qcow2"}} 10 | blockcopy_option = " --synchronous-writes --wait --verbose --transient-job" 11 | abort_option = " --pivot" 12 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/backingchain/blockcopy/blockcopy_with_zero_length_disk.cfg: -------------------------------------------------------------------------------- 1 | - backingchain.blockcopy.zero_length_disk: 2 | type = blockcopy_with_zero_length_disk 3 | 
start_vm = "no" 4 | target_disk = "vdb" 5 | blockcopy_option = " --wait --verbose" 6 | blockcopy_msg = "Now in mirroring phase" 7 | done_job_msg = "Block Copy: [100 %]" 8 | no_job_msg = "No current block job" 9 | variants: 10 | - with_transient_job: 11 | disk_type = "file" 12 | disk_size = "1G" 13 | disk_image_format = "raw" 14 | disk_dict = {"type_name":"${disk_type}", "target":{"dev": "${target_disk}", "bus": "virtio"}, "driver": {"name": "qemu", "type":"${disk_image_format}"}} 15 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/backingchain/blockjob/blockjob_pivot_after_irregular_operations.cfg: -------------------------------------------------------------------------------- 1 | - backingchain.blockjob.pivot: 2 | type = blockjob_pivot_after_irregular_operations 3 | start_vm = 'yes' 4 | target_disk = 'vda' 5 | pivot_option = " --pivot" 6 | abort_option = " --abort" 7 | variants test_scenario: 8 | - before_finish: 9 | blockcopy_options = "blockcopy %s %s %s --transient-job --bytes 200" 10 | err_msg = "not ready for pivot yet" 11 | - delete_copy_file: 12 | blockcopy_options = " --transient-job --wait --verbose " 13 | err_msg = "No such file or directory" 14 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/backingchain/blockjob/blockjob_with_async_option.cfg: -------------------------------------------------------------------------------- 1 | - backingchain.blockjob.async: 2 | type = blockjob_with_async_option 3 | start_vm = "yes" 4 | target_disk = "vda" 5 | blockcopy_options = " --wait --verbose --transient-job --bandwidth 1" 6 | variants: 7 | - async_option: 8 | blockjob_options = " --async" 9 | event_cmd = "qemu-monitor-event %s --loop" 10 | expected_event = "BLOCK_JOB_CANCELLED" 11 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/backingchain/blockjob/blockjob_with_bandwidth_option.cfg: 
-------------------------------------------------------------------------------- 1 | - backingchain.blockjob.bandwidth: 2 | type = blockjob_with_bandwidth_option 3 | start_vm = "yes" 4 | target_disk = "vda" 5 | variants: 6 | - value_updated: 7 | update_times = 3 8 | option_1 = " --bandwidth 2" 9 | option_2 = " --bandwidth 3" 10 | option_3 = " --bytes 1000" 11 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/backingchain/blockjob/blockjob_with_raw_option.cfg: -------------------------------------------------------------------------------- 1 | - backingchain.blockjob.raw: 2 | type = blockjob_with_raw_option 3 | start_vm = 'yes' 4 | disk = 'vda' 5 | variants: 6 | - raw_list: 7 | option_value = ' --raw' 8 | case_name = 'raw_list' 9 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/backingchain/blockjob_options.cfg: -------------------------------------------------------------------------------- 1 | - backingchain.blockjob.options: 2 | type = blockjob_options 3 | start_vm = 'yes' 4 | status_error = 'no' 5 | disk = 'vda' 6 | variants: 7 | - option_raw: 8 | option_value = ' --raw' 9 | case_name = 'blockjob_raw' 10 | - option_async: 11 | option_value = ' --async' 12 | case_name = 'blockjob_async' 13 | bandwidth = 1 14 | event_cmd = " qemu-monitor-event %s --loop" 15 | expected_event = "BLOCK_JOB_CANCELLED" 16 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/backingchain/blockpull/blockpull_with_async.cfg: -------------------------------------------------------------------------------- 1 | - backingchain.blockpull.async_option: 2 | type = blockpull_with_async 3 | start_vm = "yes" 4 | target_disk = "vda" 5 | snap_num = 4 6 | variants case: 7 | - async_timeout: 8 | base_option = " --base ${target_disk}[2]" 9 | pull_options = " --wait --verbose --bytes 1 --async --timeout 1" 10 | event_cmd = "qemu-monitor-event {} --loop" 
11 | expected_job = "BLOCK_JOB_CANCELLED" 12 | variants: 13 | - file_disk: 14 | disk_type = "file" 15 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/backingchain/blockresize.cfg: -------------------------------------------------------------------------------- 1 | - backingchain.blockresize: 2 | type = blockresize 3 | start_vm = 'yes' 4 | status_error = 'no' 5 | variants: 6 | - positive_test: 7 | status_error = "no" 8 | variants: 9 | - raw_image: 10 | driver_type = 'raw' 11 | case_name = 'raw_disk_blockresize' 12 | attach_disk_extra_options = ' --subdriver raw' 13 | new_disk = 'vdd' 14 | variants: 15 | - size_g: 16 | expected_block_size = '15g' 17 | - size_b: 18 | expected_block_size = '1024b' 19 | - size_mb: 20 | expected_block_size = '1024m' 21 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/backingchain/domblkthreshold.cfg: -------------------------------------------------------------------------------- 1 | - backingchain.domblkthreshold: 2 | type = domblkthreshold 3 | start_vm = 'yes' 4 | variants: 5 | - positive_test: 6 | variants: 7 | - inactivate_layer: 8 | case_name = 'domblkthreshold_inactivate_layer' 9 | domblk_threshold = '1' 10 | commit_options = '--active' 11 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/backingchain/event_checking_test/commit_pull_copy_event.cfg: -------------------------------------------------------------------------------- 1 | - backingchain.event_checking.commit_pull_copy: 2 | type = commit_pull_copy_event 3 | start_vm = "yes" 4 | target_disk = "vda" 5 | block_options = " --wait --verbose" 6 | event_cmd = "event %s --all --loop" 7 | snap_num = "1" 8 | variants block_cmd: 9 | - blockcommit: 10 | special_options = " --active" 11 | expected_event = ['Active Block Commit for vda ready', 'Active Block Commit for vda completed'] 12 | - blockpull: 13 | special_options = "" 14 
| expected_event = ['Block Pull for vda completed'] 15 | - blockcopy: 16 | special_options = " --transient-job" 17 | expected_event = ['Block Copy for vda ready', 'Block Copy for vda completed'] 18 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/backingchain/hotplug_test/hot_un_plug.cfg: -------------------------------------------------------------------------------- 1 | - backingchain.hotplug_test.hot_un_plug: 2 | type = hot_un_plug 3 | start_vm = "yes" 4 | target_disk = "vdb" 5 | common_options = " --wait --verbose" 6 | snap_num = 4 7 | snap_extra = " --diskspec vda,snapshot=no" 8 | variants block_cmd: 9 | - blockcommit: 10 | top_image_suffix = 3 11 | base_image_suffix = 2 12 | expected_chain = "4>2>1>base" 13 | - blockpull: 14 | base_image_suffix = 2 15 | expected_chain = "4>2>1>base" 16 | - blockcopy: 17 | blockcopy_option = " --shallow --transient-job --pivot" 18 | expected_chain = "copy_file>3>2>1>base" 19 | variants: 20 | - file_disk: 21 | disk_type = "file" 22 | disk_dict = {"type_name": "${disk_type}", "target":{"dev": "${target_disk}", "bus": "virtio"}, "driver": {"name": "qemu", "type": "qcow2"}} 23 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/backingchain/lifecycle_test/check_mirror_with_restart_libvirtd.cfg: -------------------------------------------------------------------------------- 1 | - backingchain.lifecycle_test.check_mirror_with_restart_libvirtd: 2 | type = check_mirror_with_restart_libvirtd 3 | start_vm = "yes" 4 | target_disk = "vda" 5 | snap_num = 1 6 | variants block_cmd: 7 | - blockcommit: 8 | block_options = " --active" 9 | - blockcopy: 10 | block_options = " --transient-job --shallow" 11 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/backingchain/negative_scenario/blockjob_with_invalid_operation.cfg: 
-------------------------------------------------------------------------------- 1 | - backingchain.blockjob.invalid_operation: 2 | type = blockjob_with_invalid_operation 3 | start_vm = "yes" 4 | target_disk = "vda" 5 | variants test_scenario: 6 | - not_existing_path: 7 | variants: 8 | - disk: 9 | path = "vdx" 10 | err_msg = "error: invalid argument: disk '${path}' not found in domain" 11 | - image: 12 | path = "/var/lib/libvirt/images/xxx.img" 13 | err_msg = "error: invalid argument: disk '${path}' not found in domain" 14 | - release_job: 15 | blockcopy_option = " --reuse-external %s --transient-job --wait --verbose" 16 | blockcopy_err = "Copy failed" 17 | less_image_size = "10M" 18 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/backingchain/negative_scenario/blockpull_with_invalid_base.cfg: -------------------------------------------------------------------------------- 1 | - backingchain.negative.blockpull.invalid_base: 2 | type = blockpull_with_invalid_base 3 | start_vm = "yes" 4 | target_disk = "vda" 5 | snap_num = 4 6 | pull_options = "--base %s --wait --verbose" 7 | error_msg = "error: invalid argument: could not find image " 8 | variants case: 9 | - active_as_base: 10 | - not_existing_path: 11 | not_exist_file = "/var/lib/libvirt/images/xxxx.img" 12 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/backingchain/negative_scenario/interrupt_blockcopy.cfg: -------------------------------------------------------------------------------- 1 | - backingchain.negative.interrupt_blockcopy: 2 | type = interrupt_blockcopy 3 | start_vm = "no" 4 | variants: 5 | - with_transient_job: 6 | target_disk = "vda" 7 | blockcopy_options = " ${copy_path} --transient-job --wait --verbose --async" 8 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/backingchain/repeatability_test/commit_pull_copy_after_snap.cfg: 
-------------------------------------------------------------------------------- 1 | - backingchain.repeatability_test.commit_pull_copy_after_snap: 2 | type = commit_pull_copy_after_snap 3 | start_vm = "yes" 4 | common_option = " --wait --verbose" 5 | target_disk = "vdb" 6 | status_error = "no" 7 | snap_num = 1 8 | snap_extra = " --diskspec vda,snapshot=no" 9 | disk_type = "file" 10 | disk_dict = {"type_name": "${disk_type}", "target":{"dev": "${target_disk}", "bus": "virtio"}, "driver": {"name": "qemu", "type": "qcow2"}} 11 | variants block_cmd: 12 | - blockcommit: 13 | block_option = " --active --pivot" 14 | - blockpull: 15 | - blockcopy: 16 | copy_image = "/tmp/test.copy" 17 | block_option = " ${copy_image} --transient-job --pivot" 18 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/backingchain/with_disk_attributes_test/commit_pull_with_disk_source_attributes.cfg: -------------------------------------------------------------------------------- 1 | - backingchain.with_disk_attributes_test.with_source_attributes: 2 | type = commit_pull_with_disk_source_attributes 3 | start_vm = no 4 | common_options = " --wait --verbose" 5 | target_disk = "vdb" 6 | disk_type = "file" 7 | snap_num = 3 8 | disk_dict = {"type_name":"${disk_type}", "target":{"dev": "${target_disk}", "bus": "virtio"}, "driver": {"name": "qemu", "type":"qcow2"}} 9 | variants: 10 | - with_datastore: 11 | func_supported_since_libvirt_ver = (10, 10, 0) 12 | data_file_option = " -o data_file=%s" 13 | variants block_cmd: 14 | - blockcommit: 15 | blockcommit_options = " --active --pivot" 16 | expected_chain_index = "base" 17 | - blockpull: 18 | expected_chain_index = "3" 19 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/bios/virsh_boot_reset_nvram.cfg: -------------------------------------------------------------------------------- 1 | - virsh.boot_reset_nvram: 2 | type = virsh_boot_reset_nvram 3 | 
start_vm = "no" 4 | only q35 5 | option = "--reset-nvram" 6 | os_attrs = {'os_firmware': 'efi', 'machine': 'q35', 'type': 'hvm'} 7 | func_supported_since_libvirt_ver = (8, 1, 0) 8 | err_msg = "system firmware block device\s*has invalid size" 9 | variants test_case: 10 | - start_destroyed_vm: 11 | - start_managedsaved_vm: 12 | - restore_saved_vm: 13 | output_file = 'save_file' 14 | - create_destroyed_vm: 15 | output_file = 'dumpxml_file' 16 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/bios/vm_boot_nvram_source.cfg: -------------------------------------------------------------------------------- 1 | - vm_boot.nvram_source: 2 | type = vm_boot_nvram_source 3 | start_vm = no 4 | variants case: 5 | - file: 6 | func_supported_since_libvirt_ver = (8, 5, 0) 7 | start_vm = 'yes' 8 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/chardev/log/chardev_with_log_file.cfg: -------------------------------------------------------------------------------- 1 | - chardev.log: 2 | type = chardev_with_log_file 3 | start_vm = 'no' 4 | log_file = "/var/log/libvirt/chardev_test.log" 5 | variants: 6 | - console: 7 | chardev = "console" 8 | check_cmd = "uname -r" 9 | variants console_type: 10 | - pty: 11 | chardev_type = "pty" 12 | device_dict = "{'type_name':'${chardev_type}','log': {'file': '${log_file}', 'append':'off'}}" 13 | - unix: 14 | chardev_type = 'unix' 15 | source_mode = "bind" 16 | source_path = "/tmp/foo" 17 | access_cmd = "socat stdin unix-connect:${source_path}" 18 | device_dict = "{'type_name':'${chardev_type}','log': {'file': '${log_file}', 'append':'off'}, 'sources': [{'attrs': {'path': '${source_path}', 'mode':'${source_mode}'}}]}" 19 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/cpu/aarch64_gic_version.cfg: -------------------------------------------------------------------------------- 1 | - 
aarch64_gic_version: 2 | type = aarch64_gic_version 3 | start_vm = "yes" 4 | check_gic_command_host = "grep GIC /proc/interrupts | head -1" 5 | check_gic_command_guest = "grep GIC /proc/interrupts | head -1" 6 | only aarch64 7 | variants: 8 | - gic_version_2: 9 | gic_version = '2' 10 | err_msg = "error: Failed to start domain" 11 | status_error = "yes" 12 | - gic_version_3: 13 | gic_version = '3' 14 | status_error = "no" 15 | - gic_version_host: 16 | gic_version = 'host' 17 | status_error = "no" 18 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/cpu/diagnose_data.cfg: -------------------------------------------------------------------------------- 1 | - diagnose_data: 2 | type = diagnose_data 3 | only s390-virtio 4 | variants: 5 | - with_diag318: 6 | els = require 7 | diag318 = require 8 | check_stat = yes 9 | variants: 10 | - hotplug: 11 | final_number_of_vcpus = 248 12 | plug = hot 13 | - coldplug: 14 | final_number_of_vcpus = 248 15 | plug = cold 16 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/cpu/diagnose_spinlock_yield_forward.cfg: -------------------------------------------------------------------------------- 1 | - diagnose_9c_forward: 2 | type = diagnose_spinlock_yield_forward 3 | only s390-virtio 4 | variants: 5 | - forwarding_on: 6 | diag9c_forward_hz = 10000 7 | - forwarding_off: 8 | diag9c_forward_hz = 0 9 | 10 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/cpu/lifecycle_time.cfg: -------------------------------------------------------------------------------- 1 | - lifecycle.time: 2 | type = lifecycle_time 3 | variants: 4 | - suspend_resume: 5 | start_vm = yes 6 | sleep_before_resume = 5 7 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/cpu/multi_vms_with_stress.cfg: 
-------------------------------------------------------------------------------- 1 | - multi_vms_with_stress: 2 | type = multi_vms_with_stress 3 | memory = 4194304 4 | vm_names = vm2 vm3 5 | stress_args = '--cpu 4 --io 4 --vm 2 --vm-bytes 128M &' 6 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/cpu/topology.cfg: -------------------------------------------------------------------------------- 1 | - topology: 2 | type = topology 3 | only Linux 4 | start_vm = no 5 | current_vcpus = 4 6 | max_vcpus = 4 7 | cores = 2 8 | threads = 1 9 | sockets = 2 10 | variants: 11 | - positive: 12 | variants: 13 | - hotplug: 14 | current_vcpus = 3 15 | 16 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/cpu/vcpu_cve.cfg: -------------------------------------------------------------------------------- 1 | - vcpu.cve: 2 | type = vcpu_cve 3 | take_regular_screendumps = no 4 | start_vm = no 5 | variants cpu_mode: 6 | - host_model: 7 | no aarch64 8 | cpu_mode = 'host-model' 9 | - host_passthrough: 10 | cpu_mode = 'host-passthrough' 11 | variants test_case: 12 | - guest_cpu_cve_status: 13 | search_str = 'Vulnerable' 14 | check_cmd = 'cat /sys/devices/system/cpu/vulnerabilities/' 15 | search_file_list = ['l1tf', 'mds', 'meltdown', 'spec_store_bypass', 'spectre_v1', 'spectre_v2', 'tsx_async_abort'] 16 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/cpu/vcpu_max_topology.cfg: -------------------------------------------------------------------------------- 1 | - vcpu.max_topology: 2 | type = vcpu_max_topology 3 | memory = 4194304 4 | vcpus_placement = "static" 5 | sockets = "" 6 | cores = "" 7 | clusters = "" 8 | variants: 9 | - one_socket: 10 | sockets = "one" 11 | cores = "many" 12 | variants: 13 | - default_clusters: 14 | clusters = "" 15 | - many_clusters: 16 | clusters = "many" 17 | - one_core_per_socket: 18 | sockets = "many" 19 
| cores = "one" 20 | clusters = "" 21 | - many_cores_per_socket: 22 | sockets = "many" 23 | cores = "many" 24 | variants: 25 | - default_clusters: 26 | clusters = "" 27 | - many_clusters: 28 | clusters = "many" 29 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/cpu/vcpu_metrics.cfg: -------------------------------------------------------------------------------- 1 | - virsh.vcpu_metrics: 2 | take_regular_screendumps = no 3 | type = "vcpu_metrics" 4 | variants: 5 | - normal_test: 6 | variants test_case: 7 | - with_unprivileged_user: 8 | start_vm = no 9 | func_supported_since_libvirt_ver = (9, 0, 0) 10 | unprivileged_user = 'domstats_testuser' 11 | interface_attrs = {'type_name': 'user'} 12 | domstats_option = '--cpu-total' 13 | cpu_stats_option = '--total' 14 | unprivileged_boot_disk_path = '/home/${unprivileged_user}' 15 | unprivileged_user_dumpxml_path = '/tmp/domstats_unprivileged_user' 16 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/cpu/vcpu_nested.cfg: -------------------------------------------------------------------------------- 1 | - vcpu_nested: 2 | type = vcpu_nested 3 | start_vm = "no" 4 | need_nested = 'yes' 5 | variants: 6 | - positive_test: 7 | variants: 8 | - change_vm_cpu: 9 | case = 'change_vm_cpu' 10 | cpu_old_mode = 'host-model' 11 | cpu_new_mode = 'host-passthrough' 12 | cmd_in_guest = "stat %s|grep '^Modify: '|cut -d' ' -f2-3" 13 | - check_nested_capability: 14 | case = 'check_nested_capability' 15 | cpu_old_mode = 'host-model' 16 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/cpu/vcpupin.cfg: -------------------------------------------------------------------------------- 1 | - vcpupin: 2 | type = vcpupin 3 | vcpu_placement = 'static' 4 | vcpu_max = 4 5 | vcpu_current = 2 6 | variants: 7 | - positive: 8 | variants test_case: 9 | - vcpupin_live_active_vm: 10 | 
cmd_for_inactive_dumpxml = "grep ' 2 | lxc_test_vm1 3 | 500000 4 | 500000 5 | 1 6 | i 7 | exe 8 | /bin/sh 9 | 10 | 11 | destroy 12 | restart 13 | destroy 14 | 15 | /usr/libexec/libvirt_lxc 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/memory/allocpages.cfg: -------------------------------------------------------------------------------- 1 | - virsh.allocpages: 2 | type = allocpages 3 | start_vm = no 4 | variants test_case: 5 | - s390x_1M: 6 | only s390-virtio 7 | page_size = "1M" 8 | page_count = 1024 9 | - with_options: 10 | page_size = 2048 11 | s390-virtio: 12 | page_size = 1024 13 | page_count = 3 14 | cmd_check_freepage = 'cat /sys/devices/system/node/node{}/hugepages/hugepages-${page_size}kB/free_hugepages' 15 | - readonly: 16 | page_size = 2048 17 | s390-virtio: 18 | page_size = 1024 19 | readonly = True 20 | page_count = 3 21 | cmd_check_freepage = 'cat /sys/devices/system/node/node{}/hugepages/hugepages-${page_size}kB/free_hugepages' 22 | err_msg = 'operation forbidden: read only access prevents virNodeAllocPages' 23 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/memory/gmap.cfg: -------------------------------------------------------------------------------- 1 | - gmap: 2 | type = gmap 3 | start_vm = no 4 | only s390-virtio 5 | kvm_module_parameters = 'nested=1' 6 | variants: 7 | - l3_shadow_table_counters: 8 | l2_mem = 3906250 9 | target_tag = "mount_tag0" 10 | fs_dict = {'accessmode': 'passthrough', 'source': {'dir': 'replace_in_code'}, "target": {'dir': '${target_tag}'}, 'driver': {'type': 'virtiofs'}} 11 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/memory/memballoon.cfg: -------------------------------------------------------------------------------- 1 | - memballoon: 2 | type = memballoon 3 | start_vm = no 4 | variants group: 5 | - alias: 6 | 
destroy_after = 'yes' 7 | variants model: 8 | - virtio: 9 | alias_name = 'ua-c80aba6e-b6d8-448b-ab6e-8c7b5c29f353' 10 | has_alias = 'yes' 11 | - none: 12 | alias_name = 'balloon0' 13 | has_alias = 'no' 14 | - period: 15 | variants case: 16 | - memstat: 17 | start_vm = 'yes' 18 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/memory/secure_dump.cfg: -------------------------------------------------------------------------------- 1 | - secure_dump: 2 | type = secure_dump 3 | start_vm = yes 4 | only s390-virtio 5 | variants: 6 | - decrypt: 7 | comm_key = 0123456789012345678901234567890 8 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/npiv/npiv_restart_libvirtd.cfg: -------------------------------------------------------------------------------- 1 | - npiv.restart_libvirtd: 2 | type = npiv_restart_libvirtd 3 | start_vm = "no" 4 | wwnn = "ENTER.YOUR.WWNN" 5 | wwpn = "ENTER.YOUR.WWPN" 6 | fc_host_dir = "/sys/class/fc_host" 7 | variants: 8 | - positive_testing: 9 | status_error = "yes" 10 | variants: 11 | - create_vhba_by_echo: 12 | create_vhba_method = "echo" 13 | - create_vhba_by_virsh: 14 | create_vhba_method = "virsh" 15 | variants: 16 | - destroy_vhba_by_echo: 17 | destroy_vhba_method = "echo" 18 | - destroy_vhba_by_virsh: 19 | destroy_vhba_method = "virsh" 20 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/numa/guest_numa_node_tuning/auto_memory_placement_numad_fail.cfg: -------------------------------------------------------------------------------- 1 | - guest_numa_node_tuning.auto_memory_placement_numad_fail: 2 | type = auto_memory_placement_numad_fail 3 | take_regular_screendumps = no 4 | start_vm = "no" 5 | err_msg = "Failed to query numad for the advisory nodeset" 6 | variants memory_binding_mode: 7 | - mem_mode_strict: 8 | mem_mode = 'strict' 9 | - mem_mode_interleave: 10 | mem_mode = 'interleave' 11 
| - mem_mode_preferred: 12 | mem_mode = 'preferred' 13 | - mem_mode_restrictive: 14 | mem_mode = 'restrictive' 15 | numa_memory = {'mode': '${mem_mode}', 'placement': 'auto'} 16 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/numa/guest_numa_node_tuning/change_numa_tuning.cfg: -------------------------------------------------------------------------------- 1 | - guest_numa_node_tuning.change_numa_tuning: 2 | type = change_numa_tuning 3 | take_regular_screendumps = no 4 | start_vm = "no" 5 | cpu_mode = 'host-model' 6 | aarch64: 7 | cpu_mode = 'host-passthrough' 8 | vm_attrs = {'cpu': {'mode': '${cpu_mode}'}} 9 | single_host_node = 'yes' 10 | err_msg = "can't change numatune mode for running domain" 11 | variants memory_binding_mode: 12 | - mem_mode_strict: 13 | mem_mode = 'strict' 14 | - mem_mode_interleave: 15 | mem_mode = 'interleave' 16 | - mem_mode_preferred: 17 | mem_mode = 'preferred' 18 | - mem_mode_restrictive: 19 | mem_mode = 'restrictive' 20 | numa_memory = {'mode': '${mem_mode}', 'nodeset': '%s'} 21 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/numa/host_numa/host_numa_info.cfg: -------------------------------------------------------------------------------- 1 | - host_numa.numa_info: 2 | type = host_numa_info 3 | take_regular_screendumps = no 4 | start_vm = "no" 5 | no s390-virtio 6 | variants kernel_size: 7 | - 4k: 8 | default_pagesize = 4 9 | variants: 10 | - all_hugepages: 11 | allocate_dict = {2048: 200, 1048576: 2} 12 | aarch64: 13 | allocate_dict = {64: 100, 2048: 100, 32768: 4, 1048576: 1} 14 | - 64k: 15 | only aarch64 16 | default_pagesize = 64 17 | variants: 18 | - all_hugepages: 19 | required_kernel = [5.14.0,) 20 | expect_nodes_num = 1 21 | allocate_dict = {2048: 200, 524288: 4, 16777216: 1} 22 | - 512M_2M: 23 | allocate_dict = {2048: 200, 524288: 4} 24 | -------------------------------------------------------------------------------- 
/libvirt/tests/cfg/numa/host_numa/host_numa_ksm_parameters.cfg: -------------------------------------------------------------------------------- 1 | - host_numa.ksm_parameters: 2 | type = host_numa_ksm_parameters 3 | take_regular_screendumps = no 4 | start_vm = "no" 5 | no s390-virtio 6 | variants: 7 | - default: 8 | ksm_files = ['merge_across_nodes', 'pages_to_scan', 'sleep_millisecs'] 9 | set_ksm_values = {'shm_pages_to_scan': '200', 'shm_sleep_millisecs': '10', 'shm_merge_across_nodes': '0'} 10 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/numa/numa_capabilities.cfg: -------------------------------------------------------------------------------- 1 | - numa_capabilities: 2 | type = numa_capabilities 3 | start_vm = "no" 4 | kill_vm = "yes" 5 | status_error = "no" 6 | variants: 7 | - default: 8 | aarch64: 9 | missing_cpu_topology_key = 'die_id' 10 | 11 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/numa/numa_config_with_auto_placement.cfg: -------------------------------------------------------------------------------- 1 | - numa_config_with_auto_placement: 2 | type = numa_config_with_auto_placement 3 | memory_mode = "strict" 4 | memory_placement = 'auto' 5 | vcpu_placement = 'auto' 6 | variants: 7 | - default: 8 | iothreads = 2 9 | - memory_bind: 10 | bind_test = "yes" 11 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/numa/numa_memAccess.cfg: -------------------------------------------------------------------------------- 1 | - numa_memAccess: 2 | type = numa_memAccess 3 | page_id_0 = "{'size': 2048, 'unit': 'KiB', 'nodeset': '0'}" 4 | cell_id_1 = "{ 'id': '1', 'memory': '512000', 'unit': 'KiB'}" 5 | variants: 6 | - invalid: 7 | cell_id_0 = "{'id': '0', 'memory': '512000', 'unit': 'KiB', 'memAccess': 'invalid'}" 8 | err_message = "'memAccess'.*'invalid'" 9 | - shared: 10 | cell_id_0 = "{'id': '0', 
'memory': '512000', 'unit': 'KiB', 'memAccess': 'shared'}" 11 | - edit: 12 | cell_id_0 = "{'id': '0', 'memory': '512000', 'unit': 'KiB', 'memAccess': 'shared'}" 13 | edit_test = 'yes' 14 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/numa/numa_memory_migrate.cfg: -------------------------------------------------------------------------------- 1 | - numa_memory_migrate: 2 | type = numa_memory_migrate 3 | start_vm = "no" 4 | kill_vm = "yes" 5 | memory_mode = "restrictive" 6 | take_regular_screendumps = "no" 7 | libvirtd_debug_file = "/var/log/libvirt/daemon.log" 8 | libvirtd_debug_level = "1" 9 | variants: 10 | - mem_auto: 11 | memory_placement = "auto" 12 | - mem_nodeset: 13 | memory_placement = "static" 14 | memory_nodeset = x 15 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/numa/numa_memory_spread.cfg: -------------------------------------------------------------------------------- 1 | - numa_memory_spread: 2 | type = numa_memory_spread 3 | start_vm = "no" 4 | memory_mode = "restrictive" 5 | variants: 6 | - positive_test: 7 | status_error = "no" 8 | variants: 9 | - default: 10 | limit_mb = 100 11 | cgget_message = 'cpuset.memory_migrate: 1' 12 | - negative_test: 13 | status_error = "yes" 14 | variants: 15 | - strict_memory_mode: 16 | memory_mode = "strict" 17 | error_message = "can't change nodeset for strict mode for running domain" 18 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/numa/numa_node_memory_bind.cfg: -------------------------------------------------------------------------------- 1 | - numa_node_memory_bind: 2 | type = numa_node_memory_bind 3 | variants: 4 | - vcpu_bind: 5 | replace_string = :%s::: 6 | - placement_bind: 7 | replace_string = :%s::\r: 8 | -------------------------------------------------------------------------------- 
/libvirt/tests/cfg/numa/numa_node_tuning/auto_mem_placement_with_incompatible_host_nodeset.cfg: -------------------------------------------------------------------------------- 1 | - guest_numa_node_tuning.incompatible_host_nodeset: 2 | type = auto_mem_placement_with_incompatible_host_nodeset 3 | start_vm = "no" 4 | nodeset = "0" 5 | placement = "auto" 6 | expected_xpaths = [{'element_attrs': [".//memory[@mode='%s']", ".//memory[@placement='${placement}']"]}] 7 | error_msg = "XML document failed to validate against schema" 8 | success_msg = "Domain '%s' XML configuration edited" 9 | variants: 10 | - strict: 11 | tuning_mode = "strict" 12 | - interleave: 13 | tuning_mode = "interleave" 14 | - preferred: 15 | tuning_mode = "preferred" 16 | - restrictive: 17 | tuning_mode = "restrictive" 18 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/numa/numa_nodeset.cfg: -------------------------------------------------------------------------------- 1 | - numa_nodeset: 2 | type = numa_nodeset 3 | memory_mode = "strict" 4 | cellid = "0" 5 | aarch64: 6 | cpu_mode = "host-passthrough" 7 | variants: 8 | - invalid: 9 | error_msg = 'unsupported configuration: NUMA node \d+ is unavailable' 10 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/numa/numa_numad.cfg: -------------------------------------------------------------------------------- 1 | - numa_numad: 2 | type = numa_numad 3 | start_vm = "no" 4 | kill_vm = "yes" 5 | variants: 6 | - update_numad: 7 | err_msg = 'Failed to query numad for the advisory nodeset' 8 | memory_placement = 'auto' 9 | memory_mode = 'strict' 10 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/numa/numa_numanode_cpu_info.cfg: -------------------------------------------------------------------------------- 1 | - numa_numanode_cpu_info: 2 | type = numa_numanode_cpu_info 3 | start_vm = "no" 4 | kill_vm 
= "yes" 5 | numa_cells_with_memory_required = 2 6 | variants: 7 | - default: 8 | err_msg = 'unable to map backing store for guest RAM: Cannot allocate memory' 9 | current_memory_size = 4194304 10 | memory_size = 4194304 11 | nodes_memory = ['1572864', '524288'] 12 | memory_mode = "strict" 13 | 14 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/numa/numa_numatune_cpu.cfg: -------------------------------------------------------------------------------- 1 | - numa_numatune_cpu: 2 | type = numa_numatune_cpu 3 | start_vm = "no" 4 | kill_vm = "yes" 5 | numa_cells_with_memory_required = 2 6 | variants: 7 | - unrelated_cpu_offline: 8 | cpu_index = 1 9 | memory_mode = "strict" 10 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/numa/numa_preferred_undefine.cfg: -------------------------------------------------------------------------------- 1 | - numa_preferred_undefine: 2 | type = numa_preferred_undefine 3 | start_vm = "no" 4 | kill_vm = "yes" 5 | status_error = "no" 6 | memory_mode = "preferred" 7 | memory_placement = "auto" 8 | memory_nodeset = "0" 9 | variants: 10 | - default: 11 | bug_url = "https://bugzilla.redhat.com/show_bug.cgi?id=1006722" 12 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/numa/numad_vcpupin.cfg: -------------------------------------------------------------------------------- 1 | - numad_vcpupin: 2 | type = numad_vcpupin 3 | start_vm = "no" 4 | kill_vm = "yes" 5 | status_error = "no" 6 | vcpu_placement = "static" 7 | memory_mode = "interleave" 8 | memory_placement = "static" 9 | memory_nodeset = "0-1" 10 | variants: 11 | - default: 12 | bug_url = "https://bugzilla.redhat.com/show_bug.cgi?id=846620" 13 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/nwfilter/filter_aready_present_binding.cfg: 
-------------------------------------------------------------------------------- 1 | - filter_aready_present_binding: 2 | type = filter_aready_present_binding 3 | start_vm = "yes" 4 | status_error = "no" 5 | kill_vm = "yes" 6 | filter_name = "clean-traffic" 7 | filter_binding_name = "no-arp-mac-spoofing" 8 | expected_failed = "already exists" 9 | target_dev = "new_tap" 10 | source_network = "default" 11 | source_bridge = "virbr0" 12 | alias_name = "net0" 13 | variants: 14 | - filter_is_define: 15 | is_nwfilter_define = "yes" 16 | - filter_not_define: 17 | is_nwfilter_define = "no" 18 | check_cmd = " ebtables -t nat -L | grep -A3 'Bridge chain: I-vnet0-arp-mac'" 19 | expected_match = "-j DROP" 20 | filter_binding_copy = "no-ip-spoofing" 21 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/nwfilter/nwfilter_binding_create.cfg: -------------------------------------------------------------------------------- 1 | - nwfilter_binding_create: 2 | type = nwfilter_binding_create 3 | start_vm = "yes" 4 | status_error = "no" 5 | kill_vm = "yes" 6 | variants: 7 | - check: 8 | check_cmd = "ebtables -t nat -L" 9 | check_filter = "nwfilter-binding-list" 10 | filter_name = "clean-traffic" 11 | wait_time = "1" 12 | expected_match = "-j DROP" 13 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/nwfilter/nwfilter_binding_delete.cfg: -------------------------------------------------------------------------------- 1 | - nwfilter_binding_delete: 2 | type = nwfilter_binding_delete 3 | start_vm = "yes" 4 | status_error = "no" 5 | kill_vm = "yes" 6 | filter_name = "clean-traffic" 7 | check_cmd = "ebtables -t nat -L" 8 | expected_not_match = "-j DROP" 9 | target_name = "net_tap" 10 | variants: 11 | - variable_notation: 12 | parameters_name_0 = "IP" 13 | parameters_name_1 = "IP" 14 | parameters_name_2 = "IP" 15 | parameters_value_0 = "10.0.0.1" 16 | parameters_value_1 = "10.0.0.2" 17 | 
parameters_value_2 = "10.0.0.3" 18 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/nwfilter/nwfilter_binding_list.cfg: -------------------------------------------------------------------------------- 1 | - nwfilter_binding_list: 2 | type = nwfilter_binding_list 3 | start_vm = "yes" 4 | status_error = "no" 5 | kill_vm = "yes" 6 | option = "--type network --source default --config --live --model virtio" 7 | newfilter_1 = "clean-traffic" 8 | newfilter_2 = "allow-dhcp-server" 9 | variants: 10 | - variable_notation: 11 | parameters_name_0 = 'IP' 12 | parameters_value_0 = '10.0.0.1' 13 | parameters_name_1 = 'IP' 14 | parameters_value_1 = '10.0.0.2' 15 | parameters_name_2 = 'IP' 16 | parameters_value_2 = '10.0.0.3' 17 | parameters_dhcp_0 = 'DHCPSERVER' 18 | dhcp_value_0 = '192.168.122.1' 19 | parameters_dhcp_1 = 'DHCPSERVER' 20 | dhcp_value_1 = '192.168.122.2' 21 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/nwfilter/nwfilter_daemon_restart.cfg: -------------------------------------------------------------------------------- 1 | - nwfilter_vm_start.daemon_restart: 2 | type = nwfilter_daemon_restart 3 | start_vm = "no" 4 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/nwfilter/nwfilter_edit_uuid.cfg: -------------------------------------------------------------------------------- 1 | - nwfilter_edit_uuid: 2 | type = nwfilter_edit_uuid 3 | start_vm = "no" 4 | edit_filter_name = "no-mac-spoofing" 5 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/nwfilter/nwfilter_update_lock.cfg: -------------------------------------------------------------------------------- 1 | - nwfilter_update_lock: 2 | type = nwfilter_update_lock 3 | start_vm = "no" 4 | kill_vm = "yes" 5 | filter_name = "allow-arp" 6 | bug_url = "https://bugzilla.redhat.com/show_bug.cgi?id=1034807" 7 | 
-------------------------------------------------------------------------------- /libvirt/tests/cfg/nwfilter/nwfilter_update_vm_running.cfg: -------------------------------------------------------------------------------- 1 | - nwfilter_update_vm_running: 2 | type = nwfilter_update_vm_running 3 | start_vm = "no" 4 | kill_vm = "yes" 5 | variants: 6 | - update_arp_rule: 7 | filter_name = "allow-arp" 8 | exist_filter = ${filter_name} 9 | rule = "rule_action=drop rule_direction=inout rule_priority=500 EOL" 10 | check_cmd = "ebtables -t nat -L O-DEVNAME-arp" 11 | expect_match = "-j DROP" 12 | check_vm_cmd = "arping -c 3 192.168.122.1" 13 | vm_expect_match = "Received\s0\sresponse" 14 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/nwfilter/nwfilter_vm_attach.cfg: -------------------------------------------------------------------------------- 1 | - nwfilter_vm_attach: 2 | type = nwfilter_vm_attach 3 | start_vm = "yes" 4 | status_error = "no" 5 | variants: 6 | - possitive_test: 7 | variants: 8 | - allow_arp: 9 | filter_name = "allow-arp" 10 | check_cmd = "ebtables -t nat -L I-DEVNAME-arp" 11 | expect_match = "-j ACCEPT" 12 | - negative_test: 13 | status_error = "yes" 14 | variants: 15 | - attach_none: 16 | filter_name = "noexist-filter" 17 | - attach_invalid_twice: 18 | attach_twice_invalid = "yes" 19 | filter_name = "noexist-filter" 20 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/nwfilter/vm_destroy_with_nwfilter.cfg: -------------------------------------------------------------------------------- 1 | - vm_destroy_with_nwfilter: 2 | type = vm_destroy_with_nwfilter 3 | start_vm = "yes" 4 | status_error = "no" 5 | kill_vm = "yes" 6 | variants: 7 | - check_libvirtd_log: 8 | filter_name = "clean-traffic" 9 | check_cmd = "grep "error" /var/log/libvirt/libvirtd.log" 10 | -------------------------------------------------------------------------------- 
/libvirt/tests/cfg/passthrough/ap/libvirt_ap_passthrough.cfg: -------------------------------------------------------------------------------- 1 | - libvirt_ap_passthrough: 2 | type = libvirt_ap_passthrough 3 | only s390-virtio 4 | variants: 5 | - hostdev_hotplug: 6 | plug = hot 7 | - hostdev_coldplug: 8 | plug = cold 9 | 10 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/passthrough/ap/libvirt_ap_passthrough_autostart.cfg: -------------------------------------------------------------------------------- 1 | - libvirt_ap_passthrough.autostart: 2 | type = libvirt_ap_passthrough_autostart 3 | only s390-virtio 4 | kvm_module_parameters = 'nested=1' 5 | start_vm = no 6 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/passthrough/ap/libvirt_ap_passthrough_hotplug.cfg: -------------------------------------------------------------------------------- 1 | - libvirt_ap_passthrough.hw_hotplug: 2 | type = libvirt_ap_passthrough_hotplug 3 | only s390-virtio 4 | kvm_module_parameters = 'nested=1' 5 | start_vm = no 6 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/passthrough/ccw/libvirt_ccw_passthrough.cfg: -------------------------------------------------------------------------------- 1 | - libvirt_ccw_passthrough: 2 | type = libvirt_ccw_passthrough 3 | only s390-virtio 4 | devid = 5 | variants: 6 | - happy_path: 7 | - device_removal: 8 | device_removal_case = yes 9 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/passthrough/ccw/libvirt_ccw_passthrough_read_write.cfg: -------------------------------------------------------------------------------- 1 | - libvirt_ccw_passthrough.read_write: 2 | type = libvirt_ccw_passthrough_read_write 3 | only s390-virtio 4 | devid = 5 | start_vm = yes 6 | variants: 7 | - happy_path: 8 | variants plugmethod: 9 | - hotplug_unplug: 10 | 
- coldplug_unplug: 11 | 12 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/passthrough/pci/ism_pci_passthrough.cfg: -------------------------------------------------------------------------------- 1 | - libvirt_pci_passthrough.ism: 2 | only s390-virtio 3 | type = ism_pci_passthrough 4 | start_vm = yes 5 | pci_dev = LIBVIRT_PCI_NAME 6 | guest_iface = enc1 7 | variants: 8 | - happy_path: 9 | check = available 10 | - reboot: 11 | check = available_after_reboot 12 | - reboot_start: 13 | check = start_after_reboot 14 | - connect: 15 | check = smc_functional 16 | - hotplug: 17 | check = available_hotplug 18 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/ppc_device.cfg: -------------------------------------------------------------------------------- 1 | - ppc_device: 2 | take_regular_screendumps = no 3 | type = "ppc_device" 4 | start_vm = no 5 | only pseries 6 | 7 | variants: 8 | - panic_address: 9 | case = panic_address 10 | status_error = yes 11 | error_msg = error: unsupported configuration: setting the panic device address is not supported for model 'pseries' 12 | - unavail_pci_device: 13 | case = unavail_pci_device 14 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/remote_access/remote_tls_multiple_certs.cfg: -------------------------------------------------------------------------------- 1 | - virsh.remote_tls_multiple_certs: 2 | type = remote_tls_multiple_certs 3 | server_ip = ${remote_ip} 4 | server_user = ${remote_user} 5 | server_pwd = ${remote_pwd} 6 | client_ip = ${local_ip} 7 | client_user = root 8 | client_pwd = ${local_pwd} 9 | start_vm = "no" 10 | port = "22" 11 | tls_port = "16514" 12 | variants: 13 | - positive_testing: 14 | err_msg = "The certificate hasn't got a known issuer" 15 | 16 | -------------------------------------------------------------------------------- 
/libvirt/tests/cfg/remove_guest.cfg: -------------------------------------------------------------------------------- 1 | - remove_guest: install setup image_copy unattended_install.cdrom 2 | type = remove_guest 3 | shutdown_method = shell 4 | kill_vm = yes 5 | kill_vm_gracefully = no 6 | force_remove_vm = yes 7 | start_vm = no 8 | variants: 9 | - without_disk: 10 | remove_image = no 11 | - with_disk: 12 | remove_image = yes 13 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/save_and_restore/abort_managedsave.cfg: -------------------------------------------------------------------------------- 1 | - save_and_restore.abort_managedsave: 2 | type = abort_managedsave 3 | save_opt = 4 | start_vm = no 5 | default_path = "/var/lib/libvirt/qemu/save" 6 | event_cmd = "event --loop --all" 7 | expected_event = ["Suspended Paused", "Resumed Unpaused"] 8 | stress_type = "stress" 9 | stress_cmds = "stress --cpu 8 --io 4 --vm 2 --vm-bytes 128M --vm-keep" 10 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/save_and_restore/abort_save.cfg: -------------------------------------------------------------------------------- 1 | - save_and_restore.abort_save: 2 | type = abort_save 3 | start_vm = no 4 | stress_type = "stress" 5 | stress_cmds = "stress --cpu 8 --io 4 --vm 2 --vm-bytes 128M --vm-keep" 6 | 7 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/save_and_restore/managedsave_remove.cfg: -------------------------------------------------------------------------------- 1 | - save_and_restore.managedsave_remove: 2 | type = managedsave_remove 3 | start_vm = yes 4 | status_error = no 5 | variants: 6 | - normal: 7 | file_state = normal 8 | only managedsaved 9 | - corrupt: 10 | file_state = corrupt 11 | - nonexist: 12 | file_state = nonexist 13 | variants: 14 | - managedsaved: 15 | vm_managedsaved = yes 16 | - not_managedsaved: 17 | 
vm_managedsaved = no 18 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/save_and_restore/restore_from_nfs_file.cfg: -------------------------------------------------------------------------------- 1 | - save_and_restore.restore_from_nfs_file: 2 | type = restore_from_nfs_file 3 | start_vm = no 4 | disk_seclabels = {'seclabels': [{'model': 'dac', 'relabel': 'no'}]} 5 | uid = 107 6 | gid = 107 7 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/save_and_restore/restore_from_unqualified_file.cfg: -------------------------------------------------------------------------------- 1 | - save_and_restore.restore_from_unqualified_file: 2 | type = restore_from_unqualified_file 3 | status_error = yes 4 | variants scenario: 5 | - non_exist: 6 | error_msg = No such file or directory 7 | - invalid: 8 | error_msg = failed to read qemu header 9 | - to_running_vm: 10 | error_msg = domain .* is already active 11 | - image_running_by_another_vm: 12 | vm_2nd = vm2 13 | vms += ' ${vm_2nd}' 14 | error_msg = Setting different SELinux label on .* which is already in use 15 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/save_and_restore/save_image_define.cfg: -------------------------------------------------------------------------------- 1 | - save_and_restore.save_image_define: 2 | type = save_image_define 3 | start_vm = no 4 | pre_state = running 5 | variants scenario: 6 | - no_opt: 7 | options = 8 | - running_opt: 9 | pre_state = paused 10 | after_state = running 11 | options = --running 12 | - paused_opt: 13 | after_state = paused 14 | options = --paused 15 | - exclusive_opt: 16 | status_error = yes 17 | options = --running --paused 18 | error_msg = 'are mutually exclusive' 19 | variants mode: 20 | - readonly: 21 | only no_opt 22 | readonly = yes 23 | status_error = yes 24 | error_msg = 'read only access prevents 
virDomainSaveImageDefineXML' 25 | - normal: 26 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/save_and_restore/save_to_block.cfg: -------------------------------------------------------------------------------- 1 | - save_and_restore.save_to_block: 2 | type = save_to_block 3 | start_vm = no 4 | variants: 5 | - qemu_namespace: 6 | expect_label = 'system_u:object_r:svirt_image_t:s0:c(\d+),c(\d+)' 7 | variants: 8 | - enabled: 9 | namespaces = ["mount"] 10 | - disabled: 11 | namespaces = [] 12 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/save_and_restore/save_to_nfs.cfg: -------------------------------------------------------------------------------- 1 | - save_and_restore.save_to_nfs: 2 | type = save_to_nfs 3 | start_vm = no 4 | local_boolean_varible = 'virt_use_nfs' 5 | nfs_mount_dir = '/var/lib/libvirt/nfs_dir' 6 | swtpm_lib = '/var/lib/swtpm-localca' 7 | swtpm_perms_file = "/tmp/permis.facl" 8 | variants: 9 | - root_squash: 10 | export_opt = 'rw,async,root_squash' 11 | variants: 12 | - dynamic_ownership_off: 13 | chown_img = "qemu:qemu" 14 | uid = 107 15 | gid = 107 16 | mod = 664 17 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/save_and_restore/save_with_formats.cfg: -------------------------------------------------------------------------------- 1 | - save_and_restore.save_with_formats: 2 | type = save_with_formats 3 | start_vm = no 4 | libvirtd_debug_level = 1 5 | libvirtd_debug_file = /var/log/libvirt/virtqemud.log 6 | variants: 7 | - positive_test: 8 | status_error = no 9 | variants save_format: 10 | - raw: 11 | - lzop: 12 | - gzip: 13 | - bzip2: 14 | - xz: 15 | - negative_test: 16 | status_error = yes 17 | variants save_format: 18 | - abc: 19 | error_msg = Invalid save image format specified in configuration file 20 | 
-------------------------------------------------------------------------------- /libvirt/tests/cfg/scsi/scsi_command_test_hostdev.cfg: -------------------------------------------------------------------------------- 1 | - scsi_device.scsi_command_test.with_hostdev: 2 | type = scsi_command_test_hostdev 3 | start_vm = no 4 | variants: 5 | - guest_multipath: 6 | emulated_image = "emulated-iscsi" 7 | second_target = "iqn.2024-11.com.virttest:emulated-iscsi.target-2" 8 | mpath_conf_path = "/etc/multipath.conf" 9 | pkg_list = "['device-mapper-multipath', 'python3-rtslib']" 10 | hostdev_dict = {'type': 'scsi', 'source': {'untyped_address': {'target': '0', 'unit': '0', 'bus': '0'}, 'adapter_name': 'scsi_host%s'}, 'mode': 'subsystem', 'type_name': 'scsi', 'rawio': 'yes', 'sgio': 'filtered', 'managed': 'no'} 11 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/scsi/scsi_disk_attributes.cfg: -------------------------------------------------------------------------------- 1 | - scsi_device.scsi_disk_attributes.rawio: 2 | type = scsi_disk_attributes 3 | start_vm = no 4 | target_disk = "sdb" 5 | disk_type = "block" 6 | variants: 7 | - rawio_yes: 8 | rawio_value = "yes" 9 | expected_xpaths = [{'element_attrs': [".//disk[@rawio='yes']"]}] 10 | - rawio_no: 11 | rawio_value = "no" 12 | expected_xpaths = [{'element_attrs': [".//disk[@rawio='no']"]}] 13 | disk_dict = {'target': {'dev': '${target_disk}', 'bus': 'scsi'}, 'driver': {'name': 'qemu', 'type': 'raw'}, 'device': 'lun', 'sgio': 'filtered', 'rawio': '${rawio_value}', 'type_name': 'block', 'source': {'attrs': {'dev': '%s'}}} 14 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/secure_execution/confirm_environment.cfg: -------------------------------------------------------------------------------- 1 | - secure_execution.confirm_environment: 2 | only s390x 3 | type = confirm_environment 4 | take_regular_screendumps = "no" 5 | 
start_vm = "yes" 6 | host_se_cmd = "cat /sys/firmware/uv/prot_virt_host" 7 | guest_se_cmd = "cat /sys/firmware/uv/prot_virt_guest" 8 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/security/rng/virtio_rng.cfg: -------------------------------------------------------------------------------- 1 | - virtio_rng: 2 | type = virtio_rng 3 | start_vm = no 4 | 5 | variants test_case: 6 | - coldplug_unplug_random_backend: 7 | backend_dev = "/dev/urandom" 8 | rng_device_dict = {"rng_model": "virtio", "backend": {"backend_model": "random", "backend_dev": "${backend_dev}"}} 9 | - coldplug_unplug_egd_tcp_connect_mode: 10 | rng_port = "2345" 11 | rng_device_dict = {"rng_model": "virtio", "backend": {"backend_model": "egd", "backend_type": "tcp", "source": [{"mode": "connect", "host": "localhost", "service": "${rng_port}", "tls": "no"}], "backend_protocol": "raw"}} 12 | - coldplug_unplug_egd_tcp_bind_mode: 13 | rng_device_dict = {"rng_model": "virtio", "backend": {"backend_model": "egd", "backend_type": "tcp", "backend_protocol": "raw", "source": [{"mode": "bind", "host": "localhost", "service": "2345", "tls": "no"}]}} 14 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/security/virt_what_cvm.cfg: -------------------------------------------------------------------------------- 1 | - virt_what.cvm: 2 | type = virt_what_cvm 3 | start_vm = yes 4 | variants: 5 | - expected_value: 6 | # The EXPECTED_VALUE depends on the VM. A normal VM would have "". 
7 | # man virt-what-cvm lists the available values for VM where 8 | # confidentiality is enabled 9 | expected_cvm = 10 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/snapshot/delete_disk_and_memory_snapshot.cfg: -------------------------------------------------------------------------------- 1 | - snapshot_delete.disk_and_memory_snap: 2 | type = delete_disk_and_memory_snapshot 3 | start_vm = no 4 | func_supported_since_libvirt_ver = (9, 10, 0) 5 | snap_names = ["s1", "s2"] 6 | file_path = "/mnt/data.txt" 7 | target_disk = "vdb" 8 | snapshot_disk_list = "[{'disk_name': 'vda', 'disk_snapshot': 'no'}, {'disk_name': '${target_disk}', 'disk_snapshot': 'external', 'source':{'attrs': {'file': '%s'}}}]" 9 | snapshot_dict = {'description': 'Snapshot test', 'snap_name': '%s', 'mem_snap_type': 'external', 'mem_file': '%s'} 10 | variants disk: 11 | - qcow2_format: 12 | disk_type = "file" 13 | disk_dict = {"type_name":"${disk_type}", "target":{"dev": "${target_disk}", "bus": "virtio"}, "driver": {"name": "qemu", "type":"qcow2"}} 14 | variants vm_status: 15 | - vm_running: 16 | - vm_paused: 17 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/snapshot/delete_disk_only_snapshot.cfg: -------------------------------------------------------------------------------- 1 | - snapshot_delete.disk_only_snap: 2 | type = delete_disk_only_snapshot 3 | func_supported_since_libvirt_ver = (9, 10, 0) 4 | start_vm = no 5 | snap_names = ['s1', 's2'] 6 | snap_options = " --disk-only" 7 | disk1 = 'vda' 8 | disk2 = 'vdb' 9 | file_name = '/tmp/test.txt' 10 | mem_path = "/tmp/mem." 
11 | disk_type = 'file' 12 | disk_driver = {'driver': {'name': 'qemu', 'type': 'qcow2'}} 13 | disk_target = {'target': {'dev': '${disk2}', 'bus': 'virtio'}} 14 | disk_dict = {"type_name": '${disk_type}', 'device': 'disk', **${disk_target}, **${disk_driver}} 15 | snapshot_disk_list = "[{'disk_name': 'vda', 'disk_snapshot': 'no'}, {'disk_name': '${disk2}', 'disk_snapshot': 'external', 'source':{'attrs': {'file': '%s'}}}]" 16 | snapshot_dict = {'description': 'Snapshot test', 'snap_name': '%s'} 17 | 18 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/snapshot/delete_external_after_reverting_internal.cfg: -------------------------------------------------------------------------------- 1 | - snapshot_delete.after_reverting_internal: 2 | type = delete_external_after_reverting_internal 3 | start_vm = no 4 | internal_snap_name = s1 5 | internal_snap_option = "${internal_snap_name}" 6 | external_snap_name = s2 7 | external_snap_option = "${external_snap_name} --memspec file=%s,snapshot=external" 8 | disk_xpath = [{'element_attrs':[".//devices/disk/source[@file='%s']"]}, {'element_attrs':[".//devices/disk/backingStore[@type='file']"]}, {'element_attrs':[".//devices/disk/backingStore/source[@file='%s']"]}] 9 | variants: 10 | - seabios: 11 | only x86_64 12 | firmware_type = "seabios" 13 | - ovmf: 14 | only aarch64 15 | func_supported_since_libvirt_ver = (10, 10, 0) 16 | firmware_type = "ovmf" 17 | - no_firmware: 18 | only s390-virtio 19 | 20 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/snapshot/delete_snapshot_after_disk_attached.cfg: -------------------------------------------------------------------------------- 1 | - snapshot_delete.disk_attached: 2 | type = delete_snapshot_after_disk_attached 3 | func_supported_since_libvirt_ver = (9, 10, 0) 4 | start_vm = yes 5 | snap_names = ['s1', 's2', 's3','s4'] 6 | disk1 = 'vda' 7 | disk1_snap_option = " %s --disk-only" 8 | 
disk2 = 'vdb' 9 | mem_path = "/tmp/mem." 10 | snap_deleting_ongoing_tag = "" 11 | disk2_snap_option = "%s --memspec ${mem_path}%s,snapshot=external --diskspec ${disk1},snapshot=external ${disk2},snapshot=external" 12 | disk_type = 'file' 13 | disk_driver = {'driver': {'name': 'qemu', 'type': 'qcow2'}} 14 | disk_target = {'target': {'dev': '${disk2}', 'bus': 'virtio'}} 15 | disk_dict = {"type_name": '${disk_type}', 'device': 'disk', **${disk_target}, **${disk_driver}} 16 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/snapshot/memory_snapshot_delete.cfg: -------------------------------------------------------------------------------- 1 | - memory_snapshot.delete: 2 | type = memory_snapshot_delete 3 | take_regular_screendumps = no 4 | start_vm = no 5 | snapshot_disk_list = "[{'disk_name': 'vda', 'disk_snapshot': 'no'}, {'disk_name': 'vdb', 'disk_snapshot': 'no'}]" 6 | snapshot_dict = {'description': 'Snapshot test', 'snap_name': '%s', 'mem_snap_type': 'external', 'mem_file': '%s'} 7 | func_supported_since_libvirt_ver = (9, 10, 0) 8 | variants disk_format: 9 | - type_qcow2: 10 | disk_driver = {'driver': {'name': 'qemu', 'type': 'qcow2'}} 11 | disk_target = {'target': {'dev': 'vdb', 'bus': 'virtio'}} 12 | disk_source = {'source': {'attrs': {'file': '%s'}}} 13 | disk_dict = {"type_name": 'file', 'device': 'disk', **${disk_target}, **${disk_driver}, **${disk_source}} 14 | variants vm_status: 15 | - running: 16 | - paused: 17 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/snapshot/revert_disk_external_snap.cfg: -------------------------------------------------------------------------------- 1 | - snapshot_revert.disk_external_snap: 2 | type = revert_disk_external_snap 3 | start_vm = no 4 | snap_names = ['s1', 's2', 's3'] 5 | file_list = ["/mnt/s1", "/mnt/s2", "/mnt/s3"] 6 | func_supported_since_libvirt_ver = (9, 10, 0) 7 | variants: 8 | - with_datastore: 9 | 
with_data_file = "yes" 10 | disk_target = "vdb" 11 | disk_type = "file" 12 | disk_dict = {"type_name":"${disk_type}", "target":{"dev":"${disk_target}", "bus":"virtio"}, "driver": {"name":"qemu", "type":"qcow2"}} 13 | func_supported_since_libvirt_ver = (10, 10, 0) 14 | data_file_option = " -o data_file=%s" 15 | - without_datastore: 16 | variants snap_type: 17 | - disk_only: 18 | snap_options = " %s --disk-only %s" 19 | - disk_and_memory: 20 | snap_options = "%s --memspec snapshot=external,file=/tmp/mem.%s" 21 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/snapshot/revert_memory_only_snap.cfg: -------------------------------------------------------------------------------- 1 | - snapshot_revert.memory_only_snap: 2 | type = revert_memory_only_snap 3 | start_vm = no 4 | snap_names = ['s1', 's2', 's3'] 5 | target_disk = 'vda' 6 | disk_type = 'file' 7 | snap_options = "%s --memspec snapshot=external,file=/tmp/mem.%s --diskspec ${target_disk},snapshot=no" 8 | func_supported_since_libvirt_ver = (9, 10, 0) 9 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/snapshot/revert_snap_based_on_state.cfg: -------------------------------------------------------------------------------- 1 | - snapshot_revert.snap_and_domain_state: 2 | type = revert_snap_based_on_state 3 | start_vm = no 4 | snap_name = 's1' 5 | func_supported_since_libvirt_ver = (9, 10, 0) 6 | variants: 7 | - snap_running: 8 | snap_state = "running" 9 | expected_state = "${snap_state}" 10 | snap_options = " %s --memspec snapshot=external,file=/tmp/mem.s1 --diskspec vda,snapshot=external,file=/tmp/vda.s1" 11 | - snap_shutoff: 12 | snap_state = "shutoff" 13 | expected_state = "shut off" 14 | snap_options = " %s --diskspec vda,snapshot=external,file=/tmp/vda.s1" 15 | variants: 16 | - vm_running: 17 | vm_state = "running" 18 | - vm_shutoff: 19 | vm_state = "shut off" 20 | 
-------------------------------------------------------------------------------- /libvirt/tests/cfg/snapshot/revert_snap_for_guest_with_genid.cfg: -------------------------------------------------------------------------------- 1 | - snapshot_revert.with_genid: 2 | no s390-virtio,aarch64 3 | type = revert_snap_for_guest_with_genid 4 | start_vm = no 5 | func_supported_since_libvirt_ver = (9, 10, 0) 6 | snap_names = ['s1', 's2'] 7 | snap_options = "%s --memspec snapshot=external,file=/tmp/mem.%s --diskspec vda,snapshot=external,file=/tmp/vda.%s" 8 | 9 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/snapshot/revert_snap_with_flags.cfg: -------------------------------------------------------------------------------- 1 | - snapshot_revert.with_flags: 2 | type = revert_snap_with_flags 3 | start_vm = no 4 | snap_names = ['s1', 's2'] 5 | target_disk = 'vda' 6 | disk_type = 'file' 7 | vars_path = "/var/lib/libvirt/qemu/nvram/${main_vm}_VARS.fd" 8 | aarch64: 9 | vars_path = "/var/lib/libvirt/qemu/nvram/${main_vm}_VARS.qcow2" 10 | snap1_options = "%s --diskspec vda,snapshot=external,file=/tmp/vda.%s" 11 | snap2_options = "%s --memspec snapshot=external,file=/tmp/mem.%s --diskspec vda,snapshot=external,file=/tmp/vda.%s" 12 | flags = [" --current --paused"," --running --reset-nvram", " --force"] 13 | func_supported_since_libvirt_ver = (9, 10, 0) 14 | s390-virtio: 15 | vars_path = 16 | flags = [" --current --paused"," --running", " --force"] 17 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/sriov/capabilities/sriov_capabilities_iommu_support.cfg: -------------------------------------------------------------------------------- 1 | - sriov.capabilities.iommu_support: 2 | type = sriov_capabilities_iommu_support 3 | start_vm = "no" 4 | only x86_64, aarch64 5 | -------------------------------------------------------------------------------- 
/libvirt/tests/cfg/sriov/failover/sriov_failover_lifecycle.cfg: -------------------------------------------------------------------------------- 1 | - sriov.failover.lifecycle: 2 | type = sriov_failover_lifecycle 3 | expr_iface_no = 3 4 | br_dict = {'source': {'bridge': 'br0'}, 'teaming': {'type': 'persistent'}, 'alias': {'name': 'ua-3f13c36e-186b-4c6b-ba54-0ec483613931'}, 'mac_address': mac_addr, 'model': 'virtio', 'type_name': 'bridge'} 5 | 6 | only x86_64 7 | variants dev_type: 8 | - hostdev_interface: 9 | iface_dict = {'managed': 'yes', 'teaming': {'type': 'transient', 'persistent': 'ua-3f13c36e-186b-4c6b-ba54-0ec483613931'}, 'mac_address': mac_addr, 'type_name': 'hostdev', 'hostdev_address': {'type_name': 'pci', 'attrs': vf_pci_addr}} 10 | - hostdev_device: 11 | set_vf_mac = "yes" 12 | hostdev_dict = {'mode': 'subsystem', 'type': 'pci', 'source': {'untyped_address': vf_pci_addr}, 'managed': 'yes', 'teaming': {'type': 'transient', 'persistent': 'ua-3f13c36e-186b-4c6b-ba54-0ec483613931'}} 13 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/sriov/network/sriov_check_net_info_by_network_lifecycle_cmds.cfg: -------------------------------------------------------------------------------- 1 | - sriov.network.check_net_info_by_network_lifecycle_cmds: 2 | type = sriov_check_net_info_by_network_lifecycle_cmds 3 | only x86_64, aarch64 4 | start_vm = "no" 5 | network_dict = {'forward': {'mode': 'hostdev', 'managed': 'yes'}, 'name': 'hostnet', 'vf_list': [{'type_name': 'pci', 'attrs': vf_pci_addr}], 'uuid': 'e6ddbb96-5be5-494d-92f0-f7473e185876'} 6 | network_update_dict = {'portgroups': [{'name': 'dontpanic'}]} 7 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/sriov/network/sriov_check_network_connections.cfg: -------------------------------------------------------------------------------- 1 | - sriov.network.check_connections: 2 | type = 
sriov_check_network_connections 3 | only x86_64, aarch64 4 | 5 | start_vm = "no" 6 | network_dict = {'forward': {'mode': 'hostdev', 'managed': 'yes'}, 'name': 'hostdev_net', 'pf': {'dev': pf_name}} 7 | iface_dict = {'type_name': 'network', 'source': {'network': 'hostdev_net'}} 8 | iface_nums = 2 9 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/sriov/network/sriov_define_or_start_network_with_pf_addr.cfg: -------------------------------------------------------------------------------- 1 | - sriov.network.define_or_start_network_with_pf_addr: 2 | type = sriov_define_or_start_network_with_pf_addr 3 | only x86_64, aarch64 4 | start_vm = "no" 5 | err_msg = "SR-IOV Virtual Function" 6 | network_dict = {'forward': {'mode': 'hostdev', 'managed': 'yes'}, 'name': 'hostnet', 'vf_list': [{'type_name': 'pci', 'attrs': pf_pci_addr}], 'uuid': 'e6ddbb96-5be5-494d-92f0-f7473e185876'} 7 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/sriov/nodedev/sriov_nodedev_non_driver.cfg: -------------------------------------------------------------------------------- 1 | - sriov.nodedev.non_driver: 2 | type = sriov_nodedev_non_driver 3 | start_vm = "no" 4 | only x86_64, aarch64 5 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/sriov/nodedev/sriov_nodedev_reattach_detach.cfg: -------------------------------------------------------------------------------- 1 | - sriov.nodedev.nodedev_reattach_detach: 2 | type = sriov_nodedev_reattach_detach 3 | start_vm = "no" 4 | only x86_64, aarch64 5 | variants dev_name: 6 | - vf: 7 | - pf: 8 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/sriov/plug_unplug/sriov_attach_detach_interface.cfg: -------------------------------------------------------------------------------- 1 | - sriov.plug_unplug.attach_detach_interface: 2 | type = 
sriov_attach_detach_interface 3 | start_vm = "no" 4 | only x86_64, aarch64 5 | 6 | variants: 7 | - with_managed: 8 | attach_opt = "--managed" 9 | - without_managed: 10 | attach_opt = "" 11 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/sriov/plug_unplug/sriov_attach_detach_interface_check_connections.cfg: -------------------------------------------------------------------------------- 1 | - sriov.plug_unplug.attach_detach_interface_check_connections: 2 | type = sriov_attach_detach_interface_check_connections 3 | only x86_64, aarch64 4 | 5 | start_vm = "no" 6 | vf_no = 4 7 | attach_extra_opts = "--model virtio" 8 | network_dict = {'forward': {'mode': 'hostdev', 'managed': 'yes'}, 'name': 'hostdev_net', 'pf': {'dev': pf_name}} 9 | iface_type = "hostdev" 10 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/sriov/plug_unplug/sriov_attach_detach_interface_from_network.cfg: -------------------------------------------------------------------------------- 1 | - sriov.plug_unplug.attach_detach_interface_from_network: 2 | type = sriov_attach_detach_interface_from_network 3 | start_vm = "no" 4 | network_dict = {'forward': {'mode': 'hostdev', 'managed': 'yes'}, 'name': 'hostdev_net', 'vf_list': [{'type_name': 'pci', 'attrs': vf_pci_addr}]} 5 | only x86_64, aarch64 6 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/sriov/plug_unplug/sriov_attach_detach_interface_special_situations.cfg: -------------------------------------------------------------------------------- 1 | - sriov.plug_unplug.attach_detach_interface_special_situations: 2 | type = sriov_attach_detach_interface_special_situations 3 | start_vm = "no" 4 | only x86_64, aarch64 5 | attach_opt = "--managed" 6 | 7 | variants test_scenario: 8 | - no_detach_for_no_managed: 9 | attach_opt = "" 10 | status_error = 'yes' 11 | err_msg = "must be manually detached from" 12 
| - with_model: 13 | attach_opt = "--managed --model virtio" 14 | - module_auto_reload: 15 | - to_vm_with_hostdev_ifaces: 16 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/sriov/plug_unplug/sriov_attach_interface_to_vm_with_vf.cfg: -------------------------------------------------------------------------------- 1 | - sriov.plug_unplug.attach_interface_to_vm_with_vf: 2 | type = sriov_attach_interface_to_vm_with_vf 3 | start_vm = "no" 4 | network_dict = {'forward': {'mode': 'hostdev', 'managed': 'yes'}, 'name': 'hostnet', 'vf_list': [{'type_name': 'pci', 'attrs': vf_pci_addr}, {'type_name': 'pci', 'attrs': vf_pci_addr2}]} 5 | pre_iface_dict = {'type_name': 'network', 'source': {'network': 'hostnet'}} 6 | only x86_64, aarch64 7 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/sriov/plug_unplug/sriov_attach_released_hostdev.cfg: -------------------------------------------------------------------------------- 1 | - sriov.plug_unplug.attach_released_hostdev: 2 | type = sriov_attach_released_hostdev 3 | start_vm = "no" 4 | dev_type = "hostdev_interface" 5 | iface_dict = {'managed': 'yes', 'type_name': 'hostdev', 'hostdev_address': {'type_name': 'pci', 'attrs': vf_pci_addr}} 6 | only x86_64, aarch64 7 | variants test_scenario: 8 | - to_2nd_vm: 9 | vms = "ENTER.YOUR.VM1 ENTER.YOUR.VM2" 10 | - to_itself: 11 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/sriov/scalability/sriov_scalability_max_vf.cfg: -------------------------------------------------------------------------------- 1 | - sriov.scalability.max_vfs: 2 | type = sriov_scalability_max_vfs 3 | start_vm = "no" 4 | only x86_64 5 | vf_no = 63 6 | net_forward = {"mode": "hostdev", "managed": "yes"} 7 | variants: 8 | - maximum: 9 | iface_num = 64 10 | - exceed_maximum: 11 | iface_num = 65 12 | start_error = yes 13 | 
-------------------------------------------------------------------------------- /libvirt/tests/cfg/sriov/scalability/sriov_scalability_repeated_at_dt.cfg: -------------------------------------------------------------------------------- 1 | - sriov.scalability.repeated_at_dt: 2 | type = sriov_scalability_repeated_at_dt 3 | start_vm = "no" 4 | iface_dict = {'managed': 'yes', 'type_name': 'hostdev', 'hostdev_address': {'type_name': 'pci', 'attrs': vf_pci_addr}} 5 | loop_time = 10 6 | only x86_64 7 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/sriov/update_device/sriov_update_device.cfg: -------------------------------------------------------------------------------- 1 | - sriov.update_device: 2 | type = sriov_update_device 3 | start_vm = "no" 4 | iface_dict = {'managed': 'yes', 'type_name': 'hostdev', 'hostdev_address': {'type_name': 'pci', 'attrs': vf_pci_addr}} 5 | update_iface = {'managed': 'no'} 6 | err_msg = "cannot change config of 'hostdev'" 7 | only x86_64, aarch64 8 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/sriov/vIOMMU/attach_iommu_device.cfg: -------------------------------------------------------------------------------- 1 | - vIOMMU.attach_iommu_device: 2 | type = attach_iommu_device 3 | start_vm = "no" 4 | err_msg = "attach of device 'iommu' is not supported" 5 | variants: 6 | - virtio: 7 | only q35, aarch64 8 | func_supported_since_libvirt_ver = (8, 3, 0) 9 | iommu_dict = {'model': 'virtio'} 10 | - intel: 11 | only q35 12 | iommu_dict = {'model': 'intel', 'driver': {'intremap': 'on', 'caching_mode': 'on'}} 13 | - smmuv3: 14 | only aarch64 15 | func_supported_since_libvirt_ver = (5, 5, 0) 16 | iommu_dict = {'model': 'smmuv3'} 17 | variants: 18 | - cold_plug: 19 | attach_option = "--config" 20 | - hot_plug: 21 | -------------------------------------------------------------------------------- 
/libvirt/tests/cfg/sriov/vIOMMU/intel_iommu_aw_bits.cfg: -------------------------------------------------------------------------------- 1 | - vIOMMU.intel_iommu.aw_bits: 2 | type = intel_iommu_aw_bits 3 | start_vm = "yes" 4 | enable_guest_iommu = "yes" 5 | only q35 6 | 7 | variants aw_bits_value: 8 | - 48: 9 | - 39: 10 | - 36: 11 | status_error = yes 12 | err_msg = "Supported values for aw-bits are" 13 | - 3423: 14 | status_error = yes 15 | err_msg = "Parameter 'aw-bits' expects uint8_t" 16 | iommu_dict = {'model': 'intel', 'driver': {'intremap': 'on', 'caching_mode': 'on', 'iotlb': 'on', 'aw_bits': ${aw_bits_value}}} 17 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/sriov/vIOMMU/intel_iommu_with_dma_translation.cfg: -------------------------------------------------------------------------------- 1 | - vIOMMU.intel_iommu_with_dma_translation: 2 | type = intel_iommu_with_dma_translation 3 | start_vm = "yes" 4 | enable_guest_iommu = "yes" 5 | func_supported_since_libvirt_ver = (10, 7, 0) 6 | only q35 7 | variants: 8 | - enable_dma_translation: 9 | dma_translation = "on" 10 | - disable_dma_translation: 11 | dma_translation = "off" 12 | - disable_dma_translation_with_more_vcpus: 13 | with_more_vcpus = "yes" 14 | dma_translation = "off" 15 | eim_dict = {'eim': 'on'} 16 | iommu_dict = {'model': 'intel', 'driver': {'intremap': 'on', 'caching_mode': 'on', 'iotlb': 'on', 'dma_translation': '${dma_translation}'}} 17 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/sriov/vIOMMU/intel_iommu_without_enabling_caching_mode.cfg: -------------------------------------------------------------------------------- 1 | - vIOMMU.intel_iommu.without_enabling_caching_mode: 2 | type = intel_iommu_without_enabling_caching_mode 3 | err_msg = "caching-mode=on for [i|I]ntel" 4 | start_vm = "yes" 5 | iommu_dict = {'model': 'intel', 'driver': {'intremap': 'on'}} 6 | only q35 7 | 8 | 
variants dev_type: 9 | - hostdev_interface: 10 | iface_dict = {'managed': 'yes', 'type_name': 'hostdev', 'hostdev_address': {'type_name': 'pci', 'attrs': vf_pci_addr}} 11 | - hostdev_device: 12 | hostdev_dict = {'mode': 'subsystem', 'type': 'pci', 'source': {'untyped_address': vf_pci_addr}, 'managed': 'yes'} 13 | variants: 14 | - cold_plug: 15 | attach_option = "--config" 16 | - hot_plug: 17 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/sriov/vIOMMU/intel_iommu_without_ioapic.cfg: -------------------------------------------------------------------------------- 1 | - vIOMMU.intel_iommu.without_ioapic: 2 | type = intel_iommu_without_ioapic 3 | start_vm = "yes" 4 | enable_guest_iommu = "yes" 5 | feature_name = "ioapic" 6 | iommu_dict = {'model': 'intel', 'driver': {'intremap': 'on'}} 7 | only q35 8 | variants: 9 | - default: 10 | iommu_dict = {'model': 'intel'} 11 | - intremap_on: 12 | func_supported_since_libvirt_ver = (10, 10, 0) 13 | iommu_dict = {'model': 'intel', 'driver': {'intremap': 'on'}} 14 | auto_add_ioapic = "yes" 15 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/sriov/vIOMMU/iommu_alias.cfg: -------------------------------------------------------------------------------- 1 | - vIOMMU.iommu.alias: 2 | type = iommu_alias 3 | enable_guest_iommu = "yes" 4 | func_supported_since_libvirt_ver = (8, 7, 0) 5 | start_vm = "no" 6 | variants: 7 | - virtio: 8 | only q35, aarch64 9 | iommu_dict = {'model': 'virtio'} 10 | iommu_has_addr = "yes" 11 | - intel: 12 | only q35 13 | iommu_dict = {'model': 'intel', 'driver': {'intremap': 'on', 'caching_mode': 'on', 'eim': 'on', 'iotlb': 'on', 'aw_bits': '48'}} 14 | iommu_has_addr = "no" 15 | - smmuv3: 16 | only aarch64 17 | iommu_dict = {'model': 'smmuv3'} 18 | iommu_has_addr = "no" 19 | variants: 20 | - auto_gen: 21 | - customized: 22 | alias = {'name': 'ua-c1e12b91-175f-4436-b0d5-d7481de40f14'} 23 | 
-------------------------------------------------------------------------------- /libvirt/tests/cfg/sriov/vIOMMU/virtio_iommu_with_addtional_attributes.cfg: -------------------------------------------------------------------------------- 1 | - vIOMMU.virtio_iommu.addtional_attributes: 2 | type = virtio_iommu_with_addtional_attributes 3 | start_vm = "no" 4 | enable_guest_iommu = "yes" 5 | err_msg = "iommu model 'virtio' doesn't support additional attributes" 6 | func_supported_since_libvirt_ver = (8, 7, 0) 7 | only x86_64, aarch64 8 | variants: 9 | - caching_mode: 10 | iommu_dict = {'driver': {'caching_mode': 'on'}, 'model': 'virtio'} 11 | - eim: 12 | iommu_dict = {'driver': {'eim': 'on'}, 'model': 'virtio'} 13 | - iotlb: 14 | iommu_dict = {'driver': {'iotlb': 'on'}, 'model': 'virtio'} 15 | - aw_bits: 16 | iommu_dict = {'driver': {'aw_bits': '48'}, 'model': 'virtio'} 17 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/sriov/vm_lifecycle/sriov_vm_lifecycle_exclusive_check_running_domain.cfg: -------------------------------------------------------------------------------- 1 | - sriov.vm_lifecycle.exclusive_check.running_domain: 2 | type = sriov_vm_lifecycle_exclusive_check_running_domain 3 | vms = "ENTER.YOUR.VM1 ENTER.YOUR.VM2" 4 | start_vm = "no" 5 | only x86_64, aarch64 6 | dev_type = "hostdev_interface" 7 | iface_dict = {'managed': 'yes', 'type_name': 'hostdev', 'hostdev_address': {'type_name': 'pci', 'attrs': vf_pci_addr}} 8 | err_msg = "in use by driver" 9 | 10 | variants test_scenario: 11 | - start_2nd_vm: 12 | dev_type2 = hostdev_interface 13 | iface_dict2 = ${iface_dict} 14 | - assigned_VF_to_host: 15 | - hotplug: 16 | dev_type2 = "hostdev_device" 17 | hostdev_dict2 = {'mode': 'subsystem', 'type': 'pci', 'source': {'untyped_address': vf_pci_addr}, 'managed': 'yes'} 18 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/svirt/default_dac_check.cfg: 
-------------------------------------------------------------------------------- 1 | - default_dac_check: 2 | type = default_dac_check 3 | start_vm = no 4 | variants: 5 | - hugepage_file: 6 | umask = "027" 7 | huge_pages = "yes" 8 | check_type = "hugepage_file" 9 | s390-virtio: 10 | kvm_module_parameters = "hpage=1" 11 | page_size = 1024 12 | - default_dir: 13 | check_type = "default_dir" 14 | - socket_file: 15 | check_type = "socket_file" 16 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/svirt/label_restore_rules/label_restore_rules_on_failed_start.cfg: -------------------------------------------------------------------------------- 1 | - svirt.label_restore_rules.on_failed_start: 2 | type = label_restore_rules_on_failed_start 3 | start_vm = "no" 4 | error_msg = "SELinux label on.*already in use" 5 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/svirt/libvirt_keywrap.cfg: -------------------------------------------------------------------------------- 1 | - libvirt_keywrap: 2 | type = libvirt_keywrap 3 | start_vm = no 4 | only s390-virtio 5 | variants: 6 | - default: 7 | default = yes 8 | expect_token = yes 9 | - aes_off: 10 | default = no 11 | keyname = aes 12 | keystate = off 13 | expect_token = no 14 | 15 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/svirt/qemu_conf/svirt_qemu_security_confined.cfg: -------------------------------------------------------------------------------- 1 | - svirt.qemu_conf.security_confined: 2 | type = svirt_qemu_security_confined 3 | start_vm = "no" 4 | variants: 5 | - default_confined_0: 6 | qemu_conf_security_default_confined = "0" 7 | - default_confined_1: 8 | qemu_conf_security_default_confined = "1" 9 | variants: 10 | - require_confined_0: 11 | qemu_conf_security_require_confined = "0" 12 | - require_confined_1: 13 | qemu_conf_security_require_confined = "1" 14 | 
variants: 15 | - seclabel_none: 16 | seclabel_attr_type = "none" 17 | - @default: 18 | variants: 19 | - positive_test: 20 | only seclabel_none..require_confined_0, default..default_confined_1 21 | - negative_test: 22 | only seclabel_none..require_confined_1 23 | status_error = yes 24 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/svirt/shared_storage/svirt_nfs_disk.cfg: -------------------------------------------------------------------------------- 1 | - svirt.shared_storage.nfs_disk: 2 | type = svirt_nfs_disk 3 | start_vm = no 4 | storage_type = nfs 5 | local_boolean_varible = 'virt_use_nfs' 6 | setup_local_nfs = "yes" 7 | nfs_mount_src = "/var/lib/avocado/data/avocado-vt/images" 8 | nfs_mount_dir = "/var/lib/libvirt/nfs_dir" 9 | nfs_mount_options = "rw" 10 | export_ip = "*" 11 | export_dir = "/var/lib/avocado/data/avocado-vt/images" 12 | status_error = "yes" 13 | variants: 14 | - root_squash: 15 | export_options= "rw,root_squash" 16 | - no_root_squash: 17 | export_options= "rw,no_root_squash,sync" 18 | variants: 19 | - virt_use_nfs_on: 20 | no_root_squash: 21 | status_error = "no" 22 | - virt_use_nfs_off: 23 | only no_root_squash 24 | local_boolean_value = "off" 25 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/svirt/umask_value/svirt_umask_files_accessed_by_qemu.cfg: -------------------------------------------------------------------------------- 1 | - svirt.umask.files_accessed_by_qemu: 2 | type = svirt_umask_files_accessed_by_qemu 3 | start_vm = "no" 4 | umask_value = '027' 5 | mem_backing_attrs = {'hugepages': {}} 6 | s390-virtio: 7 | kvm_module_parameters = "hpage=1" 8 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/virsh_cmd/domain/virsh_attach_passthrough_no_bus.cfg: -------------------------------------------------------------------------------- 1 | - virsh.attach_passthrough_no_bus: 2 | 
virt_test_type = libvirt 3 | type = virsh_attach_passthrough_no_bus 4 | start_vm = no 5 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/virsh_cmd/domain/virsh_create_lxc.cfg: -------------------------------------------------------------------------------- 1 | - virsh.create_lxc: 2 | type = virsh_create_lxc 3 | take_regular_screendumps = "no" 4 | start_vm = "no" 5 | connect_uri = "lxc:///" 6 | vms = "lxc_test_vm1" 7 | variants: 8 | - with_passfds: 9 | create_lxc_fds_options = "--pass-fds" 10 | variants: 11 | - with_none: 12 | create_lxc_other_options = "" 13 | - with_console: 14 | create_lxc_other_options = "--console" 15 | - with_autodestroy_console: 16 | create_lxc_other_options = "--autodestroy --console" 17 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/virsh_cmd/domain/virsh_domblkerror.cfg: -------------------------------------------------------------------------------- 1 | - virsh.domblkerror: 2 | type = virsh_domblkerror 3 | start_vm = "no" 4 | take_regular_screendumps = "no" 5 | variants: 6 | - undefinded_error: 7 | domblkerror_error_type = "unspecified error" 8 | domblkerror_img_size = "1G" 9 | - nospace_error: 10 | domblkerror_error_type = "no space" 11 | domblkerror_img_size = "12M" 12 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/virsh_cmd/domain/virsh_domfsfreeze_domfsthaw.cfg: -------------------------------------------------------------------------------- 1 | - virsh.domfsfreeze_domfsthaw: 2 | type = virsh_domfsfreeze_domfsthaw 3 | start_vm = "yes" 4 | take_regular_screendumps = "no" 5 | variants: 6 | - positive: 7 | variants: 8 | - normal: 9 | - with_mountpoint: 10 | mountpoint = "/" 11 | - negative: 12 | variants: 13 | - no_agent_channel: 14 | prepare_channel = no 15 | start_agent = no 16 | - no_agent: 17 | start_agent = no 18 | 
-------------------------------------------------------------------------------- /libvirt/tests/cfg/virsh_cmd/domain/virsh_managedsave_restore.cfg: -------------------------------------------------------------------------------- 1 | - virsh.managedsave_restore: 2 | type = virsh_managedsave_restore 3 | start_vm = yes 4 | variants: 5 | - managedsaved: 6 | vm_managedsaved = yes 7 | - not_managedsaved: 8 | only no_file 9 | vm_managedsaved = no 10 | variants: 11 | - no_file: 12 | file_state = nonexist 13 | - corrupt_file: 14 | file_state = corrupt 15 | expected_log = "Ignoring incomplete managed state" 16 | - occupied_file: 17 | file_state = occupied 18 | need_vm2 = yes 19 | expected_cmd_err = "Requested operation is not valid" 20 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/virsh_cmd/domain/virsh_managedsave_special_name.cfg: -------------------------------------------------------------------------------- 1 | - virsh.managedsave_special_name: 2 | type = "virsh_managedsave_special_name" 3 | start_vm = "no" 4 | variants: 5 | - non_acsii: 6 | vmname = kīмсhīkīмсhīkīмсhī-∨м 7 | name_display = 'k\xff\xff\xff\xff\xff\xffh\xff\xffk\xff\xff\xff\xff\xff\xffh\xff\xffk\xff\xff\xff\xff\xff\xffh\xff\xff-\xff\xff\xff\xff\xff' 8 | - long: 9 | vmname = 12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012 10 | name_display = ${vmname} 11 | 12 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/virsh_cmd/domain/virsh_managedsave_undefine.cfg: -------------------------------------------------------------------------------- 1 | - virsh.managedsave_undefine: 2 | type = "virsh_managedsave_undefine" 3 | start_vm = "yes" 4 | 
-------------------------------------------------------------------------------- /libvirt/tests/cfg/virsh_cmd/domain/virsh_qemu_agent_command_fs.cfg: -------------------------------------------------------------------------------- 1 | - virsh.qemu_agent_command_fs: 2 | type = virsh_qemu_agent_command_fs 3 | start_vm = "no" 4 | kill_vm = "yes" 5 | kill_vm_before_test = "yes" 6 | status_cmd = "{"execute":"guest-fsfreeze-status"}" 7 | freeze_cmd = "{"execute":"guest-fsfreeze-freeze"}" 8 | thaw_cmd = "{"execute":"guest-fsfreeze-thaw"}" 9 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/virsh_cmd/domain/virsh_qemu_attach.cfg: -------------------------------------------------------------------------------- 1 | - virsh.qemu_attach: 2 | type = virsh_qemu_attach 3 | vm_type = "default" 4 | vms = '' 5 | start_vm = no 6 | not_preprocess = "yes" 7 | variants: 8 | - normal_test: 9 | status_error = "no" 10 | - error_test: 11 | status_error = "yes" 12 | variants: 13 | - invalid_pid: 14 | pid = "invalid_pid" 15 | - invalid_pid1: 16 | pid = "99999" 17 | - none_pid: 18 | pid = "" 19 | - invalid_options: 20 | options = "--xyz" 21 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/virsh_cmd/domain/virsh_save_image_define.cfg: -------------------------------------------------------------------------------- 1 | - virsh.save_image_define: 2 | type = virsh_save_image_define 3 | vm_save = "vm.save" 4 | kill_vm_on_error = "no" 5 | xml_before = "" 6 | xml_after = "" 7 | s390-virtio: 8 | xml_before = "" 9 | xml_after = "" 10 | variants: 11 | - running: 12 | restore_state = "running" 13 | - paused: 14 | restore_state = "paused" 15 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/virsh_cmd/domain/virsh_save_image_edit.cfg: -------------------------------------------------------------------------------- 1 | - virsh.save_image_edit: 2 | type = 
virsh_save_image_edit 3 | vm_save = "vm.save" 4 | kill_vm_on_error = "no" 5 | take_regular_screendumps = "no" 6 | xml_before = "" 7 | xml_after = "" 8 | s390-virtio: 9 | xml_before = "" 10 | xml_after = "" 11 | variants: 12 | - no_option: 13 | restore_state = "running" 14 | - running: 15 | restore_state = "running" 16 | - paused: 17 | restore_state = "paused" 18 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/virsh_cmd/domain/virsh_set_get_user_sshkeys.cfg: -------------------------------------------------------------------------------- 1 | - virsh.set_get_user_sshkeys: 2 | type = virsh_set_get_user_sshkeys 3 | start_vm = yes 4 | host_pre_added_user = "host_pre_added_user" 5 | host_test_user = "host_test_user" 6 | guest_user = "guest_user" 7 | guest_user_passwd = "123456" 8 | variants: 9 | - add_keys: 10 | - remove_keys: 11 | option = "--remove" 12 | - reset_keys: 13 | option = "--reset" 14 | variants: 15 | - with_sshkey_file: 16 | key_file = "yes" 17 | - without_sshkey_file: 18 | key_file = "no" 19 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/virsh_cmd/domain/virsh_sosreport.cfg: -------------------------------------------------------------------------------- 1 | - io-github-autotest-libvirt.virsh.sosreport: 2 | virt_test_type = libvirt 3 | provider = io-github-autotest-libvirt 4 | type = virsh_sosreport 5 | take_regular_screendumps = "no" 6 | variants: 7 | - generate_sos_report: 8 | generate_sos_report = "yes" 9 | - access_pool_stats: 10 | access_pool_stats = "yes" 11 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/virsh_cmd/filter/virsh_nwfilter_dumpxml.cfg: -------------------------------------------------------------------------------- 1 | - virsh.nwfilter_dumpxml: 2 | type = virsh_nwfilter_dumpxml 3 | main_vm = "" 4 | vms = "" 5 | start_vm = no 6 | dumpxml_filter_name = 'no-mac-spoofing' 7 | 
variants: 8 | - normal_test: 9 | status_error = "no" 10 | variants: 11 | - non_acl: 12 | - acl_test: 13 | setup_libvirt_polkit = "yes" 14 | unprivileged_user = "EXAMPLE" 15 | virsh_uri = "qemu:///system" 16 | - error_test: 17 | status_error = "yes" 18 | variants: 19 | - invalid_option: 20 | dumpxml_options_ref = "--xyz" 21 | - none_option: 22 | dumpxml_filter_name = "" 23 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/virsh_cmd/filter/virsh_nwfilter_edit.cfg: -------------------------------------------------------------------------------- 1 | - virsh.nwfilter_edit: 2 | type = virsh_nwfilter_edit 3 | main_vm = "" 4 | vms = "" 5 | start_vm = no 6 | edit_filter_name = "no-mac-spoofing" 7 | edit_filter_ref = "name" 8 | edit_priority = "-100" 9 | variants: 10 | - positive_test: 11 | status_error = "no" 12 | variants: 13 | - use_name: 14 | - use_uuid: 15 | edit_filter_ref = "uuid" 16 | - negative_test: 17 | status_error = "yes" 18 | variants: 19 | - invalid_name: 20 | edit_filter_ref = "invalid_filter_name" 21 | - invalid_extra_option: 22 | edit_filter_ref = "invalid_filter_option" 23 | edit_extra_option = "--xyz" 24 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/virsh_cmd/filter/virsh_nwfilter_list.cfg: -------------------------------------------------------------------------------- 1 | - virsh.nwfilter_list: 2 | type = virsh_nwfilter_list 3 | main_vm = "" 4 | vms = "" 5 | start_vm = no 6 | variants: 7 | - normal_test: 8 | status_error = "no" 9 | variants: 10 | - non_acl: 11 | - acl_test: 12 | setup_libvirt_polkit = "yes" 13 | unprivileged_user = "EXAMPLE" 14 | virsh_uri = "qemu:///system" 15 | - error_test: 16 | status_error = "yes" 17 | list_options_ref = "--xyz" 18 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/virsh_cmd/host/virsh_capabilities.cfg: 
-------------------------------------------------------------------------------- 1 | - virsh.capabilities: 2 | type = virsh_capabilities 3 | vms = '' 4 | start_vm = no 5 | variants: 6 | - no_option: 7 | virsh_cap_options = "" 8 | status_error = "no" 9 | - unexpect_option: 10 | virsh_cap_options = "xyz" 11 | status_error = "yes" 12 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/virsh_cmd/host/virsh_hostname.cfg: -------------------------------------------------------------------------------- 1 | - virsh.hostname: install setup image_copy unattended_install.cdrom 2 | type = virsh_hostname 3 | vms = '' 4 | start_vm = no 5 | libvirtd = "on" 6 | variants: 7 | - no_option: 8 | virsh_hostname_options = "" 9 | status_error = "no" 10 | - unexpect_option: 11 | virsh_hostname_options = " xyz" 12 | status_error = "yes" 13 | - with_libvirtd_stop: 14 | virsh_hostname_options = "" 15 | status_error = "yes" 16 | libvirtd = "off" 17 | - remote_connect: 18 | start_vm = yes 19 | virsh_hostname_options = "" 20 | remote_uri = "qemu+ssh://${remote_ip}/system" 21 | status_error = "no" 22 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/virsh_cmd/host/virsh_nodecpumap.cfg: -------------------------------------------------------------------------------- 1 | - virsh.nodecpumap: 2 | type = virsh_nodecpumap 3 | vms = '' 4 | start_vm = no 5 | variants: 6 | - no_option: 7 | virsh_node_options = "" 8 | status_error = "no" 9 | - pretty_option: 10 | virsh_node_options = "--pretty" 11 | status_error = "no" 12 | - unexpect_option: 13 | virsh_node_options = "xyz" 14 | status_error = "yes" 15 | - cpu_off_on: 16 | cpu_off_on_test = "yes" 17 | virsh_node_options = "" 18 | status_error = "no" 19 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/virsh_cmd/host/virsh_nodecpustats.cfg: 
-------------------------------------------------------------------------------- 1 | - virsh.nodecpustats: 2 | type = virsh_nodecpustats 3 | vms = '' 4 | start_vm = no 5 | # this is the number of times the command will be executed, and 6 | # the actual delta values will be listed at the end of the testcase for 7 | # all iterations 8 | inner_test_iterations = 1 9 | libvirtd = "on" 10 | variants test_case: 11 | - all_options_all_cpus: 12 | - disable_enable_cpu: 13 | err_msg = 'Invalid cpuNum in virHostCPUGetStatsLinux' 14 | - with_libvirtd_stop: 15 | libvirtd = "off" 16 | - invalid_option: 17 | virsh_cpunodestats_options = "--xyz" 18 | - invalid_cpuNum: 19 | invalid_cpunum = "yes" 20 | err_msg = 'Invalid cpuNum in virHostCPUGetStatsLinux|malformed or out of range' 21 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/virsh_cmd/host/virsh_nodeinfo.cfg: -------------------------------------------------------------------------------- 1 | - virsh.nodeinfo: 2 | type = virsh_nodeinfo 3 | vms = '' 4 | start_vm = no 5 | virsh_node_options = "" 6 | status_error = "no" 7 | check_frequency = yes 8 | aarch64: 9 | check_frequency = no 10 | variants test_case: 11 | - no_option: 12 | libvirtd = "on" 13 | - disable_enable_vcpu: 14 | disable_enable_vcpu = "yes" 15 | - unexpect_option: 16 | virsh_node_options = "xyz" 17 | status_error = "yes" 18 | libvirtd = "on" 19 | - with_libvirtd_stop: 20 | status_error = "yes" 21 | libvirtd = "off" 22 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/virsh_cmd/host/virsh_sysinfo.cfg: -------------------------------------------------------------------------------- 1 | - virsh.sysinfo: 2 | type = virsh_sysinfo 3 | vms = '' 4 | start_vm = no 5 | readonly = "no" 6 | variants: 7 | - normal_test: 8 | status_error = "no" 9 | virsh_sysinfo_options = "" 10 | variants: 11 | - no_option: 12 | - readonly_option: 13 | readonly = "yes" 14 | - error_test: 15 | 
status_error = "yes" 16 | virsh_sysinfo_options = "xyz" 17 | variants: 18 | - without_readonly: 19 | - with_readonly: 20 | readonly = "yes" 21 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/virsh_cmd/host/virsh_version.cfg: -------------------------------------------------------------------------------- 1 | - virsh.version: install setup image_copy unattended_install.cdrom 2 | type = virsh_version 3 | vms = '' 4 | start_vm = no 5 | virsh_version_options = "" 6 | status_error = "no" 7 | variants: 8 | - no_option: 9 | - unexpect_option: 10 | virsh_version_options = "xyz" 11 | status_error = "yes" 12 | - with_libvirtd_stop: 13 | status_error = "yes" 14 | libvirtd = "off" 15 | - daemon_option: 16 | virsh_version_options = "--daemon" 17 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/virsh_cmd/interface/virsh_iface_edit.cfg: -------------------------------------------------------------------------------- 1 | - virsh.iface_edit: 2 | type = virsh_iface_edit 3 | vms = "" 4 | main_vm = "" 5 | start_vm = no 6 | variants: 7 | - positive_test: 8 | status_error = "no" 9 | variants: 10 | - loopback_iface: 11 | iface_name = "lo" 12 | - negative_test: 13 | status_error = "yes" 14 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/virsh_cmd/interface/virsh_iface_list.cfg: -------------------------------------------------------------------------------- 1 | - virsh.iface_list: 2 | type = virsh_iface_list 3 | status_error = "no" 4 | start_vm = "no" 5 | variants: 6 | - iface_list: 7 | test_list = "yes" 8 | variants: 9 | - inactive: 10 | opt = '--inactive' 11 | - active: 12 | opt = '' 13 | - all: 14 | opt = '--all' 15 | - iface_dumpxml: 16 | test_info = "yes" 17 | - iface_mac: 18 | test_info = "yes" 19 | test_mac = "yes" 20 | - iface_name: 21 | test_info = "yes" 22 | test_name = "yes" 23 | 24 | 
-------------------------------------------------------------------------------- /libvirt/tests/cfg/virsh_cmd/monitor/virsh_backing_chain_domblkinfo.cfg: -------------------------------------------------------------------------------- 1 | - virsh.backing_chain.domblkinfo: 2 | type = virsh_backing_chain_domblkinfo 3 | take_regular_screendumps = "no" 4 | start_vm = "no" 5 | target_bus = "virtio" 6 | target_format = "qcow2" 7 | device_type = "disk" 8 | status_error = "no" 9 | define_error = "no" 10 | variants: 11 | - backingchain_operate: 12 | target_dev = "vdb" 13 | type_name = "file" 14 | virt_disk_device_source = "/var/lib/libvirt/images/domblkinfo.img" 15 | block_commit_option = "--active --shallow --wait --verbose --pivot --async" 16 | block_pull_option = "--wait --verbose --async" 17 | block_copy_option = "--blockdev --wait --verbose --async --pivot" 18 | variants: 19 | - coldplug: 20 | virt_device_hotplug = "no" 21 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/virsh_cmd/nodedev/crypto_nodedev_create_destroy.cfg: -------------------------------------------------------------------------------- 1 | - virsh.crypto_nodedev_create_destroy: 2 | type = crypto_nodedev_create_destroy 3 | start_vm = "no" 4 | take_regular_screendumps = "no" 5 | only s390-virtio 6 | func_supported_since_libvirt_ver = (7, 0, 0) 7 | variants: 8 | - positive: 9 | check_device_attributes = yes 10 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/virsh_cmd/nodedev/virsh_nodedev_dumpxml_chain.cfg: -------------------------------------------------------------------------------- 1 | - virsh.nodedev_dumpxml.chain: 2 | type = virsh_nodedev_dumpxml_chain 3 | vms = "" 4 | main_vm = "" 5 | start_vm = "no" 6 | device_ids = 0.0.1234,0.0.4567 7 | variants: 8 | - device_type_dasd: 9 | only s390-virtio 10 | checks = '[{"capability/block": r"/dev/dasd"},{"driver/name": r"dasd"},{"driver/name": 
r"(io_subchannel|vfio_ccw)"}]' 11 | chain_start_device_pattern = "block_dasd" 12 | - device_type_css: 13 | only s390-virtio 14 | checks = '[{"capability/channel_dev_addr/devno": r"0x[\w\d]{4}"}]' 15 | chain_start_device_pattern = "css_0_0_[\w\d]{4}" 16 | 17 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/virsh_cmd/nodedev/virsh_nodedev_persistence_mdev.cfg: -------------------------------------------------------------------------------- 1 | - virsh.nodedev_persistence.mdev: 2 | type = virsh_nodedev_persistence_mdev 3 | start_vm = "no" 4 | func_supported_since_libvirt_ver = (7, 6, 0) 5 | variants: 6 | - ccw: 7 | only s390-virtio 8 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/virsh_cmd/pool/virsh_pool_capabilities.cfg: -------------------------------------------------------------------------------- 1 | - virsh.pool_capabilities: 2 | type = virsh_pool_capabilities 3 | vms = '' 4 | start_vm = no 5 | status_error = "no" 6 | variants: 7 | - no_option: 8 | virsh_pool_cap_options = "" 9 | - unexpect_option: 10 | virsh_pool_cap_options = "xyz" 11 | status_error = "yes" 12 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/virsh_cmd/pool/virsh_pool_create_as.cfg: -------------------------------------------------------------------------------- 1 | - virsh.pool_create_as: 2 | type = virsh_pool_create_as 3 | vms = "" 4 | start_vm = no 5 | # type in [ 'dir', 'fs', 'netfs', 'disk', 'iscsi', 'logical' ] 6 | pool_type = dir 7 | pool_name = custom_pool_name 8 | pool_target = /mnt 9 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/virsh_cmd/secret/virsh_secret_dumpxml.cfg: -------------------------------------------------------------------------------- 1 | - virsh.secret_dumpxml: 2 | type = virsh_secret_dumpxml 3 | vms = "" 4 | main_vm = "" 5 | start_vm = no 6 | 
status_error = "no" 7 | encode_video_files = "no" 8 | skip_image_processing = "yes" 9 | take_regular_screendumps = "no" 10 | variants: 11 | - normal_test: 12 | secret_ref = "secret_valid_uuid" 13 | variants: 14 | - non_acl: 15 | - acl_test: 16 | setup_libvirt_polkit = "yes" 17 | unprivileged_user = "EXAMPLE" 18 | virsh_uri = "qemu:///system" 19 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/virsh_cmd/snapshot/virsh_snapshot.cfg: -------------------------------------------------------------------------------- 1 | - virsh.snapshot: 2 | type = virsh_snapshot 3 | # Switching screendumps off as creating can pause guest for long time 4 | # and cause error messages 5 | take_regular_screendumps = "no" 6 | variants: 7 | - live: 8 | snapshot_shutdown = "no" 9 | variants: 10 | - halt: 11 | snapshot_halt = "yes" 12 | - no_halt: 13 | snapshot_halt = "no" 14 | - offline: 15 | snapshot_shutdown = "yes" 16 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/virsh_cmd/virsh_qemu_cmdline_core.cfg: -------------------------------------------------------------------------------- 1 | - io-github-autotest-libvirt.virsh.qemu_cmdline_core: 2 | virt_test_type = libvirt 3 | provider = io-github-autotest-libvirt 4 | type = virsh_qemu_cmdline_core 5 | variants: 6 | - qemu_cmdline_more_cpus: 7 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/virt_admin/management/virt_admin_client_disconnect.cfg: -------------------------------------------------------------------------------- 1 | - virt_admin.client_disconnect: 2 | type = virt_admin_client_disconnect 3 | start_vm = no 4 | num_clients = 2 5 | server_cn = "ENTER.YOUR.SERVER_CN" 6 | client_cn = "ENTER.YOUR.CLIENT_CN" 7 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/virt_admin/management/virt_admin_server_update_tls.cfg: 
-------------------------------------------------------------------------------- 1 | - virt_admin.server_update_tls: 2 | type = virt_admin_server_update_tls 3 | start_vm = no 4 | server_cn = "ENTER.YOUR.SERVER_CN" 5 | client_cn = "ENTER.YOUR.CLIENT_CN" 6 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/virt_admin/monitor/virt_admin_srv_clients_info.cfg: -------------------------------------------------------------------------------- 1 | - virt_admin.srv_clients_info: 2 | type = virt_admin_srv_clients_info 3 | start_vm = no 4 | max_clients = 6000 5 | max_anonymous_clients = 30 6 | num_clients = 3 7 | server_cn = "ENTER.YOUR.SERVER_CN" 8 | client_cn = "ENTER.YOUR.CLIENT_CN" 9 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/virt_admin/monitor/virt_admin_srv_list.cfg: -------------------------------------------------------------------------------- 1 | - virt_admin.srv_list: 2 | type = virt_admin_srv_list 3 | start_vm = no 4 | variants: 5 | - admin: 6 | server_name = "admin" 7 | - libvirtd: 8 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/virt_admin/monitor/virt_admin_srv_threadpool_info.cfg: -------------------------------------------------------------------------------- 1 | - virt_admin.srv_threadpool_info: 2 | type = virt_admin_srv_threadpool_info 3 | start_vm = no 4 | variants: 5 | - libvirtd: 6 | min_workers = 6 7 | max_workers = 21 8 | prio_workers = 7 9 | - admin: 10 | server_name = "admin" 11 | admin_min_workers = 2 12 | admin_max_workers = 6 13 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/virt_cmd/virt_clone.cfg: -------------------------------------------------------------------------------- 1 | - virt_clone: 2 | virt_test_type = libvirt 3 | type = virt_clone 4 | start_vm = no 5 | kill_vm_before_test = yes 6 | dest_vm = "test_clone" 7 | 
dest_image = "clone_image" 8 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/virt_cmd/virt_top.cfg: -------------------------------------------------------------------------------- 1 | - virt_top: 2 | virt_test_type = libvirt 3 | type = virt_top 4 | variants: 5 | - positive_test: 6 | status_error = "no" 7 | variants: 8 | - stream_option: 9 | output_file = "virt_top_output" 10 | options = "--stream" 11 | - negative_test: 12 | status_error = "yes" 13 | variants: 14 | - display_option: 15 | options = "-0" 16 | - invalid_option: 17 | options = "--xyz" 18 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/virt_cmd/virt_what.cfg: -------------------------------------------------------------------------------- 1 | - virt_what: 2 | virt_test_type = libvirt 3 | type = virt_what 4 | variants: 5 | - lxc_fact: 6 | only lxc 7 | fact = "lxc" 8 | - kvm_fact: 9 | # Currently, we are assuming qemu is used only 10 | # for kvm guest. 
11 | only qemu 12 | fact = "kvm" 13 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/virtio_transitional/virtio_transitional_serial.cfg: -------------------------------------------------------------------------------- 1 | - virtio_transitional_serial: 2 | type = virtio_transitional_serial 3 | no Windows 4 | only q35 5 | start_vm = no 6 | disk_model = "virtio-transitional" 7 | image_path = images/rhel6-x86_64-latest.qcow2 8 | guest_src_url = "http://download.libvirt.redhat.com/libvirt-CI-resources/RHEL-6.10-x86_64-latest.qcow2" 9 | set_crypto_policy = "LEGACY" 10 | variants: 11 | - @default: 12 | only virtio_transitional 13 | - with_pcie_to_pci_bridge: 14 | add_pcie_to_pci_bridge = yes 15 | variants: 16 | - virtio: 17 | virtio_model = "virtio" 18 | - virtio_transitional: 19 | no s390-virtio 20 | virtio_model = "virtio-transitional" 21 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/virtual_device/input_devices_plug_unplug.cfg: -------------------------------------------------------------------------------- 1 | - virtual_devices.input_devices_plug_unplug: 2 | type = input_devices_plug_unplug 3 | take_regular_screendumps = no 4 | start_vm = no 5 | 6 | variants multiple_input_types: 7 | - tablet_mouse_keyboard: 8 | device_types = ['tablet', 'mouse', 'keyboard'] 9 | variants: 10 | - bus_usb: 11 | no s390-virtio 12 | bus_type = usb 13 | - bus_virtio: 14 | bus_type = virtio 15 | variants: 16 | - hot: 17 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/virtual_device/sound_device.cfg: -------------------------------------------------------------------------------- 1 | - virtual_devices.sound_device: 2 | type = sound_device 3 | start_vm = no 4 | variants: 5 | - no_codec_type: 6 | - codec_type_duplex: 7 | only sound_model_ich6 sound_model_ich9 8 | codec_type = duplex 9 | - codec_type_micro: 10 | only sound_model_ich6 
sound_model_ich9 11 | codec_type = micro 12 | variants: 13 | - sound_model_ac97: 14 | no pseries 15 | sound_model = ac97 16 | - sound_model_ich6: 17 | no pseries 18 | sound_model = ich6 19 | - sound_model_ich9: 20 | no pseries 21 | slot_value = "0x1b" 22 | sound_model = ich9 23 | variants: 24 | - positive_test: 25 | status_error = "no" 26 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/virtual_disks/vhostvdpa_block_backend_type/define_start_vm_with_multi_vhostvdpa_backend_disks.cfg: -------------------------------------------------------------------------------- 1 | - virtual_disks.vhostvdpa.define_start_multi_disks: 2 | type = define_start_vm_with_multi_vhostvdpa_backend_disks 3 | start_vm = no 4 | simulator = "yes" 5 | func_supported_since_libvirt_ver = (9, 10, 0) 6 | only x86_64 7 | 8 | disk_vdpa_attrs = {"source": {"attrs": {"dev": "/dev/vhost-vdpa-0"}}, "type_name": "vhostvdpa"} 9 | disk2_vdpa_attrs = {"source": {"attrs": {"dev": "/dev/vhost-vdpa-1"}}, "type_name": "vhostvdpa"} 10 | disk_driver = {"driver": {"name": "qemu", "type": "raw", "cache": "none", "io": "threads", "copy_on_read": "on", "discard": "unmap", "detect_zeroes": "on"}} 11 | disk_attrs = {"device": "disk", "target": {"dev": "vdb", "bus": "virtio"}, **${disk_vdpa_attrs}, **${disk_driver}} 12 | disk2_attrs = {"device": "disk", "target": {"dev": "vdc", "bus": "virtio"}, **${disk2_vdpa_attrs}, **${disk_driver}} 13 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/virtual_disks/vhostvdpa_block_backend_type/define_start_vms_with_same_vhostvdpa_backend_disk.cfg: -------------------------------------------------------------------------------- 1 | - virtual_disks.vhostvdpa.define_start_multi_vms_with_same_disk: 2 | type = define_start_vms_with_same_vhostvdpa_backend_disk 3 | start_vm = no 4 | simulator = "yes" 5 | func_supported_since_libvirt_ver = (9, 10, 0) 6 | only x86_64 7 | 8 | vms = 
avocado-vt-vm1 vm2 9 | disk_vdpa_attrs = {"source": {"attrs": {"dev": "/dev/vhost-vdpa-0"}}, "type_name": "vhostvdpa"} 10 | disk_driver = {"driver": {"name": "qemu", "type": "raw", "cache": "none", "io": "threads", "copy_on_read": "on", "discard": "unmap", "detect_zeroes": "on"}} 11 | disk_attrs = {"device": "disk", "target": {"dev": "vdb", "bus": "virtio"}, **${disk_vdpa_attrs}, **${disk_driver}} 12 | err_msg = "vdpa device.*Device or resource busy" 13 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/virtual_disks/vhostvdpa_block_backend_type/hotplug_vhostvdpa_backend_disk.cfg: -------------------------------------------------------------------------------- 1 | - virtual_disks.vhostvdpa.hotplug: 2 | type = hotplug_vhostvdpa_backend_disk 3 | start_vm = no 4 | simulator = "yes" 5 | disk_vdpa_attrs = {"source": {"attrs": {"dev": "/dev/vhost-vdpa-0"}}, "type_name": "vhostvdpa"} 6 | func_supported_since_libvirt_ver = (9, 10, 0) 7 | only x86_64 8 | 9 | variants cache_mode: 10 | - none: 11 | - directsync: 12 | - no_specified_value: 13 | status_error = "yes" 14 | 15 | disk_driver = {"driver": {"name": "qemu", "type": "raw", "cache": "${cache_mode}", "io": "threads", "copy_on_read": "on", "discard": "unmap", "detect_zeroes": "on"}} 16 | disk_attrs = {"device": "disk", "target": {"dev": "vdb", "bus": "virtio"}, **${disk_vdpa_attrs}, **${disk_driver}} 17 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/virtual_disks/vhostvdpa_block_backend_type/nodedev_vhostvdpa_disk.cfg: -------------------------------------------------------------------------------- 1 | - virtual_disks.vhostvdpa.nodedev: 2 | type = nodedev_vhostvdpa_disk 3 | start_vm = no 4 | simulator = "yes" 5 | func_supported_since_libvirt_ver = (9, 10, 0) 6 | only x86_64 7 | -------------------------------------------------------------------------------- 
/libvirt/tests/cfg/virtual_disks/vhostvdpa_block_backend_type/vm_lifecycle_vhostvdpa_backend_disk.cfg: -------------------------------------------------------------------------------- 1 | - virtual_disks.vhostvdpa.lifecycle: 2 | type = vm_lifecycle_vhostvdpa_backend_disk 3 | start_vm = no 4 | simulator = "yes" 5 | disk_vdpa_attrs = {"source": {"attrs": {"dev": "/dev/vhost-vdpa-0"}}, "type_name": "vhostvdpa"} 6 | disk_driver = {"driver": {"name": "qemu", "type": "raw", "cache": "none", "io": "threads", "copy_on_read": "on", "discard": "unmap", "detect_zeroes": "on"}} 7 | disk_attrs = {"device": "disk", "target": {"dev": "vdb", "bus": "virtio"}, **${disk_vdpa_attrs}, **${disk_driver}} 8 | func_supported_since_libvirt_ver = (9, 10, 0) 9 | only x86_64 10 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/virtual_disks/virtual_disks_audit_log_disk.cfg: -------------------------------------------------------------------------------- 1 | - virtual_disks.audit_log_disk: 2 | type = virtual_disks_audit_log_disk 3 | take_regular_screendumps = "no" 4 | start_vm = "no" 5 | target_bus = "virtio" 6 | target_format = "qcow2" 7 | target_dev = "vdb" 8 | type_name = "file" 9 | device_type = "disk" 10 | status_error = "no" 11 | variants: 12 | - start_vm: 13 | variants: 14 | - file_backing: 15 | virt_disk_device_source = "/var/lib/libvirt/images/audit_log_disk.qcow2" 16 | variants: 17 | - hotplug: 18 | virt_device_hotplug = "yes" 19 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/virtual_disks/virtual_disks_backingstore_disk.cfg: -------------------------------------------------------------------------------- 1 | - virtual_disks.backingstore_disk: 2 | type = virtual_disks_backingstore_disk 3 | take_regular_screendumps = "no" 4 | start_vm = "no" 5 | target_bus = "virtio" 6 | target_format = "qcow2" 7 | target_dev = "vdb" 8 | type_name = "volume" 9 | device_type = "disk" 10 | 
variants: 11 | - start_vm: 12 | variants: 13 | - volume_backingstore: 14 | pool_name = "images" 15 | image_base_name = "base.qcow2" 16 | overlay_image_name = "overlay.qcow2" 17 | virt_disk_device_source_base = "/var/lib/libvirt/images/${image_base_name}" 18 | virt_disk_device_source_overlay = "/var/lib/libvirt/images/${overlay_image_name}" 19 | backingstore_type = "volume" 20 | variants: 21 | - coldplug: 22 | virt_device_coldplug = "yes" 23 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/virtual_disks/virtual_disks_dasd.cfg: -------------------------------------------------------------------------------- 1 | - virtual_disks.dasd: 2 | type = virtual_disks_dasd 3 | only s390-virtio 4 | devid = 5 | variants: 6 | - read_native_partition_table: 7 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/virtual_disks/virtual_disks_datastore.cfg: -------------------------------------------------------------------------------- 1 | - virtual_disks.datastore: 2 | type = virtual_disks_datastore 3 | start_vm = no 4 | target_disk = "vdb" 5 | disk_type = "file" 6 | func_supported_since_libvirt_ver = (10, 10, 0) 7 | data_file_option = " -o data_file=%s" 8 | disk_dict = {"type_name":"${disk_type}", "target":{"dev": "${target_disk}", "bus": "virtio"}, "driver": {"name": "qemu", "type":"qcow2"}} 9 | variants: 10 | - start_vm: 11 | - hotplug_disk: 12 | with_hotplug = "yes" 13 | variants datastore_type: 14 | - with_file: 15 | - with_block: 16 | with_block = "yes" 17 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/virtual_disks/virtual_disks_io_tuning.cfg: -------------------------------------------------------------------------------- 1 | - virtual_disks.io_tuning: 2 | type = virtual_disks_io_tuning 3 | status_error = "no" 4 | start_vm = "no" 5 | variants tuning_type: 6 | - io_uring: 7 | func_supported_since_libvirt_ver = (9, 3, 0) 8 
| variants test_scenario: 9 | - normal_start: 10 | driver_attribute = {'name': "qemu", 'type': "qcow2", 'io': 'io_uring'} 11 | source_file_path = "/var/lib/libvirt/images/io_uring.qcow2" 12 | target_device = "sdb" 13 | variants plug_mode: 14 | - coldplug: 15 | coldplug = "yes" 16 | - hotplug: 17 | coldplug = "no" 18 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/virtual_disks/virtual_disks_nvme.cfg: -------------------------------------------------------------------------------- 1 | - virtual_disks.nvme: 2 | type = virtual_disks_nvme 3 | take_regular_screendumps = "no" 4 | start_vm = "no" 5 | target_format = "raw" 6 | type_name = "nvme" 7 | driver_type = 'raw' 8 | device_type = "disk" 9 | target_dev = "vdb" 10 | target_bus = "virtio" 11 | status_error = "no" 12 | define_error = "no" 13 | pkgs_host = "pciutils" 14 | variants: 15 | - attach_nvme: 16 | source_attrs = "{'type':'pci', 'managed':'yes', 'namespace':'1', 'index':'1'}" 17 | variants: 18 | - coldplug: 19 | virt_device_hotplug = "no" 20 | - hotplug: 21 | virt_device_hotplug = "yes" 22 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/virtual_disks/virtual_disks_optional_startuppolicy.cfg: -------------------------------------------------------------------------------- 1 | - virtual_disks.optional_startuppolicy_config: 2 | type = virtual_disks_optional_startuppolicy 3 | take_regular_screendumps = "no" 4 | start_vm = "no" 5 | target_bus = "virtio" 6 | target_format = "raw" 7 | device_type = "disk" 8 | driver_type = 'raw' 9 | status_error = "no" 10 | define_error = "no" 11 | variants: 12 | - start_guest: 13 | variants: 14 | - set_optional_startuppolicy: 15 | target_dev = "vdb vdc vdd" 16 | type_name = "file block volume" 17 | startup_policy_value = "optional" 18 | -------------------------------------------------------------------------------- 
/libvirt/tests/cfg/virtual_disks/virtual_disks_scsi3_persistent_reservation.cfg: -------------------------------------------------------------------------------- 1 | - virtual_disks.scsi3_persistent_reservation: 2 | type = virtual_disks_scsi3_persistent_reservation 3 | take_regular_screendumps = "no" 4 | start_vm = "no" 5 | virt_disk_vm_ref = "name" 6 | virt_disk_device = "lun" 7 | iscsi_host = "127.0.0.1" 8 | iscsi_port = "3260" 9 | emulated_image = "iscsi" 10 | iscsi_image_size = "1G" 11 | virt_disk_device_target = "sdb" 12 | virt_disk_device_format = "raw" 13 | virt_disk_device_bus = "scsi" 14 | virt_disk_device_type = "block" 15 | variants: 16 | - hotplug_disk: 17 | hotplug_disk = "yes" 18 | - coldplug_disk: 19 | hotplug_disk = "no" 20 | variants: 21 | - reservations_managed: 22 | reservations_managed = "yes" 23 | - reservations_not_managed: 24 | reservations_managed = "no" 25 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/virtual_disks/virtual_disks_snapshot_blockresize.cfg: -------------------------------------------------------------------------------- 1 | - virtual_disks.snapshot_blockresize: 2 | type = virtual_disks_snapshot_blockresize 3 | take_regular_screendumps = "no" 4 | start_vm = "yes" 5 | snapshot_take = "4" 6 | variants: 7 | - backing_chain_element: 8 | snapshot_name = "blockresize" 9 | virt_disk_device_target = "vda" 10 | size = "1024" 11 | variants: 12 | - positive_test: 13 | status_error = "no" 14 | 15 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/virtual_interface/interface_update_device_negative.cfg: -------------------------------------------------------------------------------- 1 | - iface.update_device.negative: 2 | type = interface_update_device_negative 3 | start_vm = "yes" 4 | virsh_opt = "no_option" 5 | variants: 6 | - driver: 7 | variants: 8 | - rss: 9 | func_supported_since_libvirt_ver = (8, 4, 0) 10 | status_error = "yes" 
11 | error_msg = "device driver attributes" 12 | pre_iface_dict = {'driver': {'driver_attr': {'queues': '4'}}} 13 | iface_dict = {'driver': {'driver_attr': {'queues': '4', 'rss': 'on', 'rss_hash_report': 'on'}}} 14 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/virtual_interface/interface_update_device_offline_domain.cfg: -------------------------------------------------------------------------------- 1 | - iface.update_device.offline_domain: 2 | type = interface_update_device_offline_domain 3 | start_vm = no 4 | iface_dict = {'link_state':'down'} 5 | 6 | variants test_scenario: 7 | - opts_affect_running_vm: 8 | status_error = "yes" 9 | error_msg = "domain is not running" 10 | variants virsh_opt: 11 | - live: 12 | - live_config: 13 | - opts_affect_offline_vm: 14 | variants virsh_opt: 15 | - config: 16 | - current: 17 | - persistent: 18 | - current_exclusive: 19 | status_error = "yes" 20 | error_msg = "exclusive" 21 | variants virsh_opt: 22 | - persistent_current: 23 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/virtual_network/address/virtual_network_address_tftp.cfg: -------------------------------------------------------------------------------- 1 | - virtual_network.address.tftp: 2 | type = virtual_network_address_tftp 3 | start_vm = "no" 4 | func_supported_since_libvirt_ver = (8, 5, 0) 5 | network_dict = {'bridge': {'name': 'virbr1', 'stp': 'on', 'delay': '0'}, 'forward': {'mode': 'nat'}, 'ips': [{'address': '192.168.120.1', 'netmask': '255.255.255.0', 'tftp_root': tftp_root}], 'nat_port': {'start': '1024', 'end': '65535'}, 'name': 'net_boot'} 6 | error_msg = "tftpboot inaccessible: No such file or directory" 7 | dnsmasq_setting = "enable-tftp" 8 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/virtual_network/attach_detach_device/attach_user_type_iface.cfg: 
-------------------------------------------------------------------------------- 1 | - virtual_network.attach_device.user_type_iface: 2 | type = attach_user_type_iface 3 | start_vm = no 4 | outside_ip = 'www.redhat.com' 5 | vm_ping_outside = pass 6 | variants user_type: 7 | - root_user: 8 | test_user = root 9 | virsh_uri = 'qemu:///system' 10 | - non_root_user: 11 | test_user = USER.EXAMPLE 12 | test_passwd = PASSWORD.EXAMPLE 13 | unpr_vm_name = UNPRIVILEGED_VM.EXAMPLE 14 | virsh_uri = 'qemu+ssh://${test_user}@localhost/session' 15 | iface_attrs = {'type_name': 'user', 'mac_address': '00:11:22:33:44:55', 'model': 'virtio', 'ips': [{'family': 'ipv4', 'address': '172.17.2.0', 'prefix': '24'}, {'family': 'ipv6', 'address': '2001:db8:ac10:fd01::', 'prefix': '64'}]} 16 | expect_ipv4 = 172.17.2.15 17 | expect_ipv6 = 2001:db8:ac10:fd01: 18 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/virtual_network/connectivity/connectivity_check_bridge_interface_unprivileged.cfg: -------------------------------------------------------------------------------- 1 | - virtual_network.connectivity_check.bridge_interface.unprivileged: 2 | type = connectivity_check_bridge_interface_unprivileged 3 | start_vm = no 4 | timeout = 240 5 | outside_ip = 'www.redhat.com' 6 | host_iface = 7 | variants: 8 | - default: 9 | vm_ping_outside = pass 10 | vm_ping_host_public = pass 11 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/virtual_network/connectivity/connectivity_check_mcast_interface.cfg: -------------------------------------------------------------------------------- 1 | - virtual_network.connectivity_check.mcast_interface: 2 | type = connectivity_check_mcast_interface 3 | start_vm = no 4 | vms = avocado-vt-vm1 vm2 5 | mcast_addr = 230.144.17.1 6 | iface_m_attrs = {'source': {'address': '${mcast_addr}', 'port': '5558'}, 'model': 'virtio', 'type_name': 'mcast'} 7 | iface_attrs = 
{'source': {'network': 'default'}, 'model': 'virtio', 'type_name': 'network'} 8 | variants: 9 | - default: 10 | expect_msg = 'unicast, xmt/rcv/%loss = \d+/\d+/0%.*\n.*multicast, xmt/rcv/%loss = \d+/\d+/0%' 11 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/virtual_network/connectivity/connectivity_check_tcp_tunnel_interface.cfg: -------------------------------------------------------------------------------- 1 | - virtual_network.connectivity_check.tcp_tunnel_interface: 2 | type = connectivity_check_tcp_tunnel_interface 3 | start_vm = no 4 | vms = avocado-vt-vm1 vm2 5 | source_port = 5558 6 | vm_ip = 10.10.10.10 7 | cli_vm_ip = 10.10.10.20 8 | netmask = 24 9 | cli_iface_attrs = {'source': {'address': '127.0.0.1', 'port': '${source_port}'}, 'model': 'virtio', 'type_name': 'client'} 10 | variants source_mode: 11 | - no_source_addr: 12 | iface_attrs = {'source': {'port': '${source_port}'}, 'model': 'virtio', 'type_name': 'server'} 13 | - with_source_addr: 14 | iface_attrs = {'source': {'address': '127.0.0.1', 'port': '${source_port}'}, 'model': 'virtio', 'type_name': 'server'} 15 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/virtual_network/connectivity/connectivity_check_udp_tunnel_interface.cfg: -------------------------------------------------------------------------------- 1 | - virtual_network.connectivity_check.udp_tunnel_interface: 2 | type = connectivity_check_udp_tunnel_interface 3 | start_vm = no 4 | vms = avocado-vt-vm1 vm2 5 | source_port = 5558 6 | vm_ip = 10.10.10.10 7 | cli_vm_ip = 10.10.10.20 8 | netmask = 24 9 | source_port = 5558 10 | local_port = 6667 11 | source_attrs = {'address': '127.0.0.1', 'port': '${source_port}'} 12 | local_attrs = {'address': '127.0.0.1', 'port': '${local_port}'} 13 | iface_attrs = {'source': ${source_attrs}, 'model': 'virtio', 'source_local': ${local_attrs}, 'type_name': 'udp'} 14 | cli_iface_attrs = {'source': 
${local_attrs}, 'model': 'virtio', 'source_local': ${source_attrs}, 'type_name': 'udp'} 15 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/virtual_network/domifaddr.cfg: -------------------------------------------------------------------------------- 1 | - virtual_network.domifaddr: 2 | type = domifaddr 3 | start_vm = no 4 | libvirtd_debug_file = /var/log/libvirt/libvird.log 5 | libvirtd_debug_level = 1 6 | libvirtd_debug_filters = 1:qemu 1:libvirt 4:object 4:json 4:event 1:util 7 | variants: 8 | - default: 9 | net_name = default 10 | net_ipv6_attrs = {'address': '2001:db8:ca2:2::1', 'family': 'ipv6', 'prefix': '64', 'dhcp_ranges': {'attrs': {'start': '2001:db8:ca2:2:1::10', 'end': '2001:db8:ca2:2:1::ff'}}} 11 | iface_a_attrs = {'type_name': 'bridge', 'source': {'bridge': br_name}, 'model': 'virtio'} 12 | iface_b_attrs = {'type_name': 'network', 'source': {'network': net_name}, 'model': 'virtio'} 13 | err_msg = Failed to open file '/var/lib/libvirt/dnsmasq/.*status': No such file or directory 14 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/virtual_network/driver/check_vhost_cpu_affinity_with_emulatorpin.cfg: -------------------------------------------------------------------------------- 1 | - virtual_network.check_vhost_cpu_affinity_with_emulatorpin: 2 | type = check_vhost_cpu_affinity_with_emulatorpin 3 | start_vm = no 4 | timeout = 240 5 | vcpu_set = 0 6 | vm_attrs = {'vcpu': 1, 'cputune': {'vcpupins': [{'vcpu': '0', 'cpuset': '${vcpu_set}'}], 'emulatorpin': epin_set}} 7 | iface_attrs = {'model': 'virtio'} 8 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/virtual_network/elements_and_attributes/element_sndbuf.cfg: -------------------------------------------------------------------------------- 1 | - virtual_network.elements_and_attributes.sndbuf: 2 | type = element_sndbuf 3 | start_vm = no 4 | 
timeout = 240 5 | outside_ip = 'www.redhat.com' 6 | vm_ping_outside = pass 7 | variants sndbuf: 8 | - 0: 9 | - 1600: 10 | - 1800: 11 | iface_attrs = {'type_name': 'network', 'source': {'network': 'default'}, 'model': 'virtio', 'tune': {'sndbuf': ${sndbuf}}} 12 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/virtual_network/hotplug/attach_detach_device/rollback_vdpafd_on_hotplug_failure.cfg: -------------------------------------------------------------------------------- 1 | - virtual_network.hotplug.rollback.vdpa_interface: 2 | type = rollback_vdpafd_on_hotplug_failure 3 | start_vm = no 4 | test_target = mellanox 5 | vdpa_dev = "vdpa0" 6 | iface_dict = {'source': {'dev': '/dev/vhost-vdpa-0'}, 'acpi': {'index': '1'}} 7 | iface_dict2 = {'source': {'dev': '/dev/vhost-vdpa-1'}, 'acpi': {'index': '1'}} 8 | func_supported_since_libvirt_ver = (10, 8, 0) 9 | only x86_64 10 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/virtual_network/iface_nss.cfg: -------------------------------------------------------------------------------- 1 | - virtual_network.iface_nss: 2 | type = iface_nss 3 | start_vm = "yes" 4 | net_name = "default" 5 | variants: 6 | - nss_libvirt: 7 | nss_option = "libvirt" 8 | guest_name = "nssguest" 9 | - nss_libvirt_guest: 10 | nss_option = "libvirt_guest" 11 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/virtual_network/iface_rename.cfg: -------------------------------------------------------------------------------- 1 | - virtual_network.iface_rename: 2 | type = iface_rename 3 | start_vm = "no" 4 | variants: 5 | - check_log: 6 | name_1 = "test" 7 | name_2 = "test_new" 8 | config_libvirtd = "yes" 9 | log_file = "libvirtd.log" 10 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/virtual_network/iface_target.cfg: 
-------------------------------------------------------------------------------- 1 | - virtual_network.iface_target: 2 | type = iface_target 3 | start_vm = "no" 4 | take_regular_screendumps = "no" 5 | func_supported_since_libvirt_ver = (6, 6, 0) 6 | unsupported_err_msg = "libvirt does not support this feature since 6.6.0" 7 | variants: 8 | - default: 9 | - macvtap: 10 | test_macvtap = "yes" 11 | variants: 12 | - flush_with_occupation: 13 | flush_with_occupation = "yes" 14 | - flush_without_occupation: 15 | flush_with_occupation = "no" 16 | variants: 17 | - flush_after_detach: 18 | flush_after_detach = "yes" 19 | - no_flush_after_detach: 20 | flush_after_detach = "no" 21 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/virtual_network/iface_unprivileged.cfg: -------------------------------------------------------------------------------- 1 | - virtual_network.iface_unprivileged: 2 | type = iface_unprivileged 3 | start_vm = no 4 | outside_ip = "www.redhat.com" 5 | vm_ping_outside = pass 6 | host_iface = 7 | test_user = test 8 | test_passwd = test 9 | unpr_vm_name = unpr-vm 10 | variants: 11 | - precreated: 12 | case = 'precreated' 13 | variants: 14 | - host_tap: 15 | device_type = 'tap' 16 | tap_name = 'mytap' 17 | - host_macvtap: 18 | device_type = 'macvtap' 19 | macvtap_name = 'mymacvtap' 20 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/virtual_network/lifecycle/restart_service.cfg: -------------------------------------------------------------------------------- 1 | - virtual_network.lifecycle.restart_service: 2 | type = restart_service 3 | start_vm = no 4 | net_attrs = {'name': net_name, 'forward': {'mode': 'nat'}, 'ips': [{'dhcp_ranges': {'attrs': {'start': '192.168.23.2', 'end': '192.168.23.254'}}, 'netmask': '255.255.255.0', 'address': '192.168.23.1'}]} 5 | 6 | -------------------------------------------------------------------------------- 
/libvirt/tests/cfg/virtual_network/locked_memory_vdpa/mem_lock_limit_multiple_mixed_interfaces.cfg: -------------------------------------------------------------------------------- 1 | - virtual_network.locked_memory.vdpa_interface.mixed_interfaces: 2 | type = mem_lock_limit_multiple_mixed_interfaces 3 | start_vm = no 4 | func_supported_since_libvirt_ver = (8, 10, 0) 5 | vm_attrs = {'max_mem_rt': 6291456, 'max_mem_rt_slots': 32, 'max_mem_rt_unit': 'K', 'current_mem':2, 'current_mem_unit': 'GiB','vcpu': 8, 'cpu': {'numa_cell': [{'id': '0', 'cpus': '0-3', 'memory': '1', 'unit': 'GiB'}, {'id': '1', 'cpus': '4-7', 'memory': '1', 'unit': 'GiB'}]}} 6 | iface_dict = {"source": {'dev':'/dev/vhost-vdpa-0'}} 7 | iface_dict2 = {"source": {'dev':'/dev/vhost-vdpa-1'}} 8 | only x86_64 9 | 10 | variants test_scenario: 11 | - cold_plug: 12 | hostdev_dict = {'mode': 'subsystem', 'type': 'pci', 'managed': 'yes'} 13 | - hot_plug: 14 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/virtual_network/locked_memory_vdpa/mem_lock_limit_multiple_vdpa_interfaces.cfg: -------------------------------------------------------------------------------- 1 | - virtual_network.locked_memory.vdpa_interface.multiple_interfaces: 2 | type = mem_lock_limit_multiple_vdpa_interfaces 3 | start_vm = no 4 | only x86_64 5 | 6 | func_supported_since_libvirt_ver = (8, 7, 0) 7 | vm_attrs = {'max_mem_rt': 6291456, 'max_mem_rt_slots': 32, 'max_mem_rt_unit': 'K', 'current_mem':3, 'current_mem_unit': 'GiB','vcpu': 8, 'cpu': {'numa_cell': [{'id': '0', 'cpus': '0-3', 'memory': '1', 'unit': 'GiB'}, {'id': '1', 'cpus': '4-7', 'memory': '1', 'unit': 'GiB'}]}} 8 | iface_dict1 = {"source": {'dev':'/dev/vhost-vdpa-0'}} 9 | iface_dict2 = {"source": {'dev':'/dev/vhost-vdpa-1'}} 10 | iface_dict3 = {"source": {'dev':'/dev/vhost-vdpa-2'}} 11 | -------------------------------------------------------------------------------- 
/libvirt/tests/cfg/virtual_network/nodedev/nodedev_vdpa_interface.cfg: -------------------------------------------------------------------------------- 1 | - virtual_network.nodedev.vdpa_interface: 2 | type = nodedev_vdpa_interface 3 | start_vm = no 4 | only x86_64 5 | 6 | func_supported_since_libvirt_ver = (7, 3, 0) 7 | func_supported_since_qemu_kvm_ver = (6, 0, 0) 8 | dev_dict = {'path': 'vdpa0', 'name': 'vdpa_vdpa0', 'driver_name': 'vhost_vdpa', 'cap_type': 'vdpa'} 9 | variants test_target: 10 | - simulator: 11 | - mellanox: 12 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/virtual_network/start_vm_with_duplicate_target_dev_name.cfg: -------------------------------------------------------------------------------- 1 | - virtual_network.start_vm.with_duplicate_target_dev_name: 2 | type = start_vm_with_duplicate_target_dev_name 3 | func_supported_since_libvirt_ver = (9, 1, 0) 4 | start_vm = no 5 | status_error = yes 6 | host_iface_name = 7 | variants iface_type_a: 8 | - tap: 9 | error_msg = The .* interface already exists 10 | - macvtap: 11 | error_msg = The .* interface already exists 12 | - network: 13 | - direct: 14 | variants iface_type_b: 15 | - network: 16 | error_msg = The .* interface already exists 17 | - direct: 18 | error_msg = error creating macvtap interface .*@.* 19 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/virtual_network/update_device/update_driver_non_virtio.cfg: -------------------------------------------------------------------------------- 1 | - virtual_network.update_device.driver_non_virtio: 2 | type = update_driver_non_virtio 3 | start_vm = no 4 | timeout = 240 5 | outside_ip = "www.redhat.com" 6 | vm_ping_outside = pass 7 | func_supported_since_libvirt_ver = (10, 6, 0) 8 | variants test_scenario: 9 | - define_with_invaid_driver: 10 | exist_attrs = {'driver': {'driver_host': {'csum':'off'}}} 11 | update_setting = {"link_state": 
"down"} 12 | - update_with_invalid_driver: 13 | update_setting = {"link_state": "down", 'driver': {'driver_host': {'csum':'off'}}} 14 | variants model_type: 15 | - e1000e: 16 | only x86_64 17 | - rtl8139: 18 | only x86_64 19 | iface_attrs = {"model": "${model_type}", "type_name": "network", "source": {"network": "default"}} 20 | 21 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/virtual_network/update_device/update_iface_type_live.cfg: -------------------------------------------------------------------------------- 1 | - virtual_network.update_device.iface_type.live: 2 | type = update_iface_type_live 3 | host_iface = 4 | start_vm = no 5 | timeout = 240 6 | variants: 7 | - bridge_type: 8 | iface_type = bridge 9 | create_linux_br = yes 10 | net_attrs = {'name': net_name, 'bridge': {'name': linux_br}, 'forward': {'mode': 'bridge'}} 11 | - direct_type: 12 | iface_type = direct 13 | net_attrs = {'name': net_name, 'forward': {'mode': 'bridge'}, 'forward_interface': [{'dev': host_iface}]} 14 | iface_attrs = {'model': 'virtio', 'type_name': 'network', 'source': {'network': net_name}, 'mac_address': mac} 15 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/vm_create_destroy_concurrently.cfg: -------------------------------------------------------------------------------- 1 | - vm_create_destroy_concurrently: 2 | type = vm_create_destroy_concurrently 3 | start_vm = no 4 | num_threads = 3 5 | run_time = 1800 6 | -------------------------------------------------------------------------------- /libvirt/tests/cfg/vm_start_destroy_repeatedly.cfg: -------------------------------------------------------------------------------- 1 | - vm_start_destroy_repeatedly: 2 | type = vm_start_destroy_repeatedly 3 | num_cycles = 3000 4 | start_vm = no 5 | test_timeout = 288000 6 | check_hugepage_status = False 7 | variants: 8 | - hugepage: 9 | num_cycles = 100 10 | check_hugepage_status = 
True 11 | mb_params = {'hugepages': {}, 'source_type': 'memfd', 'access_mode': 'shared'} 12 | vm_attrs = {'memory': 8388608, 'memory_unit': 'KiB'} 13 | - @default: 14 | -------------------------------------------------------------------------------- /libvirt/tests/deps/cpu.xml: -------------------------------------------------------------------------------- 1 | 2 | Skylake-Client-IBRS 3 | Intel 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | SandyBridge-IBRS 13 | Intel 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | -------------------------------------------------------------------------------- /libvirt/tests/deps/cve_2023_3750.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # 4 | #This file is a parallel test script for testing CVE-2023-3750 5 | # 6 | POOL=$1 7 | VOL=$2 8 | function test_run() { 9 | while true;do 10 | seq 1 100 | xargs -n 1 -P 10 -I {} sh -c 'virsh -r "pool-list; vol-info --pool $1 --vol $2"' -- $1 $2 11 | done 12 | } 13 | 14 | export -f test_run 15 | 16 | timeout 20 bash -c "test_run $POOL $VOL" 17 | virsh vol-info --pool $POOL $VOL 18 | exit $? 19 | -------------------------------------------------------------------------------- /libvirt/tests/deps/hook_qemu_restore.py: -------------------------------------------------------------------------------- 1 | #! /usr/bin/python3 2 | 3 | """ 4 | This file works as a hook script which will be executed when vm is restored. This file 5 | needs to be copied and rename to /etc/libvirt/hooks/qemu before the hook is activated. 
6 | """ 7 | 8 | import sys 9 | 10 | 11 | def input_xml(): 12 | xml = "" 13 | lines = sys.stdin.readlines() 14 | xml = "".join(lines) 15 | xml = xml.replace("restart\n", "destroy\n") 16 | sys.stdout.write(xml) 17 | 18 | 19 | def main(): 20 | if sys.argv[1] == '%s' and sys.argv[2] == 'restore': 21 | input_xml() 22 | 23 | 24 | if __name__ == '__main__': 25 | main() 26 | -------------------------------------------------------------------------------- /libvirt/tests/deps/qemu_wrapper.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import os 4 | import sys 5 | import time 6 | 7 | TIMEOUT = 5 8 | QEMU_PATH = "/usr/libexec/qemu-kvm" 9 | 10 | time.sleep(TIMEOUT) 11 | sys.argv[0] = QEMU_PATH 12 | os.execv(QEMU_PATH, sys.argv) 13 | -------------------------------------------------------------------------------- /libvirt/tests/src/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autotest/tp-libvirt/32d38ca57741263e2f35c05096af12e61db4bc1a/libvirt/tests/src/__init__.py -------------------------------------------------------------------------------- /libvirt/tests/src/daemon/check_users.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import re 3 | import ast 4 | 5 | from avocado.utils import process 6 | 7 | from virttest import libvirt_version 8 | 9 | 10 | LOGGER = logging.getLogger('avocado.' 
+ __name__) 11 | 12 | 13 | def run(test, params, env): 14 | """ 15 | Check libvirt/qemu/kvm user/group are created via sysuser.d 16 | """ 17 | libvirt_version.is_libvirt_feature_supported(params) 18 | users_list = ast.literal_eval(params.get("users_list", "[]")) 19 | 20 | users_match_list = [] 21 | 22 | sysuser_dir = '/usr/lib/sysusers.d/' 23 | cmd = "grep -hr -E 'kvm|qemu|libvirt' %s" % sysuser_dir 24 | output = process.getoutput(cmd, shell=True) 25 | LOGGER.debug(output) 26 | 27 | for user in users_list: 28 | if not re.search(user, output): 29 | test.fail("%s not created by sysuser.d" % user) 30 | -------------------------------------------------------------------------------- /libvirt/tests/src/migration/abort_precopy_migration/abort_by_domjobabort_on_target.py: -------------------------------------------------------------------------------- 1 | from virttest import libvirt_version 2 | 3 | from provider.migration import base_steps 4 | 5 | 6 | def run(test, params, env): 7 | """ 8 | Test abort job by domjobabort on target host 9 | 10 | :param test: test object 11 | :param params: Dictionary with the test parameters 12 | :param env: Dictionary with test environment. 13 | """ 14 | vm_name = params.get("migrate_main_vm") 15 | 16 | libvirt_version.is_libvirt_feature_supported(params) 17 | 18 | vm = env.get_vm(vm_name) 19 | migration_obj = base_steps.MigrationBase(test, vm, params) 20 | 21 | try: 22 | migration_obj.setup_connection() 23 | migration_obj.run_migration() 24 | migration_obj.verify_default() 25 | finally: 26 | migration_obj.cleanup_connection() 27 | -------------------------------------------------------------------------------- /libvirt/tests/src/migration/async_job/async_job.py: -------------------------------------------------------------------------------- 1 | from provider.migration import base_steps 2 | 3 | 4 | def run(test, params, env): 5 | """ 6 | Test async job: 7 | 8 | 1. 
abort_migration_with_wrong_api_flag case: To verify that precopy 9 | migration can't be aborted by domjobabort with --postcopy, postcopy 10 | migration can't be aborted by domjobabort without --postcopy. 11 | 12 | """ 13 | vm_name = params.get("migrate_main_vm") 14 | 15 | vm = env.get_vm(vm_name) 16 | migration_obj = base_steps.MigrationBase(test, vm, params) 17 | 18 | try: 19 | migration_obj.setup_connection() 20 | migration_obj.run_migration() 21 | migration_obj.verify_default() 22 | finally: 23 | migration_obj.cleanup_connection() 24 | -------------------------------------------------------------------------------- /libvirt/tests/src/migration/async_ops/migrate_vm_again_during_migration.py: -------------------------------------------------------------------------------- 1 | from provider.migration import base_steps 2 | 3 | 4 | def run(test, params, env): 5 | """ 6 | To verify that libvirt can report clear error when migrating vm again 7 | before the last migration completes. 8 | 9 | :param test: test object 10 | :param params: Dictionary with the test parameters 11 | :param env: Dictionary with test environment. 12 | """ 13 | vm_name = params.get("migrate_main_vm") 14 | 15 | vm = env.get_vm(vm_name) 16 | migration_obj = base_steps.MigrationBase(test, vm, params) 17 | 18 | try: 19 | migration_obj.setup_connection() 20 | migration_obj.run_migration() 21 | migration_obj.verify_default() 22 | finally: 23 | migration_obj.cleanup_connection() 24 | -------------------------------------------------------------------------------- /libvirt/tests/src/migration/migration_uri/migration_network_data_transport.py: -------------------------------------------------------------------------------- 1 | from virttest import libvirt_version 2 | 3 | from provider.migration import base_steps 4 | 5 | 6 | def run(test, params, env): 7 | """ 8 | Test live migration with UNIX/Tunnelled transport. 
9 | 10 | :param test: test object 11 | :param params: Dictionary with the test parameters 12 | :param env: Dictionary with test environment. 13 | """ 14 | 15 | libvirt_version.is_libvirt_feature_supported(params) 16 | 17 | vm_name = params.get("migrate_main_vm") 18 | vm = env.get_vm(vm_name) 19 | migration_obj = base_steps.MigrationBase(test, vm, params) 20 | 21 | try: 22 | migration_obj.setup_connection() 23 | migration_obj.run_migration() 24 | migration_obj.verify_default() 25 | finally: 26 | migration_obj.cleanup_connection() 27 | -------------------------------------------------------------------------------- /libvirt/tests/src/remove_guest.py: -------------------------------------------------------------------------------- 1 | from virttest import error_context 2 | 3 | 4 | @error_context.context_aware 5 | def run(test, params, env): 6 | """ 7 | everything is done by client.virt module 8 | """ 9 | pass 10 | -------------------------------------------------------------------------------- /libvirt/tests/src/sriov/capabilities/sriov_capabilities_iommu_support.py: -------------------------------------------------------------------------------- 1 | from virttest.libvirt_xml import capability_xml 2 | 3 | 4 | def run(test, params, env): 5 | """ 6 | check virsh capabilities output includes iommu support element 7 | """ 8 | cap_xml = capability_xml.CapabilityXML() 9 | if cap_xml.get_iommu().get('support') != 'yes': 10 | test.fail("IOMMU is disabled in capabilities: %s. 
" 11 | % cap_xml.get_iommu()) 12 | -------------------------------------------------------------------------------- /libvirt/tests/src/virsh_cmd/domain/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autotest/tp-libvirt/32d38ca57741263e2f35c05096af12e61db4bc1a/libvirt/tests/src/virsh_cmd/domain/__init__.py -------------------------------------------------------------------------------- /libvirt/tests/src/virsh_cmd/filter/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autotest/tp-libvirt/32d38ca57741263e2f35c05096af12e61db4bc1a/libvirt/tests/src/virsh_cmd/filter/__init__.py -------------------------------------------------------------------------------- /libvirt/tests/src/virsh_cmd/host/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autotest/tp-libvirt/32d38ca57741263e2f35c05096af12e61db4bc1a/libvirt/tests/src/virsh_cmd/host/__init__.py -------------------------------------------------------------------------------- /libvirt/tests/src/virsh_cmd/interface/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autotest/tp-libvirt/32d38ca57741263e2f35c05096af12e61db4bc1a/libvirt/tests/src/virsh_cmd/interface/__init__.py -------------------------------------------------------------------------------- /libvirt/tests/src/virsh_cmd/monitor/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autotest/tp-libvirt/32d38ca57741263e2f35c05096af12e61db4bc1a/libvirt/tests/src/virsh_cmd/monitor/__init__.py -------------------------------------------------------------------------------- /libvirt/tests/src/virsh_cmd/network/__init__.py: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/autotest/tp-libvirt/32d38ca57741263e2f35c05096af12e61db4bc1a/libvirt/tests/src/virsh_cmd/network/__init__.py -------------------------------------------------------------------------------- /libvirt/tests/src/virsh_cmd/nodedev/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autotest/tp-libvirt/32d38ca57741263e2f35c05096af12e61db4bc1a/libvirt/tests/src/virsh_cmd/nodedev/__init__.py -------------------------------------------------------------------------------- /libvirt/tests/src/virsh_cmd/pool/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autotest/tp-libvirt/32d38ca57741263e2f35c05096af12e61db4bc1a/libvirt/tests/src/virsh_cmd/pool/__init__.py -------------------------------------------------------------------------------- /libvirt/tests/src/virsh_cmd/secret/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autotest/tp-libvirt/32d38ca57741263e2f35c05096af12e61db4bc1a/libvirt/tests/src/virsh_cmd/secret/__init__.py -------------------------------------------------------------------------------- /libvirt/tests/src/virsh_cmd/snapshot/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autotest/tp-libvirt/32d38ca57741263e2f35c05096af12e61db4bc1a/libvirt/tests/src/virsh_cmd/snapshot/__init__.py -------------------------------------------------------------------------------- /libvirt/tests/src/virsh_cmd/virsh_qemu_cmdline_core.py: -------------------------------------------------------------------------------- 1 | import subprocess 2 | 3 | 4 | def run(test, params, env): 5 | 6 | qemu_binary = params.get("qemu_binary") 7 | errors = ["tcg_region_init", 
"assertion failed", "Aborted", 8 | "core dumped", "Invalid CPU topology"] 9 | 10 | def check_qemu_cmdline(cpus=9999): 11 | 12 | print("checking with %d CPUs" % cpus) 13 | 14 | command = "%s -accel tcg -smp 10,maxcpus=%d" % (qemu_binary, cpus) 15 | global output 16 | output = subprocess.getoutput(command) 17 | 18 | for err in errors: 19 | if err in output: 20 | return True 21 | return False 22 | 23 | cpus = ["9000", "123", "97865", "56789", "123456789"] 24 | 25 | for cpu in cpus: 26 | failed = check_qemu_cmdline(int(cpu)) 27 | if failed: 28 | break 29 | 30 | if failed: 31 | test.fail(output) 32 | -------------------------------------------------------------------------------- /libvirt/tests/src/virsh_cmd/volume/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autotest/tp-libvirt/32d38ca57741263e2f35c05096af12e61db4bc1a/libvirt/tests/src/virsh_cmd/volume/__init__.py -------------------------------------------------------------------------------- /libvirt/tests/src/virt_admin/monitor/virt_admin_srv_list.py: -------------------------------------------------------------------------------- 1 | import re 2 | from virttest import virt_admin 3 | 4 | 5 | def run(test, params, env): 6 | """ 7 | Test command: virt-admin srv-list. 8 | 9 | 1) execute virt-admin srv-list 10 | 2) check whether the server names printed by the 11 | above execution are correct 12 | """ 13 | server_name = params.get("server_name") 14 | if not server_name: 15 | server_name = virt_admin.check_server_name() 16 | 17 | vp = virt_admin.VirtadminPersistent() 18 | result = vp.srv_list(ignore_status=True, debug=True) 19 | output = result.stdout.strip() 20 | 21 | if result.exit_status: 22 | test.fail("This operation should success " 23 | "but failed! output:\n%s " % result) 24 | else: 25 | if not re.search(server_name, output): 26 | test.fail("server %s is not listed! 
" % server_name) 27 | -------------------------------------------------------------------------------- /libvirt/tests/src/virtio_transitional/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autotest/tp-libvirt/32d38ca57741263e2f35c05096af12e61db4bc1a/libvirt/tests/src/virtio_transitional/__init__.py -------------------------------------------------------------------------------- /provider/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autotest/tp-libvirt/32d38ca57741263e2f35c05096af12e61db4bc1a/provider/__init__.py -------------------------------------------------------------------------------- /provider/chardev/check_points.py: -------------------------------------------------------------------------------- 1 | # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 2 | # 3 | # Copyright Redhat 4 | # 5 | # SPDX-License-Identifier: GPL-2.0 6 | 7 | # Author: Nan Li 8 | # 9 | # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 10 | from avocado.utils import process 11 | 12 | 13 | def check_audit_log(test, key_message=None): 14 | """ 15 | Check key messages in log file 16 | 17 | :param test: test object instance 18 | :param key_message: key message that needs to be captured 19 | """ 20 | for item in key_message: 21 | cmd = 'ausearch --start recent -m VIRT_RESOURCE -i | grep %s' % item 22 | result = process.run(cmd, ignore_status=True, shell=True) 23 | if result.exit_status: 24 | test.fail("Check %s failed in %s" % (item, cmd)) 25 | else: 26 | test.log.debug("Check %s exist", item) 27 | -------------------------------------------------------------------------------- /provider/cpu.py: -------------------------------------------------------------------------------- 1 | # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 2 | # 3 | # Copyright Red Hat 4 | # 5 | # 
SPDX-License-Identifier: GPL-2.0 6 | # 7 | # Author: smitterl@redhat.com 8 | # 9 | # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 10 | import platform 11 | 12 | 13 | def patch_total_cpu_count_s390x(cpu_module): 14 | """ 15 | On s390x, override the avocado utility functions because on an 16 | LPAR they would return the number of CPUs available on the CEC 17 | but not on the LPAR. 18 | 19 | :param cpu_module: the module after import in calling script 20 | """ 21 | 22 | if platform.machine() == "s390x": 23 | online_before_test = cpu_module.online_count() 24 | 25 | def _online_before_test(): 26 | return online_before_test 27 | cpu_module.total_cpus_count = _online_before_test 28 | cpu_module.total_count = _online_before_test 29 | -------------------------------------------------------------------------------- /provider/gpu/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autotest/tp-libvirt/32d38ca57741263e2f35c05096af12e61db4bc1a/provider/gpu/__init__.py -------------------------------------------------------------------------------- /provider/sriov/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autotest/tp-libvirt/32d38ca57741263e2f35c05096af12e61db4bc1a/provider/sriov/__init__.py -------------------------------------------------------------------------------- /provider/viommu/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/autotest/tp-libvirt/32d38ca57741263e2f35c05096af12e61db4bc1a/provider/viommu/__init__.py -------------------------------------------------------------------------------- /requirements-travis.txt: -------------------------------------------------------------------------------- 1 | coverage==5.1 2 | nose==1.3.0 3 | nosexcover==1.0.8 4 | tox==1.5.0; python_version < '3.8' 5 | tox==4.16.0; 
python_version > '3.7' 6 | virtualenv==1.9.1; python_version < '3.8' 7 | virtualenv==20.26.6; python_version > '3.7' 8 | simplejson==3.8.1 9 | inspektor==0.5.2; python_version < '3.8' 10 | inspektor==0.5.3; python_version > '3.7' 11 | pylint==2.11.1; python_version < '3.8' 12 | pylint==3.2.6; python_version > '3.7' 13 | pyenchant 14 | importlib-metadata==4.13.0; python_version == '3.7' 15 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | coverage==3.6 2 | nose==1.3.0 3 | nosexcover==1.0.8 4 | tox==1.5.0 5 | virtualenv==20.26.6 6 | -------------------------------------------------------------------------------- /v2v/tests/cfg/libnbd/libnbd.cfg: -------------------------------------------------------------------------------- 1 | - libnbd: 2 | type = "libnbd" 3 | # Only keep one case for libnbd 4 | only source_none..dest_none 5 | vms = '' 6 | 7 | variants: 8 | - get_size: 9 | image_size = 1048576 10 | checkpoint = 'get_size' 11 | - is_zero: 12 | checkpoint = 'is_zero' 13 | -------------------------------------------------------------------------------- /v2v/tests/cfg/libnbd/nbdfuse.cfg: -------------------------------------------------------------------------------- 1 | - nbdfuse: 2 | type = "nbdfuse" 3 | # Only keep one case for libnbd 4 | only source_none..dest_none 5 | vms = '' 6 | 7 | image_qcow2_path = '' 8 | image_qcow2_size = '512M' 9 | nbdfuse_mp_filename = '' 10 | -------------------------------------------------------------------------------- /virttools/README.md: -------------------------------------------------------------------------------- 1 | The test type 'virttools' is meant to cover test cases for the tools in the 2 | virt-manager repository, e.g. virt-xml, virt-clone, virt-install. 3 | 4 | By using avocado-vt we obtain access to many existing test functions for libvirt 5 | and qemu. 
Please, note the related commit in avocado-vt to allow for the new 6 | test type. 7 | 8 | 'virttools' uses the same basic setup as the tp-libvirt/libvirt test type assuming 9 | most tests will suppose there's at least one existing vm 'avocado-vt-vm1' with 10 | image in the default location (avocado-vt/.../images/jeos-27-s390x.qcow2), e.g. 11 | for 'virt-xml avocado-vt-vm1 --add-device...', 'virt-clone avocado-vt-vm1'. 12 | -------------------------------------------------------------------------------- /virttools/tests/cfg/virt_install/blk_installation.cfg: -------------------------------------------------------------------------------- 1 | - virt_install.installation.blk_installation: 2 | type = blk_installation 3 | only s390-virtio 4 | start_vm = False 5 | install_tree_url = INSTALL_TREE_URL 6 | kickstart_url = KICKSTART_URL 7 | devid = 8 | -------------------------------------------------------------------------------- /virttools/tests/cfg/virt_install/hostdev_mdev.cfg: -------------------------------------------------------------------------------- 1 | - virt_install.hostdev.mdev: 2 | type = hostdev_mdev 3 | variants: 4 | - check_present_inside_guest: 5 | only s390-virtio 6 | mdev_type = vfio_ccw-io 7 | devid = 8 | -------------------------------------------------------------------------------- /virttools/tests/cfg/virt_install/kernel_cmdline.cfg: -------------------------------------------------------------------------------- 1 | - virt_install.kernel_cmdline: 2 | type = kernel_cmdline 3 | only s390-virtio 4 | variants: 5 | - boots_with_long_commandline: 6 | # contains a kernel version that supports long commandline 7 | location = https://dl.fedoraproject.org/pub/fedora-secondary/development/rawhide/Everything/s390x/os/ 8 | expected_status = 0 9 | - does_not_boot_with_long_commandline: 10 | # contains a kernel version that does not support long commandline 11 | location = https://dl.fedoraproject.org/pub/fedora-secondary/releases/35/Everything/s390x/os/ 12 | 
expected_status = 1 13 | -------------------------------------------------------------------------------- /virttools/tests/cfg/virt_install/pxe_installation.cfg: -------------------------------------------------------------------------------- 1 | - virt_install.installation.pxe_installation: 2 | type = pxe_installation 3 | only s390-virtio 4 | start_vm = False 5 | install_tree_url = INSTALL_TREE_URL 6 | kickstart_url = KICKSTART_URL 7 | -------------------------------------------------------------------------------- /virttools/tests/cfg/virt_install/vfio_installation.cfg: -------------------------------------------------------------------------------- 1 | - virt_install.installation.vfio_installation: 2 | type = vfio_installation 3 | only s390-virtio 4 | start_vm = False 5 | install_tree_url = INSTALL_TREE_URL 6 | kickstart_url = KICKSTART_URL 7 | devid = 8 | --------------------------------------------------------------------------------