├── .gitignore ├── .pre-commit-config.yaml ├── LICENCE.md ├── Makefile ├── README.md ├── documentation └── autoval_ssd.md ├── pyproject.toml ├── requirements.txt └── src └── autoval_ssd ├── cfg ├── Workload_Loop_Targets.json ├── drive_latency_monitor.json ├── nvme_smart │ └── nvme_validate.json └── nvme_smart_fdi │ └── nvme_validate_fdi.json ├── docs ├── gluster_configuration.md └── rpm_repo_hosting.md ├── lib └── utils │ ├── disk_utils.py │ ├── filesystem_utils.py │ ├── fio │ └── fio_synth_flash_utils.py │ ├── fio_runner.py │ ├── hdparm_utils.py │ ├── jobfile_templates │ ├── basic_read.fio │ ├── basic_stress_fio.fio │ ├── basic_verify.fio │ ├── basic_write.fio │ ├── bit_error_ratio.fio │ ├── boot_sert.job │ ├── filesystem_template.job │ ├── flash_fio_readiness.job │ ├── nvme_format_template.fio │ ├── precondition.fio │ ├── prep_flash.fio │ ├── random_job.fio │ ├── random_precondition.fio │ ├── rootfs_template.job │ ├── sequential_job.fio │ ├── sequential_precondition.fio │ ├── ssd_health_counter.job │ ├── stress_fio.fio │ └── trim_flash.fio │ ├── lsi_utils.py │ ├── md_utils.py │ ├── pci_utils.py │ ├── scrtnycli_utils.py │ ├── sdparm_utils.py │ ├── sed_util.py │ ├── sg_utils.py │ ├── storage │ ├── drive.py │ ├── nvme │ │ ├── latency_monitor_utils.py │ │ ├── lmparser.py │ │ ├── nvme_drive.py │ │ ├── nvme_factory.py │ │ ├── nvme_resize_utils.py │ │ ├── nvme_utils.py │ │ └── vendor │ │ │ ├── __init__.py │ │ │ └── nvme_vendor_utils.py │ ├── sas │ │ ├── __init__.py │ │ └── sas_drive.py │ ├── sata │ │ ├── __init__.py │ │ └── sata_drive.py │ ├── smart_validator.py │ ├── storage_device_factory.py │ ├── storage_test_base.py │ └── storage_utils.py │ ├── switchtec_utils.py │ └── system_utils.py ├── tests ├── ber_test │ ├── ber_test.py │ └── control_ber.json ├── drive_cache_check │ ├── drive_cache_check.json │ ├── drive_cache_check.py │ ├── drive_cache_check.rst │ ├── drive_cache_check_boot.json │ ├── drive_cache_check_boot_warm.json │ ├── drive_cache_check_warm.json │ ├── 
drive_cache_check_with_fs.json │ ├── drive_cache_check_with_fs_ext4.json │ ├── drive_cache_check_with_fs_ext4_warm.json │ └── drive_cache_check_with_fs_warm.json ├── drive_data_integrity │ ├── ddi_tests.rst │ ├── di_10_ac_boot_local.json │ ├── di_10_ac_boot_local_with_phy_location.json │ ├── di_10_ac_local.json │ ├── di_10_dc_cycle_graceful_shutdown_local.json │ ├── di_10_dc_cycle_notified_boot_local.json │ ├── di_10_dc_cycle_notified_local.json │ ├── di_10_dc_cycle_ungraceful_boot_local.json │ ├── di_10_dc_cycle_ungraceful_local.json │ ├── di_10_inband_warm_reboot.json │ ├── di_10_inband_warm_reboot_local.json │ ├── di_10_sled_boot_local.json │ ├── di_10_sled_local.json │ ├── di_1_ac_local.json │ ├── di_1_dc_cycle_notified_local.json │ ├── di_1_dc_cycle_ungraceful.json │ ├── di_1_dc_cycle_ungraceful_local.json │ ├── di_1_inband_warm_reboot.json │ ├── di_1_inband_warm_reboot_boot_local.json │ ├── di_1_inband_warm_reboot_local.json │ ├── di_1_sled_boot_local.json │ ├── di_1_sled_local.json │ ├── di_3_ac.json │ ├── di_3_ac_boot.json │ ├── di_3_ac_md.json │ ├── di_3_dc_cycle_notified.json │ ├── di_3_dc_cycle_notified_boot.json │ ├── di_3_dc_cycle_notified_boot_hwc.json │ ├── di_3_dc_cycle_notified_md.json │ ├── di_3_dc_cycle_ungraceful.json │ ├── di_3_dc_cycle_ungraceful_boot.json │ ├── di_3_dc_cycle_ungraceful_md.json │ ├── di_3_inband_warm_reboot.json │ ├── di_3_inband_warm_reboot_remote.json │ └── drive_data_integrity.py ├── drive_md5_verify │ ├── data_retension_10m.json │ ├── data_retension_12hrs.json │ ├── data_retension_6h.json │ ├── drive_md5_verify.py │ ├── drive_md5_verify.rst │ ├── fio_and_md5function_with_fs.json │ ├── fio_boot.json │ ├── fio_with_fs.json │ └── fio_with_raw.json ├── fio_fb │ ├── boot_sert.json │ ├── fio_fb.py │ ├── fio_fb.rst │ ├── fio_fb_filesystem.json │ ├── fio_fb_perf.json │ ├── fio_fb_rootfs.json │ ├── fio_fb_rootfs_systemd_workload_slice.json │ ├── fio_fb_stress_1h.json │ ├── fio_fb_stress_6h.json │ ├── fio_fb_stress_6h_skip_iops.json 
│ ├── fio_fb_stress_6h_with_periodic_drive_monitor.json │ ├── fio_fb_stress_bc_6h.json │ ├── fio_stress_all_2days.json │ ├── fio_stress_all_8days.json │ ├── fio_stress_ssd_2days.json │ ├── flash_fio_readiness.json │ ├── ssd_health_counter_10G.json │ ├── ssd_health_counter_1G.json │ ├── ssd_health_counter_20G.json │ ├── ssd_health_counter_2G.json │ ├── storage_data_collector.json │ └── storage_data_collector_500s.json ├── fio_internal_flush │ ├── dirty_power_cycle_1.json │ ├── dirty_power_cycle_3.json │ ├── dirty_power_cycle_3_boot.json │ ├── dirty_power_cycle_3_fio_internal_flush.json │ ├── dirty_power_cycle_3_fio_internal_flush_with_fs.json │ ├── dirty_power_cycle_3_fio_internal_flush_with_fs_ext4.json │ ├── dirty_power_cycle_3_with_fs.json │ ├── dirty_power_cycle_3_with_fs_ext4.json │ ├── fio_internal_flush.py │ ├── fio_internal_flush.rst │ ├── power_cycle_1_boot.json │ ├── power_cycle_1_with_flush.json │ ├── power_cycle_1_with_flush_with_fs.json │ ├── power_cycle_1_with_flush_with_fs_ext4.json │ ├── power_cycle_1_without_flush.json │ ├── power_cycle_3_with_flush.json │ ├── power_cycle_3_with_flush_with_fs.json │ ├── power_cycle_3_with_flush_with_fs_ext4.json │ ├── power_cycle_3_without_flush.json │ ├── power_cycle_3_without_flush_with_fs.json │ └── power_cycle_3_without_flush_with_fs_ext4.json ├── fio_synth_flash │ ├── UBOOTT_Workload_loop.json │ ├── UBOOTT_workload_stress.rst │ ├── USSDT_Workload_loop.json │ └── fio_synth_flash.py ├── fsync │ ├── control.json │ └── fsync.py ├── iogo │ ├── control.json │ └── iogo.py ├── namespace_utilization_test │ ├── namespace_utilization.json │ ├── namespace_utilization.rst │ └── namespace_utilization_test.py ├── nvme_cli │ ├── control.json │ ├── control_disable_boot_drive.json │ ├── control_no_crypto_erase.json │ ├── nvme_cli.py │ ├── nvme_cli.rst │ └── nvme_cli_with_nvme_version.json ├── nvme_format │ ├── control_nvme.json │ ├── control_nvme_crypto_erase.json │ ├── control_nvme_no_secure_erase.json │ ├── 
control_nvme_sanity_check.json │ ├── control_nvme_user_data_erase.json │ ├── nvme_format.py │ ├── nvme_format.rst │ └── nvme_format_1_cycle.json ├── nvme_ns_resize │ ├── nvme_ns_resize.py │ ├── nvme_ns_resize.rst │ ├── op_pct_sweep_control.json │ └── usercap_TB_sweep_500G_control.json └── sed_check │ ├── sed_check.py │ └── sed_take_ownership.py ├── tools ├── fsync.c └── ioT6.go └── unittest ├── mock ├── lib │ ├── mock_autoval.py │ ├── mock_connection_dispatcher.py │ ├── mock_host.py │ ├── mock_openbmc.py │ ├── mock_test_base_init.py │ └── mock_threadpool_executor.py ├── testbed │ └── testbed.json └── util_outputs │ ├── empty │ ├── fstrim │ ├── id_ctrl │ ├── id_ctrl.json │ ├── id_ctrlH │ ├── lsblk │ ├── lsblk_J │ ├── lspci_vvv_1 │ ├── lspci_vvv_3 │ ├── lsscsi │ ├── mkfs │ ├── mkfs_xfs │ ├── nvme_list │ ├── nvme_list.json │ ├── nvme_ocp_command.json │ ├── nvme_smartlog.json │ ├── power_state_change_counter │ ├── rpm_qi_bash │ ├── rpm_qpi_agfhc_rpm │ ├── sg_utils │ ├── sdparm_capacity │ ├── sg_inq │ └── sg_readcap │ ├── smartctl_sas_drive │ ├── smartctl_x_sda │ └── valuemd5 ├── test_disk_utils.py ├── test_drive.py ├── test_filesystem_utils.py ├── test_hdparm_utils.py ├── test_lsi_utils.py ├── test_md_utils.py ├── test_nvme_drive.py ├── test_nvme_resize_utils.py ├── test_nvme_utils.py ├── test_pci_utils.py ├── test_scrtnycli_utils.py ├── test_sdparm_utils.py ├── test_sed_util.py ├── test_sg_utils.py ├── test_smart_validator.py ├── test_storage_device_factory.py ├── test_storage_utils.py └── test_system_utils.py /.gitignore: -------------------------------------------------------------------------------- 1 | /dist 2 | /build 3 | __pycache__ 4 | *.egg-info 5 | /.vscode 6 | /env 7 | 8 | -------------------------------------------------------------------------------- /.pre-commit-config.yaml: -------------------------------------------------------------------------------- 1 | repos: 2 | - repo: https://github.com/omnilib/ufmt 3 | rev: v2.0.0 4 | hooks: 5 | - id: ufmt 6 | 
args: [--all-files] 6 | -------------------------------------------------------------------------------- /LICENCE.md: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) Meta Platforms, Inc. and affiliates. 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | #Run precommit hook linting 2 | lint: 3 | pre-commit run --all-files 4 | 5 | #Run ufmt across entire repo 6 | lint_all: 7 | find . -path ./venv -prune -o -name "*.py" -exec ufmt check {} + 8 | 9 | #Format code using ufmt 10 | format: 11 | find . 
-path ./venv -prune -o -name "*.py" -exec ufmt format {} + 12 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # ocp-diag-autoval-ssd 2 | **ocp-diag-autoval-ssd** is a collection of SSD tests using the **ocp-diag-autoval** test framework. 3 | 4 | ## Overview 5 | At a high level, the following steps are necessary to install, build, and use autoval 6 | 1. [Installation](#installation) 7 | 2. [Environment Setup](#environment-setup) 8 | 3. [Executing Tests](#executing-tests) 9 | 10 | ## Installation 11 | 12 | *This installation steps are a work in progress. They will be improved/simplified in the future.* 13 | 14 | 1. Clone the following repos: 15 | ```bash 16 | $ git clone https://github.com/opencomputeproject/ocp-diag-autoval.git 17 | $ git clone https://github.com/opencomputeproject/ocp-diag-autoval-ssd.git 18 | ``` 19 | 2. Create a virtual environment: 20 | ```bash 21 | $ python -m venv env 22 | $ source ./env/bin/activate 23 | $ pip install build 24 | ``` 25 | 3. Build and install `ocptv-autoval` 26 | ```bash 27 | $ cd ocp-diag-autoval 28 | $ python -m build 29 | $ pip install ./dist/ocptv_autoval-0.0.1.tar.gz 30 | ``` 31 | 4. Build and install `ocptv-autoval-ssd` 32 | ```bash 33 | $ cd ocp-diag-autoval-ssd 34 | $ python -m build 35 | $ pip install --no-deps ./dist/ocptv_autoval_ssd-0.0.1.tar.gz 36 | ``` 37 | ## Environment Setup 38 | There are two parts to environment setup: 39 | 1. Creating and maintaining a repo to host RPMs and tools needed by autoval tests 40 | 2. Creating a `site_settings.json` file. 41 | ### Creating and maintaining a repo 42 | Local repo hosting is required because many tests require specific RPMs in order to run (e.g. `fio`, `fio-synth`) and automatically download and install them at test setup time. 
We strongly recommend naming the repo autoval-tools (as it aligns with default configuration defined in `site_settings.json`). 43 | ### Creating a site settings file 44 | Before running tests for the first time, you'll need to create a `site_settings.json` file. 45 | Here is an example with some basic defaults. 46 | ``` json 47 | { 48 | "control_server_logdir": "/autoval/logs/", 49 | "control_server_tmpdir": "/tmp/autoval/", 50 | "dut_logdir": "/autoval/logs/", 51 | "dut_tmpdir": "/tmp/autoval/", 52 | "resultsdir": "/autoval/results/", 53 | "ssh_key_path": ["/USERNAME/.ssh/id_rsa"], # Replace contents with a path to your public SSH key 54 | "plugin_config_path" : "plugins/plugin_config.json", 55 | "repository_dir": "/autoval/repository/", 56 | "test_utils_plugin_config_path" : "plugins/test_utils_plugin_config.json", 57 | "cleanup_dut_logdirs" : false, 58 | "yum_repo": "autoval-tools" 59 | } 60 | ``` 61 | 62 | See [rpm_repo_hosting.md](rpm_repo_hosting.md) for detailed instructions on how to configure a DNF repo on the test server. 63 | ## Executing Tests 64 | ### Host configuration file 65 | Before executing tests, you need to create a `hosts.json` file. 66 | The `{-c|--config} CONFIG` autoval option is used to specify DUT configuration in well-formed JSON. 
This configuration contains the following information: 67 | * `hostname`: IP address of the host 68 | * `oob_addr`: IP address of the BMC 69 | * `username`: Name of the host user 70 | * `password`: Password for the host user 71 | * `oob_username`: Name of the OOB user 72 | * `oob_password`: Password for the OOB user 73 | 74 | Example: 75 | ```JSON 76 | { 77 | "hosts": [ 78 | { 79 | "hostname": "10::CD97:E10F:FE82:9A1C", 80 | "username": "root", 81 | "password": "password", 82 | "oob_addr": "10::CD97:E10F:FE82:9A19", 83 | "oob_username": "root", 84 | "oob_password": "password" 85 | } 86 | ] 87 | } 88 | ``` 89 | ### Update control.json file with boot drive physical location 90 | Every test has its control parameters provided by the `control.json` file located in the same directory as the test module. 91 | `boot_drive_physical_location` should be passed as a test control parameter using the following *BDF* format. 92 | ``` 93 | Format: <domain>:<bus>:<device>.<function> 94 | ``` 95 | Example: 96 | ```json 97 | "boot_drive_physical_location": "0000:64:00.0" 98 | ``` 99 | 100 | ### Test Execution 101 | Now that you have a `hosts.json` file, you can run a test as follows. 
102 | ```bash 103 | $ export SITE_SETTINGS="path/to/site_settings.json" 104 | $ python -m autoval.autoval_test_runner autoval_ssd.tests.nvme_cli.nvme_cli \ 105 | --config ./hosts.json \ 106 | --test_control ~/bin/ocp-diag-autoval-ssd/autoval_ssd/tests/nvme_cli/control.json 107 | ``` 108 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [build-system] 2 | requires = ["setuptools>=61.0.0", "wheel"] 3 | build-backend = "setuptools.build_meta" 4 | 5 | [project] 6 | name = "ocptv-autoval-ssd" 7 | version = "0.0.1" 8 | description = "Autoval SSD tests" 9 | readme = "README.md" 10 | authors = [ 11 | { name = "OCP Test & Validation", email = "ocp-test-validation@OCP-All.groups.io" }, 12 | ] 13 | license = { file = "LICENSE" } 14 | classifiers = [ 15 | "License :: OSI Approved :: MIT License", 16 | "Programming Language :: Python", 17 | "Programming Language :: Python :: 3.8", 18 | "Topic :: System :: Hardware", 19 | ] 20 | keywords = ["ocp", "ocptv", "autoval", "hardware", "testing", "storage", "ssd", "nvme"] 21 | dependencies = [ 22 | "ocptv-autoval==0.0.1" 23 | ] 24 | requires-python = ">=3.8" 25 | 26 | [project.optional-dependencies] 27 | publish = ["build", "twine"] 28 | 29 | [project.urls] 30 | "Homepage" = "https://github.com/opencomputeproject/ocp-diag-autoval-ssd" 31 | "Bug reports" = "https://github.com/opencomputeproject/ocp-diag-autoval-ssd/issues" 32 | "Source" = "https://github.com/opencomputeproject/ocp-diag-autoval-ssd" 33 | 34 | [tool.setuptools.package-data] 35 | 36 | "*" = ["*.json", "*.fio", "*.job", "*.go", "*.c"] 37 | 38 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | pre-commit 2 | -------------------------------------------------------------------------------- 
/src/autoval_ssd/cfg/drive_latency_monitor.json: -------------------------------------------------------------------------------- 1 | {} 2 | -------------------------------------------------------------------------------- /src/autoval_ssd/cfg/nvme_smart/nvme_validate.json: -------------------------------------------------------------------------------- 1 | { 2 | "nvme": { 3 | "smart-log": { 4 | "temperature": "<348", 5 | "critical_warning": "==", 6 | "avail_spare": ">80", 7 | "media_errors": "==" 8 | }, 9 | "ocp-smart-add-log": { 10 | "Bad user nand blocks - Raw": "~3", 11 | "Bad system nand blocks - Raw": "~3", 12 | "Uncorrectable read error count": ">=0", 13 | "End to end corrected errors": "~2", 14 | "End to end detected errors": "~2", 15 | "PCIe correctable error count": "~50", 16 | "Incomplete shutdowns": ">=0" 17 | } 18 | } 19 | } 20 | -------------------------------------------------------------------------------- /src/autoval_ssd/cfg/nvme_smart_fdi/nvme_validate_fdi.json: -------------------------------------------------------------------------------- 1 | { 2 | "nvme": { 3 | "smart-log": { 4 | "temperature": "<348", 5 | "critical_warning": "==", 6 | "avail_spare": ">80", 7 | "media_errors": "==" 8 | } 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /src/autoval_ssd/docs/gluster_configuration.md: -------------------------------------------------------------------------------- 1 | # How to configure and administer glusterfs on an AutoVal test environment 2 | 3 | This guide provides instructions on how to configure a Test Server to export a shared filesystem to host logs, tools, and other data for use by DUTs in an AutoVal test environment. 4 | 5 | ## Create and mount a logical volume on the Test Server 6 | 7 | > [!NOTE] 8 | > Before proceeding, ensure that RPMs to support LVM are installed on your system. (e.g. `pvcreate`, `vgcreate`, `lvcreate` will need to be available) 9 | 10 | 1. 
Create physical volume 11 | ```bash 12 | pvcreate /dev/nvme1n1 13 | ``` 14 | 15 | 2. Create volume group for the physical volume 16 | ```bash 17 | vgcreate gluster /dev/nvme1n1 18 | ``` 19 | 20 | 3. Use the full size of the partition to create a logical volume 21 | ```bash 22 | lvcreate -l 100%FREE -n shared gluster 23 | ``` 24 | 25 | 4. Create file system on the logical volume 26 | ```bash 27 | mkfs.xfs /dev/gluster/shared 28 | ``` 29 | 30 | 5. Mount logical volume on ``/shared`` 31 | 32 | Create mountpoint 33 | ```bash 34 | mkdir -p /shared 35 | ``` 36 | Add the following entry to the `/etc/fstab` file 37 | ``` 38 | /dev/gluster/shared /shared xfs defaults 0 0 39 | ``` 40 | 41 | Mount the filesystem 42 | ```bash 43 | mount /shared 44 | ``` 45 | 46 | ## Create gluster volume and mount on `/shared/autoval` for all hosts 47 | 48 | > [!NOTE] 49 | > Before proceeding, ensure that RPMs to support GlusterFS are installed on your system. 50 | > On systems we tested, the following RPMs needed to be installed, but your results may vary: 51 | > * glusterfs 52 | > * glusterfs-api 53 | > * glusterfs-cli 54 | > * glusterfs-client-xlators 55 | > * glusterfs-fuse 56 | > * glusterfs-libs 57 | > * qemu-kvm-block-gluster 58 | 59 | 2. Start the Gluster service 60 | ```bash 61 | systemctl start glusterd 62 | ``` 63 | 64 | 3. Create Gluster volume using a single node. 65 | ```bash 66 | gluster volume create autoval HOSTNAME:/shared/.brick force 67 | ``` 68 | Where ``HOSTNAME`` is substituted with name of the host being tested. 69 | 70 | 4. Start the Gluster volume 71 | 72 | ```bash 73 | gluster volume status autoval 74 | gluster volume start autoval 75 | ``` 76 | 77 | 5. Set various configuration parameters on the Gluster volume 78 | 79 | These performance parameters are known to perform well in our own ``AutoVal`` tests and deployments. 
80 | ```bash 81 | gluster volume set autoval performance.io-thread-count 32 82 | gluster volume set autoval performance.cache-size 1GB 83 | gluster volume set autoval server.event-threads 4 84 | ``` 85 | 86 | 6. Ensure `volume management --> transport.address-family` option is set to `inet6` 87 | 88 | Ensure the following line is present and uncommented in ``/etc/glusterfs/glusterd.vol``: 89 | ``` 90 | option transport.address-family inet6 91 | ``` 92 | 93 | ## Mount glusterfs volume on Test Server and all DUTs 94 | We recommend using ``/shared/autoval`` as the mount point because multiple configuration settings in [site_settings_vendor.json](../../havoc/autoval/cfg/site_settings/site_settings_vendor.json) depend on it. 95 | 96 | 1. Create a mountpoint 97 | On Test Server and all DUTs: 98 | ```bash 99 | mkdir -p /shared/autoval 100 | ``` 101 | 102 | 2. Add entry to ``/etc/fstab`` 103 | On Test Server and all DUTs add the following entry to ``/etc/fstab``: 104 | ```bash 105 | controller:autoval /shared/autoval glusterfs xlator-option=transport.address-family=inet6,defaults,_netdev 0 0 106 | ``` 107 | 108 | 3. Regenerate systemd configuration 109 | On Test Server and all DUTs 110 | ```bash 111 | systemctl daemon-reload 112 | ``` 113 | 114 | 4. Mount ``/shared/autoval`` and verify that it is correctly mounted 115 | On Test Server and all DUTs and examine output 116 | ```bash 117 | mount /shared/autoval 118 | df /shared/autoval 119 | ``` 120 | 121 | > [!IMPORTANT] 122 | > On CentOS 9, there is a known issue where glusterfs ``/etc/fstab`` entries aren't always automatically mounted at boot time. This happens because of an ordering dependency where the `/etc/fstab` file is processed **before** glusterfsd has a chance to start up. In order to work around this issue, install the script [install_glusterfs_automounter.sh](../../scripts/install_glusterfs_automounter.sh) after step 2. 
123 | -------------------------------------------------------------------------------- /src/autoval_ssd/docs/rpm_repo_hosting.md: -------------------------------------------------------------------------------- 1 | # How to administer RPM hosting in an AutoVal environment 2 | 3 | ## Background 4 | 5 | Some autoval tests require specific tools (RPMs) in order to function correctly and are designed to automatically download and install them to the DUT if they are not available. RPMs are installed using ``dnf install...`` on DUTs (and ``yum install...`` is used on DUTs running CentOS 7 and older). The default name of the repo is ``autoval-tools`` (configured using the ``yum_repo`` setting in ``site_settings.json``). 6 | 7 | This guide describes how to: 8 | 9 | 1. Configure the Test Server as a DNF repository host and 10 | 2. Configure each DUT as a DNF client 11 | 12 | This guide assumes that ``/shared/autoval`` network filesystem is already mounted on all DUTs (see [gluster_configuration.md](gluster_configuration.md) for more info.) 13 | 14 | ## Overview 15 | 16 | DNF configuration requires changes on both the Test Server and the DUTs 17 | 18 | On the Test Server: 19 | 20 | * Install createrepo utility 21 | * Create a repository directory 22 | * Put RPM files into the repository directory 23 | * Create the repository metadata 24 | 25 | On each DUT: 26 | 27 | * Create the repository configuration file 28 | 29 | ## Configuration Steps 30 | 31 | ### 1. Install ``createrepo`` on the Test Server 32 | 33 | To create a DNF repository we need to install the ``createrepo`` software package 34 | Example: 35 | 36 | ```bash 37 | dnf install createrepo_c 38 | ``` 39 | 40 | ### 2. Create a repository directory on the Test Server 41 | 42 | We recommend using ``/shared/autoval/pkgs/`` as the directory to contain the RPMs. 43 | 44 | Example: 45 | ```bash 46 | mkdir -p /shared/autoval/pkgs/ 47 | ``` 48 | 49 | ### 3. 
Copy RPM files into the repository directory 50 | 51 | Download and copy RPMs into the repository directory based on test requirements. We recommend the following (at a minimum): 52 | 53 | * ``smartmontools`` 54 | * ``fiosynth`` 55 | * ``fio-engine-libaio`` 56 | * ``fio`` 57 | * ``hdparm`` 58 | * ``libaio`` 59 | * ``nvme-cli`` 60 | * ``sdparm`` 61 | 62 | #### Generate `fiosynth` rpm from source 63 | The following instructions can be used to build FioSynthFlash rpm from source (it is not generally pre-packaged as part of Linux OS distributions): 64 | 65 | ```bash 66 | git clone https://github.com/facebookincubator/FioSynth 67 | cd FioSynth/ 68 | sudo python3 setup.py bdist_rpm 69 | cd dist/ 70 | ``` 71 | After the rpm is successfully built, you can upload it from the current directory to the RPM repository. 72 | 73 | > [!NOTE] 74 | > Specific versions of RPMs (e.g. ``fio`` or ``nvme-cli``) may be needed for specific tests or types of testing. 75 | 76 | ### 4. Generate the repository metadata 77 | 78 | The ``createrepo`` command reads through the repository directory and generates the metadata necessary for it to function as a DNF repository (it creates the ``repodata/`` subdirectory for this purpose). 79 | 80 | On the Test Server: 81 | 82 | ```bash 83 | cd /shared/autoval && createrepo --update pkgs 84 | ``` 85 | 86 | > [!IMPORTANT] 87 | > Each time RPM package files are added to the repository directory, the metadata must be regenerated. 88 | 89 | ### 5. Configure DUTs to use the ``autoval-tools`` repository 90 | 91 | On each DUT, create a file named ``/etc/yum.repos.d/autoval.repo`` with the following contents: 92 | 93 | ```toml 94 | [autoval-tools] 95 | name="CentOS9 - tools for autoval support" 96 | baseurl="file:///shared/autoval/pkgs" 97 | enabled=1 98 | gpgcheck=0 99 | ``` 100 | 101 | > [!NOTE] 102 | > A more advanced (and typical) configuration is to use the ``https`` as the DNF protocol but this would require configuration and maintenance of a web server (e.g. 
``nginx``) on the Test Server and is out of scope for this guide. 103 | -------------------------------------------------------------------------------- /src/autoval_ssd/lib/utils/jobfile_templates/basic_read.fio: -------------------------------------------------------------------------------- 1 | [global] 2 | rw=RW 3 | direct=1 4 | ioengine=libaio 5 | size=SIZE 6 | bs=BLKSIZE 7 | iodepth=DEPTH 8 | runtime=RUNTIME 9 | verify=VERIFY 10 | time_based 11 | do_verify=1 12 | verify_backlog=10000000 13 | verify_state_load=1 14 | verify_async=4 15 | group_reporting=1 16 | verify_fatal=1 17 | verify_dump=1 18 | -------------------------------------------------------------------------------- /src/autoval_ssd/lib/utils/jobfile_templates/basic_stress_fio.fio: -------------------------------------------------------------------------------- 1 | [global] 2 | rw=randrw 3 | rwmixread=70 4 | size=SIZE 5 | blocksize=BLKSIZE 6 | blockalign=BLKSIZE 7 | iodepth=IODEPTH 8 | direct=1 9 | norandommap 10 | do_verify=1 11 | time_based 12 | ramp_time=5 13 | runtime=RUNTIME 14 | group_reporting=1 15 | numjobs=1 16 | ioengine=libaio 17 | -------------------------------------------------------------------------------- /src/autoval_ssd/lib/utils/jobfile_templates/basic_verify.fio: -------------------------------------------------------------------------------- 1 | [global] 2 | rw=RW 3 | direct=1 4 | ioengine=libaio 5 | verify=VERIFY 6 | do_verify=1 7 | size=SIZE 8 | bs=BLKSIZE 9 | iodepth=DEPTH 10 | verify_backlog=10000000 11 | verify_state_load=1 12 | verify_async=4 13 | group_reporting=1 14 | verify_fatal=1 15 | verify_dump=1 16 | -------------------------------------------------------------------------------- /src/autoval_ssd/lib/utils/jobfile_templates/basic_write.fio: -------------------------------------------------------------------------------- 1 | [global] 2 | rw=RW 3 | direct=1 4 | ioengine=libaio 5 | size=SIZE 6 | bs=BLKSIZE 7 | iodepth=DEPTH 8 | runtime=RUNTIME 9 | verify=VERIFY 10 | 
time_based 11 | do_verify=1 12 | verify_backlog=10000000 13 | verify_state_save=1 14 | verify_async=4 15 | verify_fatal=1 16 | verify_dump=1 17 | group_reporting=1 18 | -------------------------------------------------------------------------------- /src/autoval_ssd/lib/utils/jobfile_templates/bit_error_ratio.fio: -------------------------------------------------------------------------------- 1 | [global] 2 | name=NAME 3 | blockalign=BLK_ALIGN 4 | blocksize=BLK_SIZE 5 | ioengine=libaio 6 | io_size=SIZE 7 | iodepth=32 8 | loops=LOOPS 9 | invalidate=1 10 | rw=RDWR 11 | rwmixread=RWMIX_READ 12 | rwmixwrite=RWMIX_WRITE 13 | group_reporting=1 14 | norandommap 15 | randrepeat=0 16 | direct=1 17 | -------------------------------------------------------------------------------- /src/autoval_ssd/lib/utils/jobfile_templates/boot_sert.job: -------------------------------------------------------------------------------- 1 | [global] 2 | rw=RW 3 | direct=1 4 | size=SIZE 5 | bs=BLKSIZE 6 | iodepth=DEPTH 7 | loops=LOOPS 8 | group_reporting=1 9 | stonewall 10 | -------------------------------------------------------------------------------- /src/autoval_ssd/lib/utils/jobfile_templates/filesystem_template.job: -------------------------------------------------------------------------------- 1 | [global] 2 | norandommap 3 | ioengine=libaio 4 | randrepeat=0 5 | direct=1 6 | invalidate=1 7 | name=NAME 8 | size=SIZE 9 | iodepth=DEPTH 10 | group_reporting=1 11 | refill_buffers 12 | rw=RW 13 | bs=BLKSIZE 14 | runtime=RUNTIME 15 | rwmixwrite=MIXWRITE 16 | rwmixread=MIXREAD 17 | -------------------------------------------------------------------------------- /src/autoval_ssd/lib/utils/jobfile_templates/flash_fio_readiness.job: -------------------------------------------------------------------------------- 1 | [global] 2 | description=Facebook FIO Readiness test 3 | random_generator=lfsr 4 | time_based 5 | ramp_time=10 6 | runtime=RUNTIME 7 | direct=1 8 | iodepth=128 9 | numjobs=1 10 | 
name=8k 11 | rwmixread=60 12 | ioengine=libaio 13 | group_reporting=1 14 | verify=md5 15 | do_verify=0 16 | rw=randrw 17 | bs=8k 18 | -------------------------------------------------------------------------------- /src/autoval_ssd/lib/utils/jobfile_templates/nvme_format_template.fio: -------------------------------------------------------------------------------- 1 | [global] 2 | norandommap 3 | ioengine=libaio 4 | randrepeat=0 5 | direct=1 6 | invalidate=1 7 | name=NAME 8 | iodepth=DEPTH 9 | group_reporting=1 10 | refill_buffers 11 | rw=RW 12 | bs=BLKSIZE 13 | runtime=RUNTIME 14 | time_based 15 | offset=0 16 | do_verify=DO_VERIFY 17 | buffer_pattern=BUFFER_PATTERN 18 | -------------------------------------------------------------------------------- /src/autoval_ssd/lib/utils/jobfile_templates/precondition.fio: -------------------------------------------------------------------------------- 1 | [global] 2 | direct=1 3 | rw=write 4 | bs=128k 5 | iodepth=8 6 | ioengine=libaio 7 | numjobs=1 8 | size=100% 9 | name=precond_seq 10 | group_reporting=1 11 | -------------------------------------------------------------------------------- /src/autoval_ssd/lib/utils/jobfile_templates/prep_flash.fio: -------------------------------------------------------------------------------- 1 | [global] 2 | rw=write 3 | blocksize=BLKSIZE 4 | size=100% 5 | ioengine=libaio 6 | iodepth=DEPTH 7 | direct=1 8 | loops=LOOPS 9 | invalidate=1 10 | group_reporting=1 11 | -------------------------------------------------------------------------------- /src/autoval_ssd/lib/utils/jobfile_templates/random_job.fio: -------------------------------------------------------------------------------- 1 | [global] 2 | norandommap 3 | ioengine=libaio 4 | randrepeat=0 5 | direct=1 6 | size=100% 7 | iodepth=IODEPTH 8 | numjobs=NUM_JOBS 9 | group_reporting 10 | time_based 11 | refill_buffers 12 | runtime=RUNTIME 13 | rw=RW 14 | rwmixread=MIXREAD 15 | rwmixwrite=MIXWRITE 16 | bs=BLKSIZE 17 | 
-------------------------------------------------------------------------------- /src/autoval_ssd/lib/utils/jobfile_templates/random_precondition.fio: -------------------------------------------------------------------------------- 1 | [global] 2 | direct=1 3 | rw=randwrite 4 | bs=4k 5 | iodepth=4 6 | ioengine=libaio 7 | numjobs=1 8 | size=100% 9 | norandommap 10 | randrepeat=0 11 | name=precond_rand 12 | invalidate=1 13 | group_reporting=1 14 | -------------------------------------------------------------------------------- /src/autoval_ssd/lib/utils/jobfile_templates/rootfs_template.job: -------------------------------------------------------------------------------- 1 | [global] 2 | norandommap 3 | ioengine=libaio 4 | randrepeat=0 5 | direct=1 6 | invalidate=1 7 | name=NAME 8 | time_based 9 | runtime=RUNTIME 10 | size=SIZE 11 | iodepth=DEPTH 12 | group_reporting=1 13 | refill_buffers 14 | rw=RW 15 | bs=BLKSIZE 16 | rwmixwrite=MIXWRITE 17 | rwmixread=MIXREAD 18 | -------------------------------------------------------------------------------- /src/autoval_ssd/lib/utils/jobfile_templates/sequential_job.fio: -------------------------------------------------------------------------------- 1 | [global] 2 | direct=1 3 | rw=RW 4 | bs=BLKSIZE 5 | iodepth=IODEPTH 6 | ioengine=libaio 7 | numjobs=NUM_JOBS 8 | runtime=RUNTIME 9 | time_based 10 | group_reporting 11 | name=NAME 12 | -------------------------------------------------------------------------------- /src/autoval_ssd/lib/utils/jobfile_templates/sequential_precondition.fio: -------------------------------------------------------------------------------- 1 | [global] 2 | direct=1 3 | rw=write 4 | bs=128k 5 | iodepth=8 6 | ioengine=libaio 7 | numjobs=1 8 | size=100% 9 | name=precond_seq 10 | group_reporting=1 11 | -------------------------------------------------------------------------------- /src/autoval_ssd/lib/utils/jobfile_templates/ssd_health_counter.job: 
class NVMeDriveFactory:
    """Factory for constructing NVMeDrive objects from block device names."""

    @staticmethod
    def create(host, drive: str, config: Optional[str] = None) -> NVMeDrive:
        """Build an NVMeDrive for the given block device.

        @param Host host: host the drive is attached to
        @param String drive: block device name, e.g. nvme1n1
        @param String config: config file name
        """
        nvme_drive = NVMeDrive(host, drive, config=config)
        return nvme_drive
class StorageDeviceFactory:
    """
    Generate drive objects for the provided block names.

    Each block name is mapped to the most specific Drive subclass
    (NVMe, SATA, SAS) matching its detected interface; a plain Drive
    is used as a fallback when the interface cannot be determined.
    """

    def __init__(
        self, host: Host, block_names: List[str], config: Optional[str] = None
    ) -> None:
        """
        @param Host host: host object
        @param String[] block_names: list of drive names; e.g. sdb, sdc
        @param String config: json file that controls how drive data is
            collected and validated
        """
        self.host = host
        self.block_names = block_names
        # Populated once by _cache_nvme_names()/_cache_sata_drives_name()
        # so `nvme list`/`lsscsi` are not re-run for every drive.
        self.nvme_list: List[str] = []
        self.sata_drive_list: List[str] = []
        self.emmc_drive_list = ["mmcblk0"]
        self.config = config

    def create(self) -> List[Drive]:
        """Build one drive object per block name, in parallel."""
        self._cache_nvme_names()
        self._cache_sata_drives_name()
        return AsyncUtils.run_async_jobs(
            [
                AsyncJob(func=self._create_drive, args=[block_name])
                for block_name in self.block_names
            ]
        )

    def _create_drive(self, block_name: str) -> Drive:
        """Create the most specific drive object for `block_name`."""
        host = self._get_host(thread=True)
        if self._is_drive_nvme(block_name):
            return NVMeDriveFactory.create(host, block_name, config=self.config)
        if self._is_drive_sata(block_name):
            return SATADrive(host, block_name, config=self.config)
        if self._is_drive_sas(block_name):
            return SASDrive(host, block_name, config=self.config)
        # Unknown interface: fall back to the generic Drive and warn.
        AutovalLog.log_info(
            "WARNING: Drive %s interface not determined" % block_name
        )
        return Drive(host, block_name, config=self.config)

    def _is_drive_nvme(self, block_name: str) -> bool:
        """True if `block_name` was reported by `nvme list`."""
        return str(block_name) in self.nvme_list

    def _cache_sata_drives_name(self) -> None:
        """
        Run `lsscsi` command to get all SATA drive names on the system.
        """
        patt = r"ATA.*\/dev\/(sd(?:\w+))"
        output = self._get_host().run("lsscsi")
        # Example output SATA drives
        # [6:0:76:0] disk ATA /dev/sdd
        # [6:0:108:0] disk ATA /dev/sdaj
        # ...
        # Example output SAS drives
        # [6:0:99:0] disk C110 /dev/sdaa
        # [6:0:97:0] disk C110 /dev/sdy
        # ...
        self.sata_drive_list.extend(re.findall(patt, output))

    def _cache_nvme_names(self) -> None:
        """
        `nvme list` shows info of all nvme drives on the system. Run this
        command once and cache its output to avoid repeated call for
        each drive
        """
        cmd_output = self._get_host().run("nvme list")
        self.nvme_list.extend(re.findall(r"^/dev/(\w+)", cmd_output, re.M))

    def _is_drive_sas(self, block_name: str) -> bool:
        """True if smartctl reports a SAS transport protocol for the drive."""
        smart = self._get_host(thread=True).run(
            "smartctl -x /dev/%s" % block_name, ignore_status=True
        )
        return re.search(r"Transport\sprotocol:\s+SAS", smart) is not None

    def _is_drive_sata(self, block_name: str) -> bool:
        """
        @param String block_name: drive name in /dev/ path
        @return boolean
        """
        return str(block_name) in self.sata_drive_list

    def _get_host(self, thread: bool = False) -> Host:
        """
        Provide a new host object for multi-threaded calls in create()
        """
        return Host(AutovalUtils.get_host_dict(self.host)) if thread else self.host
class SwitchtecUtils:
    """Helpers for managing and querying Microsemi Switchtec PCIe switches."""

    def power_cycle_slots(self, host, slot_address) -> None:
        """Power off, then power back on, every PCI slot in `slot_address`.

        @param host: host object used to run commands
        @param slot_address: iterable of PCI device addresses whose slots
            should be power cycled
        """
        # Get all the SSD's slot address
        for slot in slot_address:
            slots = self._get_pci_slots(host, slot)
            # Slot Power OFF command
            slot_cmd = "echo 0 > /sys/bus/pci/slots/%s/power" % slots
            try:
                host.run(slot_cmd)
            except Exception:
                # Best effort: the sysfs write can fail (e.g. slot already off).
                pass
            # Sleep between each slot power cycle
            time.sleep(1)

        # Once all the slots are Powered OFF, wait for sometime
        AutovalLog.log_info("Powered OFF all Slots")
        time.sleep(60)

        for slot in slot_address:
            slots = self._get_pci_slots(host, slot)
            # Slot Power ON command
            slot_cmd = "echo 1 > /sys/bus/pci/slots/%s/power" % slots
            try:
                host.run(slot_cmd)
            except Exception:
                pass
            # Sleep between each slot power cycle
            time.sleep(1)

        # Once all the slots are Powered ON, wait for enumeration
        AutovalLog.log_info("Powered ON all Slots")
        time.sleep(60)

    def _get_pci_slots(self, host, slot):
        """Map a PCI device address to its slot number under /sys/bus/pci/slots.

        @raises TestError: if no slot matches `slot`.
        """
        sys_add = "/sys/bus/pci/slots/*/address"
        cmd = r"grep %s %s | sed 's/\// /g' | awk '{print $5}'" % (slot, sys_add)
        slots = host.run(cmd)
        if slots:
            return slots
        raise TestError("Getting Slots Failed for - %s" % (slot))

    def get_switchtec_event_counter(self, host, device):
        """
        Get an event counters in JSON from a Switchtec Device.
        Format: {stack_number: {event_key: event_value}}
        """
        cmd = "switchtec evcntr /dev/%s" % device
        event_counter = {}
        # Current stack header; counter lines seen before any header are ignored.
        stack = None
        out = host.run(cmd)
        for line in out.splitlines():
            m = re.search(r"Stack\s+(\d+):", line)
            if m:
                stack = int(m.group(1))
                continue
            if stack is None:
                continue
            try:
                line_list = line.split()
                value = int(line_list[-1])
                key = line_list[-2]
            except (IndexError, ValueError):
                # Not an "<event> <count>" line (blank/header noise); skip it.
                continue
            # Accumulate per stack so every event under a stack is kept.
            # (Previously each event replaced the stack's whole dict, so only
            # the last counter of each stack survived.)
            event_counter.setdefault(stack, {})[key] = value
            if value != 0:
                AutovalLog.log_info("WARNING: Stack %s" % str({stack: {key: value}}))
        return event_counter

    def get_switchtec_devices(self, host):
        """Return the list of switchtec device names, e.g. ['switchtec0']."""
        cmd = "switchtec list"
        out = host.run(cmd, ignore_status=True)
        device_list = re.findall(r"(switchtec\d+)\s+", out)
        return device_list

    def check_if_switchtec_installed(self, host) -> None:
        """Install the switchtec package if the rpm is not already present."""
        cmd = "rpm -qa | grep switchtec | tr '\n' ' '"
        out = host.run_get_result(cmd, ignore_status=True)  # noqa
        if out.return_code != 0:
            self.install_switchtec(host)

    def install_switchtec(self, host) -> None:
        """Install the switchtec CLI via dnf, non-interactively."""
        # -y so dnf does not abort waiting for confirmation on a non-tty.
        cmd = "dnf install -y switchtec"
        host.run(cmd, ignore_status=True)  # noqa

    def collect_switchtec_throughput(self, host, file_name: str, switchtech) -> None:
        """Append `switchtec bw` output for `switchtech` to `file_name` as JSON."""
        out = host.run(f"switchtec bw {switchtech}")
        return_json = {switchtech: self.convert_switchtech_output_to_json(out)}
        with FileActions.file_open(file_name, "a") as f:
            # `Union[IO[bytes], IO[str]]`.
            json.dump(return_json, f, indent=4, sort_keys=False)

    def convert_switchtech_output_to_json(self, out):
        """Parse `switchtec bw` text into a nested dict:
        {partition line: {logical-port line: {"Out"/"In": value}}}.

        Relies on the fixed indentation/keyword layout of the CLI output;
        each level is keyed by the text left of the line's colon.
        """
        lines = out.splitlines()
        line_num = 0
        key_dict = {}
        while line_num < len(lines) and "Partition" in lines[line_num]:
            key, _ = lines[line_num].split(":")
            line_num += 1
            level1 = {}
            while line_num < len(lines) and "Logical" in lines[line_num]:
                key1, _ = lines[line_num].split(":")
                line_num += 1
                level2 = {}
                while line_num < len(lines) and (
                    "Out" in lines[line_num] or "In" in lines[line_num]
                ):
                    key2, value = lines[line_num].split(":")
                    level2.update({key2.lstrip(): value.lstrip()})
                    line_num += 1
                level1.update({key1.lstrip(): level2})
            key_dict.update({key: level1})
        return key_dict
"args": { 10 | "BLKSIZE": "4k", 11 | "DEPTH": 128, 12 | "RUNTIME": "10m", 13 | "SIZE": "100%", 14 | "RW": "randwrite", 15 | "VERIFY": "md5" 16 | } 17 | } 18 | }, 19 | "read_fio": { 20 | "read": { 21 | "template": "basic_read.fio", 22 | "args": { 23 | "BLKSIZE": "4k", 24 | "DEPTH": 128, 25 | "RUNTIME": "10m", 26 | "SIZE": "100%", 27 | "RW": "randread", 28 | "VERIFY": "md5" 29 | } 30 | } 31 | } 32 | 33 | } 34 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/drive_cache_check/drive_cache_check.rst: -------------------------------------------------------------------------------- 1 | ========================= 2 | Drive Cache Check 3 | ========================= 4 | * **Test Module** - drive_cache_check.drive_cache_check 5 | * **Test Control file** - Control file differs based on the drive type, power trigger and file system used to run the drive cache check. 6 | - *all drives without power trigger and no file system - /autoval_ssd/tests/drive_cache_check/drive_cache_check.json* 7 | - *all drives with power and no file system - /autoval_ssd/tests/drive_cache_check/drive_cache_check_warm.json* 8 | - *all drives without power trigger and include file system - /autoval_ssd/tests/drive_cache_check/drive_cache_check_with_fs.json* 9 | - *all drives with power trigger of warm cycle and include file system - /autoval_ssd/tests/drive_cache_check/drive_cache_check_with_fs_warm.json* 10 | - *all drives without power trigger and ext4 file system - /autoval_ssd/tests/drive_cache_check/drive_cache_check_with_fs_ext4.json* 11 | - *all drives with power trigger of warm cycle and ext4 file system - /autoval_ssd/tests/drive_cache_check/drive_cache_check_with_fs_ext4_warm.json* 12 | - *only boot drive without power trigger - /autoval_ssd/tests/drive_cache_check/drive_cache_check_boot.json* 13 | - *only boot drive with power trigger of warm cycle - /autoval_ssd/tests/drive_cache_check/drive_cache_check_boot_warm.json* 14 | 15 | 
---------------- 16 | Test Description 17 | ---------------- 18 | **This test validates the performance of the SSD during a fio operation by disabling and enabling the internal volatile write cache and then comparing the results.** 19 | 20 | --------------------------------------------------------- 21 | Test execution and steps involved 22 | --------------------------------------------------------- 23 | * Validate the drives which are supporting the cache features by using the below command. 24 | - cmd: nvme get-feature /dev/ -f 0x6 25 | * Enable the write cache and execute the write fio job along with the power trigger is passed from the input parameter json file. 26 | - cmd: nvme set-feature /dev/ -f 0x6 -v 1 27 | * Disable the write cache and execute the write fio job along with the power trigger is passed from the input parameter json file. 28 | - cmd: nvme set-feature /dev/ -f 0x6 -v 0 29 | * Validate is there any error present the output files. 30 | * Compare the iops values. 31 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/drive_cache_check/drive_cache_check_boot.json: -------------------------------------------------------------------------------- 1 | { 2 | "power_trigger": false, 3 | "power_cycle": "warm", 4 | "power_random_time": false, 5 | "only_boot_drive": true, 6 | "write_fio": { 7 | "write": { 8 | "template": "basic_write.fio", 9 | "args": { 10 | "BLKSIZE": "4k", 11 | "DEPTH": 128, 12 | "RUNTIME": "10m", 13 | "SIZE": "100%", 14 | "RW": "randwrite", 15 | "VERIFY": "md5" 16 | } 17 | } 18 | }, 19 | "read_fio": { 20 | "read": { 21 | "template": "basic_read.fio", 22 | "args": { 23 | "BLKSIZE": "4k", 24 | "DEPTH": 128, 25 | "RUNTIME": "10m", 26 | "SIZE": "100%", 27 | "RW": "randread", 28 | "VERIFY": "md5" 29 | } 30 | } 31 | } 32 | 33 | } 34 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/drive_cache_check/drive_cache_check_boot_warm.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "power_trigger": true, 3 | "power_cycle": "warm", 4 | "power_random_time": false, 5 | "only_boot_drive": true, 6 | "write_fio": { 7 | "write": { 8 | "template": "basic_write.fio", 9 | "args": { 10 | "BLKSIZE": "4k", 11 | "DEPTH": 128, 12 | "RUNTIME": "10m", 13 | "SIZE": "100%", 14 | "RW": "randwrite", 15 | "VERIFY": "md5" 16 | } 17 | } 18 | }, 19 | "read_fio": { 20 | "read": { 21 | "template": "basic_read.fio", 22 | "args": { 23 | "BLKSIZE": "4k", 24 | "DEPTH": 128, 25 | "RUNTIME": "10m", 26 | "SIZE": "100%", 27 | "RW": "randread", 28 | "VERIFY": "md5" 29 | } 30 | } 31 | } 32 | 33 | } 34 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/drive_cache_check/drive_cache_check_warm.json: -------------------------------------------------------------------------------- 1 | { 2 | "power_trigger": true, 3 | "power_cycle": "warm", 4 | "power_random_time": false, 5 | "include_boot_drive": true, 6 | "write_fio": { 7 | "write": { 8 | "template": "basic_write.fio", 9 | "args": { 10 | "BLKSIZE": "4k", 11 | "DEPTH": 128, 12 | "RUNTIME": "10m", 13 | "SIZE": "100%", 14 | "RW": "randwrite", 15 | "VERIFY": "md5" 16 | } 17 | } 18 | }, 19 | "read_fio": { 20 | "read": { 21 | "template": "basic_read.fio", 22 | "args": { 23 | "BLKSIZE": "4k", 24 | "DEPTH": 128, 25 | "RUNTIME": "10m", 26 | "SIZE": "100%", 27 | "RW": "randread", 28 | "VERIFY": "md5" 29 | } 30 | } 31 | } 32 | 33 | } 34 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/drive_cache_check/drive_cache_check_with_fs.json: -------------------------------------------------------------------------------- 1 | { 2 | "power_trigger": false, 3 | "power_cycle": "warm", 4 | "power_random_time": false, 5 | "write_fio": { 6 | "write": { 7 | "template": "basic_write.fio", 8 | "filesystem": true, 9 | "args": { 10 | "BLKSIZE": "4k", 11 | 
"DEPTH": 128, 12 | "RUNTIME": "10m", 13 | "SIZE": "100G", 14 | "RW": "randwrite", 15 | "VERIFY": "md5" 16 | } 17 | } 18 | }, 19 | "read_fio": { 20 | "read": { 21 | "template": "basic_read.fio", 22 | "filesystem": true, 23 | "skip_fs": true, 24 | "args": { 25 | "BLKSIZE": "4k", 26 | "DEPTH": 128, 27 | "RUNTIME": "10m", 28 | "SIZE": "100G", 29 | "RW": "randread", 30 | "VERIFY": "md5" 31 | } 32 | } 33 | } 34 | 35 | } 36 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/drive_cache_check/drive_cache_check_with_fs_ext4.json: -------------------------------------------------------------------------------- 1 | { 2 | "power_trigger": false, 3 | "power_cycle": "warm", 4 | "power_random_time": false, 5 | "write_fio": { 6 | "write": { 7 | "template": "basic_write.fio", 8 | "filesystem_type" : "ext4", 9 | "filesystem_options" : "", 10 | "filesystem": true, 11 | "args": { 12 | "BLKSIZE": "4k", 13 | "DEPTH": 128, 14 | "RUNTIME": "10m", 15 | "SIZE": "100G", 16 | "RW": "randwrite", 17 | "VERIFY": "md5" 18 | } 19 | } 20 | }, 21 | "read_fio": { 22 | "read": { 23 | "template": "basic_read.fio", 24 | "filesystem_type" : "ext4", 25 | "filesystem_options" : "", 26 | "filesystem": true, 27 | "skip_fs": true, 28 | "args": { 29 | "BLKSIZE": "4k", 30 | "DEPTH": 128, 31 | "RUNTIME": "10m", 32 | "SIZE": "100G", 33 | "RW": "randread", 34 | "VERIFY": "md5" 35 | } 36 | } 37 | } 38 | 39 | } 40 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/drive_cache_check/drive_cache_check_with_fs_ext4_warm.json: -------------------------------------------------------------------------------- 1 | { 2 | "power_trigger": true, 3 | "power_cycle": "warm", 4 | "power_random_time": false, 5 | "write_fio": { 6 | "write": { 7 | "template": "basic_write.fio", 8 | "filesystem_type" : "ext4", 9 | "filesystem_options" : "", 10 | "filesystem": true, 11 | "args": { 12 | "BLKSIZE": "4k", 13 | "DEPTH": 128, 14 | 
"RUNTIME": "10m", 15 | "SIZE": "100G", 16 | "RW": "randwrite", 17 | "VERIFY": "md5" 18 | } 19 | } 20 | }, 21 | "read_fio": { 22 | "read": { 23 | "template": "basic_read.fio", 24 | "filesystem_type" : "ext4", 25 | "filesystem_options" : "", 26 | "filesystem": true, 27 | "skip_fs": true, 28 | "args": { 29 | "BLKSIZE": "4k", 30 | "DEPTH": 128, 31 | "RUNTIME": "10m", 32 | "SIZE": "100G", 33 | "RW": "randread", 34 | "VERIFY": "md5" 35 | } 36 | } 37 | } 38 | 39 | } 40 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/drive_cache_check/drive_cache_check_with_fs_warm.json: -------------------------------------------------------------------------------- 1 | { 2 | "power_trigger": true, 3 | "power_cycle": "warm", 4 | "power_random_time": false, 5 | "write_fio": { 6 | "write": { 7 | "template": "basic_write.fio", 8 | "filesystem": true, 9 | "args": { 10 | "BLKSIZE": "4k", 11 | "DEPTH": 128, 12 | "RUNTIME": "10m", 13 | "SIZE": "100G", 14 | "RW": "randwrite", 15 | "VERIFY": "md5" 16 | } 17 | } 18 | }, 19 | "read_fio": { 20 | "read": { 21 | "template": "basic_read.fio", 22 | "filesystem": true, 23 | "skip_fs": true, 24 | "args": { 25 | "BLKSIZE": "4k", 26 | "DEPTH": 128, 27 | "RUNTIME": "10m", 28 | "SIZE": "100G", 29 | "RW": "randread", 30 | "VERIFY": "md5" 31 | } 32 | } 33 | } 34 | 35 | } 36 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/drive_data_integrity/di_10_ac_boot_local.json: -------------------------------------------------------------------------------- 1 | { 2 | "cycle_count": 10, 3 | "cycle_type": "ac", 4 | "only_boot_drive": true, 5 | "remote_fio": false 6 | } 7 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/drive_data_integrity/di_10_ac_boot_local_with_phy_location.json: -------------------------------------------------------------------------------- 1 | { 2 | "cycle_count": 10, 3 | 
"cycle_type": "ac", 4 | "only_boot_drive": true, 5 | "remote_fio": false, 6 | "boot_drive_physical_location": "0000:64:00.0" 7 | } 8 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/drive_data_integrity/di_10_ac_local.json: -------------------------------------------------------------------------------- 1 | { 2 | "cycle_count": 10, 3 | "cycle_type": "ac", 4 | "remote_fio": false, 5 | "drive_type": "ssd" 6 | } 7 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/drive_data_integrity/di_10_dc_cycle_graceful_shutdown_local.json: -------------------------------------------------------------------------------- 1 | { 2 | "cycle_count": 10, 3 | "cycle_type": "graceful_30s_cycle", 4 | "include_boot_drive": true, 5 | "remote_fio": false, 6 | "drive_type": "ssd" 7 | } 8 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/drive_data_integrity/di_10_dc_cycle_notified_boot_local.json: -------------------------------------------------------------------------------- 1 | { 2 | "cycle_count": 10, 3 | "cycle_type": "dc", 4 | "only_boot_drive": true, 5 | "remote_fio": false 6 | } 7 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/drive_data_integrity/di_10_dc_cycle_notified_local.json: -------------------------------------------------------------------------------- 1 | { 2 | "cycle_count": 10, 3 | "cycle_type": "dc", 4 | "remote_fio": false, 5 | "drive_type": "ssd" 6 | } 7 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/drive_data_integrity/di_10_dc_cycle_ungraceful_boot_local.json: -------------------------------------------------------------------------------- 1 | { 2 | "cycle_count": 10, 3 | "cycle_type": "warm", 4 | "only_boot_drive": true, 5 | "remote_fio": false 6 | } 7 | 
-------------------------------------------------------------------------------- /src/autoval_ssd/tests/drive_data_integrity/di_10_dc_cycle_ungraceful_local.json: -------------------------------------------------------------------------------- 1 | { 2 | "cycle_count": 10, 3 | "cycle_type": "warm", 4 | "remote_fio": false, 5 | "drive_type": "ssd" 6 | } 7 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/drive_data_integrity/di_10_inband_warm_reboot.json: -------------------------------------------------------------------------------- 1 | { 2 | "cycle_count": 10, 3 | "cycle_type": "reboot", 4 | "include_boot_drive": true, 5 | "drive_type": "ssd" 6 | } 7 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/drive_data_integrity/di_10_inband_warm_reboot_local.json: -------------------------------------------------------------------------------- 1 | { 2 | "cycle_count": 10, 3 | "cycle_type": "reboot", 4 | "include_boot_drive": true, 5 | "remote_fio": false, 6 | "drive_type": "ssd" 7 | } 8 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/drive_data_integrity/di_10_sled_boot_local.json: -------------------------------------------------------------------------------- 1 | { 2 | "cycle_count": 10, 3 | "cycle_type": "sled-cycle", 4 | "only_boot_drive": true, 5 | "remote_fio": false 6 | } 7 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/drive_data_integrity/di_10_sled_local.json: -------------------------------------------------------------------------------- 1 | { 2 | "cycle_count": 10, 3 | "cycle_type": "sled-cycle", 4 | "remote_fio": false, 5 | "drive_type": "ssd" 6 | } 7 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/drive_data_integrity/di_1_ac_local.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "cycle_count": 1, 3 | "cycle_type": "ac", 4 | "remote_fio": false, 5 | "drive_type": "ssd" 6 | } 7 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/drive_data_integrity/di_1_dc_cycle_notified_local.json: -------------------------------------------------------------------------------- 1 | { 2 | "cycle_count": 1, 3 | "cycle_type": "dc", 4 | "remote_fio": false, 5 | "drive_type": "ssd" 6 | } 7 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/drive_data_integrity/di_1_dc_cycle_ungraceful.json: -------------------------------------------------------------------------------- 1 | { 2 | "cycle_count": 1, 3 | "cycle_type": "warm", 4 | "remote_fio": true, 5 | "pre_condition":false, 6 | "drive_type": "ssd" 7 | } 8 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/drive_data_integrity/di_1_dc_cycle_ungraceful_local.json: -------------------------------------------------------------------------------- 1 | { 2 | "cycle_count": 1, 3 | "cycle_type": "warm", 4 | "remote_fio": false, 5 | "drive_type": "ssd" 6 | } 7 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/drive_data_integrity/di_1_inband_warm_reboot.json: -------------------------------------------------------------------------------- 1 | { 2 | "cycle_count": 1, 3 | "cycle_type": "reboot", 4 | "remote_fio": true, 5 | "include_boot_drive": true, 6 | "drive_type": "ssd" 7 | } 8 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/drive_data_integrity/di_1_inband_warm_reboot_boot_local.json: -------------------------------------------------------------------------------- 1 | { 2 | "cycle_count": 1, 3 | "only_boot_drive": true, 4 | "cycle_type": "reboot", 5 | 
"remote_fio": false 6 | } 7 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/drive_data_integrity/di_1_inband_warm_reboot_local.json: -------------------------------------------------------------------------------- 1 | { 2 | "cycle_count": 1, 3 | "cycle_type": "reboot", 4 | "include_boot_drive": true, 5 | "remote_fio": false, 6 | "drive_type": "ssd" 7 | } 8 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/drive_data_integrity/di_1_sled_boot_local.json: -------------------------------------------------------------------------------- 1 | { 2 | "cycle_count": 1, 3 | "cycle_type": "sled-cycle", 4 | "only_boot_drive": true, 5 | "remote_fio": false 6 | } 7 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/drive_data_integrity/di_1_sled_local.json: -------------------------------------------------------------------------------- 1 | { 2 | "cycle_count": 1, 3 | "cycle_type": "sled-cycle", 4 | "remote_fio": false, 5 | "drive_type": "ssd" 6 | } 7 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/drive_data_integrity/di_3_ac.json: -------------------------------------------------------------------------------- 1 | { 2 | "cycle_count": 3, 3 | "cycle_type": "ac", 4 | "remote_fio": true, 5 | "drive_type": "ssd" 6 | } 7 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/drive_data_integrity/di_3_ac_boot.json: -------------------------------------------------------------------------------- 1 | { 2 | "cycle_count": 3, 3 | "cycle_type": "ac", 4 | "only_boot_drive": true, 5 | "remote_fio": true 6 | } 7 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/drive_data_integrity/di_3_ac_md.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "cycle_count": 3, 3 | "cycle_type": "ac", 4 | "is_md": true, 5 | "remote_fio": true 6 | } 7 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/drive_data_integrity/di_3_dc_cycle_notified.json: -------------------------------------------------------------------------------- 1 | { 2 | "cycle_count": 3, 3 | "cycle_type": "dc", 4 | "remote_fio": true, 5 | "drive_type": "ssd" 6 | } 7 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/drive_data_integrity/di_3_dc_cycle_notified_boot.json: -------------------------------------------------------------------------------- 1 | { 2 | "cycle_count": 3, 3 | "cycle_type": "dc", 4 | "only_boot_drive": true, 5 | "remote_fio": true 6 | } 7 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/drive_data_integrity/di_3_dc_cycle_notified_boot_hwc.json: -------------------------------------------------------------------------------- 1 | { 2 | "cycle_count": 3, 3 | "cycle_type": "dc", 4 | "only_boot_drive": true, 5 | "oob_mode": "hwc", 6 | "remote_fio": true 7 | } 8 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/drive_data_integrity/di_3_dc_cycle_notified_md.json: -------------------------------------------------------------------------------- 1 | { 2 | "cycle_count": 3, 3 | "cycle_type": "dc", 4 | "is_md": true, 5 | "remote_fio": true 6 | } 7 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/drive_data_integrity/di_3_dc_cycle_ungraceful.json: -------------------------------------------------------------------------------- 1 | { 2 | "cycle_count": 3, 3 | "cycle_type": "warm", 4 | "remote_fio": true, 5 | "drive_type": "ssd" 6 | } 7 | 
-------------------------------------------------------------------------------- /src/autoval_ssd/tests/drive_data_integrity/di_3_dc_cycle_ungraceful_boot.json: -------------------------------------------------------------------------------- 1 | { 2 | "cycle_count": 3, 3 | "cycle_type": "warm", 4 | "only_boot_drive": true, 5 | "remote_fio": true 6 | } 7 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/drive_data_integrity/di_3_dc_cycle_ungraceful_md.json: -------------------------------------------------------------------------------- 1 | { 2 | "cycle_count": 3, 3 | "cycle_type": "warm", 4 | "is_md": true, 5 | "remote_fio": true 6 | } 7 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/drive_data_integrity/di_3_inband_warm_reboot.json: -------------------------------------------------------------------------------- 1 | { 2 | "cycle_count": 3, 3 | "cycle_type": "reboot", 4 | "include_boot_drive": true, 5 | "remote_fio": false, 6 | "drive_type": "ssd" 7 | } 8 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/drive_data_integrity/di_3_inband_warm_reboot_remote.json: -------------------------------------------------------------------------------- 1 | { 2 | "cycle_count": 3, 3 | "include_boot_drive": true, 4 | "cycle_type": "reboot", 5 | "remote_fio": true, 6 | "drive_type": "ssd" 7 | } 8 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/drive_md5_verify/data_retension_10m.json: -------------------------------------------------------------------------------- 1 | { 2 | "cycle_type_list": ["off","on"], 3 | "wait_time" : 600, 4 | "percent_write_size" : 10, 5 | "filesystem" : true, 6 | "write_fio": { 7 | "ssd_md5": { 8 | "template": "basic_write.fio", 9 | "filesystem_type" : "ext4", 10 | "filesystem_options" : "", 11 | "filesystem": true, 12 | "args": { 
13 | "NAME": "fio_filesystem_test", 14 | "BLKSIZE": "128k", 15 | "SIZE": "1g", 16 | "DEPTH": 128, 17 | "RUNTIME": "10m", 18 | "VERIFY": "md5", 19 | "RW": "randwrite" 20 | } 21 | } 22 | }, 23 | "read_fio": { 24 | "ssd_md5": { 25 | "template": "basic_read.fio", 26 | "filesystem": true, 27 | "skip_fs": true, 28 | "args": { 29 | "NAME": "fio_filesystem_test", 30 | "BLKSIZE": "128k", 31 | "SIZE": "1g", 32 | "DEPTH": 128, 33 | "RUNTIME": "10m", 34 | "VERIFY": "md5", 35 | "RW": "randread" 36 | } 37 | } 38 | }, 39 | "verify_fio": { 40 | "ssd_md5": { 41 | "template": "basic_verify.fio", 42 | "filesystem": true, 43 | "skip_fs": true, 44 | "args": { 45 | "NAME": "fio_filesystem_test", 46 | "BLKSIZE": "128k", 47 | "SIZE": "1g", 48 | "DEPTH": 128, 49 | "VERIFY": "md5", 50 | "RW": "randread" 51 | } 52 | } 53 | } 54 | 55 | } 56 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/drive_md5_verify/data_retension_12hrs.json: -------------------------------------------------------------------------------- 1 | { 2 | "cycle_type_list": ["off","on"], 3 | "wait_time" : 43200, 4 | "percent_write_size" : 10, 5 | "filesystem" : true, 6 | "write_fio": { 7 | "ssd_md5": { 8 | "template": "basic_write.fio", 9 | "filesystem_type" : "ext4", 10 | "filesystem_options" : "", 11 | "filesystem": true, 12 | "args": { 13 | "NAME": "fio_filesystem_test", 14 | "BLKSIZE": "128k", 15 | "SIZE": "3g", 16 | "DEPTH": 128, 17 | "RUNTIME": "10m", 18 | "VERIFY": "md5", 19 | "RW": "randwrite" 20 | } 21 | } 22 | }, 23 | "read_fio": { 24 | "ssd_md5": { 25 | "template": "basic_read.fio", 26 | "filesystem": true, 27 | "skip_fs": true, 28 | "args": { 29 | "NAME": "fio_filesystem_test", 30 | "BLKSIZE": "128k", 31 | "SIZE": "3g", 32 | "DEPTH": 128, 33 | "RUNTIME": "10m", 34 | "VERIFY": "md5", 35 | "RW": "randread" 36 | } 37 | } 38 | }, 39 | "verify_fio": { 40 | "ssd_md5": { 41 | "template": "basic_verify.fio", 42 | "filesystem": true, 43 | "skip_fs": true, 44 | "args": 
{ 45 | "NAME": "fio_filesystem_test", 46 | "BLKSIZE": "128k", 47 | "SIZE": "3g", 48 | "DEPTH": 128, 49 | "VERIFY": "md5", 50 | "RW": "randread" 51 | } 52 | } 53 | } 54 | 55 | } 56 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/drive_md5_verify/data_retension_6h.json: -------------------------------------------------------------------------------- 1 | { 2 | "cycle_type_list": ["off","on"], 3 | "wait_time" : 21600, 4 | "percent_write_size" : 10, 5 | "filesystem" : true, 6 | "write_fio": { 7 | "ssd_md5": { 8 | "template": "basic_write.fio", 9 | "filesystem_type" : "ext4", 10 | "filesystem_options" : "", 11 | "filesystem": true, 12 | "args": { 13 | "NAME": "fio_filesystem_test", 14 | "BLKSIZE": "128k", 15 | "SIZE": "3g", 16 | "DEPTH": 128, 17 | "RUNTIME": "10m", 18 | "VERIFY": "md5", 19 | "RW": "randwrite" 20 | } 21 | } 22 | }, 23 | "read_fio": { 24 | "ssd_md5": { 25 | "template": "basic_read.fio", 26 | "filesystem": true, 27 | "skip_fs": true, 28 | "args": { 29 | "NAME": "fio_filesystem_test", 30 | "BLKSIZE": "128k", 31 | "SIZE": "3g", 32 | "DEPTH": 128, 33 | "RUNTIME": "10m", 34 | "VERIFY": "md5", 35 | "RW": "randread" 36 | } 37 | } 38 | }, 39 | "verify_fio": { 40 | "ssd_md5": { 41 | "template": "basic_verify.fio", 42 | "filesystem": true, 43 | "skip_fs": true, 44 | "args": { 45 | "NAME": "fio_filesystem_test", 46 | "BLKSIZE": "128k", 47 | "SIZE": "3g", 48 | "DEPTH": 128, 49 | "VERIFY": "md5", 50 | "RW": "randread" 51 | } 52 | } 53 | } 54 | 55 | } 56 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/drive_md5_verify/drive_md5_verify.rst: -------------------------------------------------------------------------------- 1 | ========================= 2 | Drive MD5 Verify/Data Retention Test 3 | ========================= 4 | * **Test Module** - drive_md5_verify.drive_md5_verify 5 | * **Test Control file** - Control file differs based on the drive type, file 
system, run time and cycle_type used to run the drive md5 verify. 6 | 7 | ---------------- 8 | Test Description 9 | ---------------- 10 | **The purpose of the drive_md5_verify test case is to validate the data integrity of storage devices (drives) after a power cycle event. This test ensures that the stored data remains uncorrupted during an unexpected power loss or reboot scenario, which is crucial for mission-critical applications and systems where data consistency and reliability are paramount. By comparing the MD5 checksum values before and after the power cycle, this test helps identify any potential issues with the drive's ability to maintain data integrity under these conditions.** 11 | 12 | --------------------------------------------------------- 13 | Test execution and steps involved 14 | --------------------------------------------------------- 15 | * Check if the drive is a filesystem type; if so, create a file on the mounted drive, otherwise, write directly to the device. 16 | * Perform an FIO write operation on the specified drives. 17 | * Calculate the MD5 checksum value for the written data. 18 | * Perform a power cycle (reboot) of the DUT (Device Under Test). 19 | * After the DUT boots back up, calculate the MD5 checksum value again for the written data. 20 | * Compare the pre- and post-reboot MD5 checksum values to ensure they match. If they don't, it indicates data corruption due to the power cycle event. 21 | * Clean up by unmounting any mounted directories. 
22 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/drive_md5_verify/fio_and_md5function_with_fs.json: -------------------------------------------------------------------------------- 1 | { 2 | "skip_fs" : false, 3 | "cycle_type_list": ["warm"], 4 | "percent_write_size" : 10, 5 | "filesystem" : true, 6 | "write_fio": { 7 | "ssd_md5": { 8 | "template": "basic_write.fio", 9 | "filesystem_type" : "ext4", 10 | "filesystem_options" : "", 11 | "filesystem": true, 12 | "args": { 13 | "NAME": "fio_filesystem_test", 14 | "BLKSIZE": "128k", 15 | "SIZE": "100g", 16 | "DEPTH": 128, 17 | "RUNTIME": "1h", 18 | "VERIFY": "md5", 19 | "RW": "randwrite" 20 | } 21 | } 22 | }, 23 | "read_fio": { 24 | "ssd_md5": { 25 | "template": "basic_read.fio", 26 | "filesystem": true, 27 | "skip_fs": true, 28 | "args": { 29 | "NAME": "fio_filesystem_test", 30 | "BLKSIZE": "128k", 31 | "SIZE": "100g", 32 | "DEPTH": 128, 33 | "RUNTIME": "1h", 34 | "VERIFY": "md5", 35 | "RW": "randread" 36 | } 37 | } 38 | }, 39 | "verify_fio": { 40 | "ssd_md5": { 41 | "template": "basic_verify.fio", 42 | "filesystem": true, 43 | "skip_fs": true, 44 | "args": { 45 | "NAME": "fio_filesystem_test", 46 | "BLKSIZE": "128k", 47 | "SIZE": "100g", 48 | "DEPTH": 128, 49 | "VERIFY": "md5", 50 | "RW": "randread" 51 | } 52 | } 53 | } 54 | 55 | } 56 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/drive_md5_verify/fio_boot.json: -------------------------------------------------------------------------------- 1 | { 2 | "only_boot_drive": true, 3 | "cycle_type_list": ["warm"], 4 | "percent_write_size": 10, 5 | "write_fio": { 6 | "ssd_md5": { 7 | "template": "basic_write.fio", 8 | "args": { 9 | "NAME": "fio_raw_test", 10 | "BLKSIZE": "128k", 11 | "SIZE": "6G", 12 | "DEPTH": 128, 13 | "RUNTIME": "5m", 14 | "VERIFY": "md5", 15 | "RW": "randwrite" 16 | } 17 | } 18 | }, 19 | "read_fio": { 20 | "ssd_md5": { 21 | "template": 
"basic_read.fio", 22 | "args": { 23 | "NAME": "fio_raw_test", 24 | "BLKSIZE": "128k", 25 | "SIZE": "6G", 26 | "DEPTH": 128, 27 | "RUNTIME": "5m", 28 | "VERIFY": "md5", 29 | "RW": "randread" 30 | } 31 | } 32 | }, 33 | "verify_fio": { 34 | "ssd_md5": { 35 | "template": "basic_verify.fio", 36 | "args": { 37 | "NAME": "fio_raw_test", 38 | "BLKSIZE": "128k", 39 | "SIZE": "6G", 40 | "DEPTH": 128, 41 | "RW": "randread", 42 | "VERIFY": "md5" 43 | } 44 | } 45 | } 46 | } 47 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/drive_md5_verify/fio_with_fs.json: -------------------------------------------------------------------------------- 1 | { 2 | "skip_fs" : false, 3 | "cycle_count" : 1, 4 | "cycle_type_list": ["warm"], 5 | "percent_write_size" : 10, 6 | "filesystem" : true, 7 | "drive_type": "ssd", 8 | "serialize_overlap": 1, 9 | "write_fio": { 10 | "ssd_md5": { 11 | "template": "basic_write.fio", 12 | "filesystem_type" : "ext4", 13 | "filesystem_options" : "", 14 | "filesystem": true, 15 | "args": { 16 | "NAME": "fio_filesystem_test", 17 | "BLKSIZE": "128k", 18 | "SIZE": "100g", 19 | "DEPTH": 128, 20 | "RUNTIME": "1h", 21 | "RW": "randwrite", 22 | "VERIFY": "md5" 23 | } 24 | } 25 | }, 26 | "read_fio": { 27 | "ssd_md5": { 28 | "template": "basic_read.fio", 29 | "filesystem": true, 30 | "skip_fs": true, 31 | "args": { 32 | "NAME": "fio_filesystem_test", 33 | "BLKSIZE": "128k", 34 | "SIZE": "100g", 35 | "DEPTH": 128, 36 | "RUNTIME": "1h", 37 | "RW": "randread", 38 | "VERIFY": "md5" 39 | } 40 | } 41 | }, 42 | "verify_fio": { 43 | "ssd_md5": { 44 | "template": "basic_verify.fio", 45 | "filesystem": true, 46 | "skip_fs": true, 47 | "args": { 48 | "NAME": "fio_filesystem_test", 49 | "BLKSIZE": "128k", 50 | "SIZE": "100g", 51 | "DEPTH": 128, 52 | "VERIFY": "md5", 53 | "RW": "randread" 54 | } 55 | } 56 | } 57 | } 58 | -------------------------------------------------------------------------------- 
/src/autoval_ssd/tests/drive_md5_verify/fio_with_raw.json: -------------------------------------------------------------------------------- 1 | { 2 | "include_boot_drive": true, 3 | "cycle_type_list": ["warm"], 4 | "percent_write_size" : 10, 5 | "write_fio": { 6 | "ssd_md5": { 7 | "template": "basic_write.fio", 8 | "args": { 9 | "NAME": "fio_raw_test", 10 | "BLKSIZE": "128k", 11 | "SIZE": "6G", 12 | "DEPTH": 128, 13 | "RUNTIME": "1h", 14 | "VERIFY": "md5", 15 | "RW": "randwrite" 16 | } 17 | } 18 | }, 19 | "read_fio": { 20 | "ssd_md5": { 21 | "template": "basic_read.fio", 22 | "args": { 23 | "NAME": "fio_raw_test", 24 | "BLKSIZE": "128k", 25 | "SIZE": "6G", 26 | "DEPTH": 128, 27 | "RUNTIME": "1h", 28 | "VERIFY": "md5", 29 | "RW": "randread" 30 | } 31 | } 32 | }, 33 | "verify_fio": { 34 | "ssd_md5": { 35 | "template": "basic_verify.fio", 36 | "args": { 37 | "NAME": "fio_raw_test", 38 | "BLKSIZE": "128k", 39 | "SIZE": "6G", 40 | "DEPTH": 128, 41 | "RW": "randread", 42 | "VERIFY": "md5" 43 | } 44 | } 45 | } 46 | } 47 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/fio_fb/boot_sert.json: -------------------------------------------------------------------------------- 1 | { 2 | "only_boot_drive": true, 3 | "run_definition": { 4 | "Write": { 5 | "template": "boot_sert.job", 6 | "args": { 7 | "RW": "write", 8 | "SIZE": "10G", 9 | "BLKSIZE": "128k", 10 | "DEPTH": 32, 11 | "LOOPS": 50 12 | } 13 | }, 14 | "Read": { 15 | "template": "boot_sert.job", 16 | "args": { 17 | "RW": "read", 18 | "SIZE": "10G", 19 | "BLKSIZE": "128k", 20 | "DEPTH": 32, 21 | "LOOPS": 50 22 | } 23 | } 24 | } 25 | } 26 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/fio_fb/fio_fb.rst: -------------------------------------------------------------------------------- 1 | ========================= 2 | Flash Fio Readines 3 | ========================= 4 | * **Test Module** - 
fio_fb.fio_fb 5 | * **Test Control file** - Control file differs based on the drive type, file system, run time that you want to stress the drives. 6 | 7 | ---------------- 8 | Test Description 9 | ---------------- 10 | **This test uses the Fio tool which is a public domain tool for testing drives and NVME's. This test validates performance by stressing the drives by creating and running the fio jobs.** 11 | 12 | --------------------------------------------------------- 13 | Test execution and steps involved 14 | --------------------------------------------------------- 15 | * Pass the fio templates from the test control files. 16 | *Example:* 17 | rootfs_template.job 18 | 19 | boot_sert.job 20 | 21 | filesystem_template.job 22 | 23 | sequential_job.fio 24 | 25 | stress_fio.fio 26 | 27 | flash_fio_readiness.job 28 | 29 | ssd_health_counter.job 30 | * Validate the provided FIO job file or create the workload file if needed. 31 | * Run the FIO workloads on the target storage devices. 
32 | * Validate the FIO result and removing temporary files created during the test 33 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/fio_fb/fio_fb_filesystem.json: -------------------------------------------------------------------------------- 1 | { 2 | "run_definition": { 3 | "random": { 4 | "template": "filesystem_template.job", 5 | "filesystem": "True", 6 | "args": { 7 | "NAME": "fio_filesystem_test", 8 | "RW": "randrw", 9 | "BLKSIZE": "128k", 10 | "SIZE": "10G", 11 | "DEPTH": "128", 12 | "RUNTIME": "1h", 13 | "MIXWRITE":50, 14 | "MIXREAD":50 15 | } 16 | } 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/fio_fb/fio_fb_perf.json: -------------------------------------------------------------------------------- 1 | { 2 | "trim_arg": {"BLKSIZE": "1G", "BLKALIGN": "1G"}, 3 | "run_definition": { 4 | "sequential": { 5 | "template": "sequential_job.fio", 6 | "precondition_loops": 2, 7 | "precondition_template": "sequential_precondition.fio", 8 | "args": { 9 | "RUNTIME": "1200s", 10 | "RW": ["write", "read"], 11 | "BLKSIZE": ["256k", "512k"], 12 | "IODEPTH": ["1", "4", "8", "16"], 13 | "NUM_JOBS": 1 14 | } 15 | }, 16 | "random": { 17 | "template": "random_job.fio", 18 | "precondition_loops": 2, 19 | "precondition_template": "random_precondition.fio", 20 | "args": { 21 | "RUNTIME": "1200s", 22 | "MIXREAD": 70, 23 | "MIXWRITE": 30, 24 | "BLKSIZE": ["256k", "512k"], 25 | "IODEPTH": ["1", "4", "8", "16"], 26 | "NUM_JOBS": ["1", "4", "8"], 27 | "RW": "randrw" 28 | } 29 | } 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/fio_fb/fio_fb_rootfs.json: -------------------------------------------------------------------------------- 1 | { 2 | "check_not_empty_test_drives": false, 3 | "only_boot_drive": true, 4 | "run_definition": { 5 | "random": { 6 | "template": 
"rootfs_template.job", 7 | "files": {"file": "/root/havoc_fio_file"}, 8 | "args": { 9 | "NAME": "fio_rootfs_test", 10 | "RW": "randrw", 11 | "BLKSIZE": "128k", 12 | "SIZE": "60G", 13 | "RUNTIME": "30m", 14 | "DEPTH": 128, 15 | "MIXWRITE": 50, 16 | "MIXREAD": 50 17 | } 18 | } 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/fio_fb/fio_fb_rootfs_systemd_workload_slice.json: -------------------------------------------------------------------------------- 1 | { 2 | "prefix_command_name": "workload.slice", 3 | "check_not_empty_test_drives": false, 4 | "only_boot_drive": true, 5 | "run_definition": { 6 | "random": { 7 | "template": "rootfs_template.job", 8 | "files": {"file": "/root/havoc_fio_file"}, 9 | "args": { 10 | "NAME": "fio_rootfs_test", 11 | "RW": "randrw", 12 | "BLKSIZE": "128k", 13 | "SIZE": "60G", 14 | "RUNTIME": "30m", 15 | "DEPTH": 128, 16 | "MIXWRITE": 50, 17 | "MIXREAD": 50 18 | } 19 | } 20 | } 21 | } 22 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/fio_fb/fio_fb_stress_1h.json: -------------------------------------------------------------------------------- 1 | { 2 | "run_definition": { 3 | "randrw": { 4 | "template": "stress_fio.fio", 5 | "precondition_loops": 2, 6 | "args": { 7 | "RUNTIME": "1h", 8 | "BLKSIZE": "8k", 9 | "IODEPTH": 32, 10 | "SIZE": "100%" 11 | } 12 | } 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/fio_fb/fio_fb_stress_6h.json: -------------------------------------------------------------------------------- 1 | { 2 | "run_definition": { 3 | "randrw": { 4 | "template": "stress_fio.fio", 5 | "precondition_loops": 2, 6 | "args": { 7 | "RUNTIME": "6h", 8 | "BLKSIZE": "8k", 9 | "IODEPTH": 32, 10 | "SIZE": "100%" 11 | } 12 | } 13 | } 14 | } 15 | -------------------------------------------------------------------------------- 
/src/autoval_ssd/tests/fio_fb/fio_fb_stress_6h_skip_iops.json: -------------------------------------------------------------------------------- 1 | { 2 | "run_definition": { 3 | "randrw": { 4 | "template": "stress_fio.fio", 5 | "precondition_loops": 2, 6 | "args": { 7 | "RUNTIME": "6h", 8 | "BLKSIZE": "8k", 9 | "IODEPTH": 32, 10 | "SIZE": "100%" 11 | } 12 | } 13 | }, 14 | "skip_iops_validation": true 15 | } 16 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/fio_fb/fio_fb_stress_6h_with_periodic_drive_monitor.json: -------------------------------------------------------------------------------- 1 | { 2 | "run_definition": { 3 | "randrw": { 4 | "template": "stress_fio.fio", 5 | "precondition_loops": 2, 6 | "args": { 7 | "RUNTIME": "6h", 8 | "BLKSIZE": "8k", 9 | "IODEPTH": 32, 10 | "SIZE": "100%" 11 | } 12 | } 13 | }, 14 | "enable_periodic_drive_monitor": true 15 | } 16 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/fio_fb/fio_fb_stress_bc_6h.json: -------------------------------------------------------------------------------- 1 | { 2 | "run_definition": { 3 | "randrw": { 4 | "template": "basic_stress_fio.fio", 5 | "precondition_loops": 2, 6 | "args": { 7 | "RUNTIME": "6h", 8 | "BLKSIZE": "8k", 9 | "IODEPTH": 32, 10 | "SIZE": "100%" 11 | } 12 | } 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/fio_fb/fio_stress_all_2days.json: -------------------------------------------------------------------------------- 1 | { 2 | "run_definition": { 3 | "randrw": { 4 | "template": "stress_fio.fio", 5 | "precondition_loops": 2, 6 | "args": { 7 | "RUNTIME": "2d", 8 | "BLKSIZE": "8k", 9 | "IODEPTH": 32, 10 | "SIZE": "100%" 11 | } 12 | } 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/fio_fb/fio_stress_all_8days.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "run_definition": { 3 | "randrw": { 4 | "template": "stress_fio.fio", 5 | "precondition_loops": 2, 6 | "args": { 7 | "RUNTIME": "7d", 8 | "BLKSIZE": "8k", 9 | "IODEPTH": 32, 10 | "SIZE": "100%" 11 | } 12 | } 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/fio_fb/fio_stress_ssd_2days.json: -------------------------------------------------------------------------------- 1 | { 2 | "drive_type": "ssd", 3 | "run_definition": { 4 | "randrw": { 5 | "template": "stress_fio.fio", 6 | "precondition_loops": 2, 7 | "args": { 8 | "RUNTIME": "2d", 9 | "BLKSIZE": "8k", 10 | "IODEPTH": 32, 11 | "SIZE": "100%" 12 | } 13 | } 14 | } 15 | } 16 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/fio_fb/flash_fio_readiness.json: -------------------------------------------------------------------------------- 1 | { 2 | "skip_iops_validation": true, 3 | "drive_type": "ssd", 4 | "run_definition": { 5 | "randrw": { 6 | "template": "flash_fio_readiness.job", 7 | "args": { 8 | "RUNTIME": "30m" 9 | } 10 | } 11 | } 12 | } 13 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/fio_fb/ssd_health_counter_10G.json: -------------------------------------------------------------------------------- 1 | { 2 | "check_not_empty_test_drives": true, 3 | "remove_partition":false, 4 | "drive_type":"ssd", 5 | "drive_interface":"nvme", 6 | "run_definition": { 7 | "random": { 8 | "template": "ssd_health_counter.job", 9 | "args": { 10 | "SIZE": "10G" 11 | } 12 | } 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/fio_fb/ssd_health_counter_1G.json: -------------------------------------------------------------------------------- 1 | { 2 | "check_not_empty_test_drives": true, 
3 | "remove_partition":false, 4 | "drive_type":"ssd", 5 | "drive_interface":"nvme", 6 | "run_definition": { 7 | "random": { 8 | "template": "ssd_health_counter.job", 9 | "args": { 10 | "SIZE": "1G" 11 | } 12 | } 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/fio_fb/ssd_health_counter_20G.json: -------------------------------------------------------------------------------- 1 | { 2 | "check_not_empty_test_drives": true, 3 | "remove_partition":false, 4 | "drive_type":"ssd", 5 | "drive_interface":"nvme", 6 | "run_definition": { 7 | "random": { 8 | "template": "ssd_health_counter.job", 9 | "args": { 10 | "SIZE": "20G" 11 | } 12 | } 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/fio_fb/ssd_health_counter_2G.json: -------------------------------------------------------------------------------- 1 | { 2 | "check_not_empty_test_drives": true, 3 | "remove_partition":false, 4 | "drive_type":"ssd", 5 | "drive_interface":"nvme", 6 | "run_definition": { 7 | "random": { 8 | "template": "ssd_health_counter.job", 9 | "args": { 10 | "SIZE": "2G" 11 | } 12 | } 13 | } 14 | } 15 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/fio_fb/storage_data_collector.json: -------------------------------------------------------------------------------- 1 | { 2 | "run_definition": { 3 | "randrw": { 4 | "template": "stress_fio.fio", 5 | "args": { 6 | "RUNTIME": "30m", 7 | "BLKSIZE": "8k", 8 | "SIZE": "100%", 9 | "IODEPTH": "32" 10 | } 11 | } 12 | }, 13 | "skip_iops_validation": true 14 | } 15 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/fio_fb/storage_data_collector_500s.json: -------------------------------------------------------------------------------- 1 | { 2 | "run_definition": { 3 | "randrw": { 4 | "template": 
"stress_fio.fio", 5 | "args": { 6 | "RUNTIME": "500s", 7 | "BLKSIZE": "8k", 8 | "SIZE": "100%", 9 | "IODEPTH": "32" 10 | } 11 | } 12 | } 13 | } 14 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/fio_internal_flush/dirty_power_cycle_1.json: -------------------------------------------------------------------------------- 1 | { 2 | "drive_type": "ssd", 3 | "drive_interface": "nvme", 4 | "nvme_flush": false, 5 | "power_trigger": true, 6 | "iteration_count": 1, 7 | "cycle_type": "warm", 8 | "workloads":{ 9 | "nvme_flush_write": { 10 | "nvme_flush": { 11 | "template": "basic_write.fio", 12 | "args": { 13 | "BLKSIZE": "4k", 14 | "SIZE": "100%", 15 | "DEPTH": 128, 16 | "RUNTIME": "10m", 17 | "VERIFY": "md5", 18 | "RW": "randwrite" 19 | } 20 | } 21 | }, 22 | "nvme_flush_read": { 23 | "nvme_flush": { 24 | "template": "basic_read.fio", 25 | "args": { 26 | "BLKSIZE": "4k", 27 | "SIZE": "100%", 28 | "DEPTH": 128, 29 | "RUNTIME": "10m", 30 | "VERIFY": "md5", 31 | "RW": "randread" 32 | } 33 | } 34 | }, 35 | "nvme_flush_verify": { 36 | "nvme_flush": { 37 | "template": "basic_verify.fio", 38 | "args": { 39 | "RW": "randread", 40 | "VERIFY": "md5", 41 | "BLKSIZE": "4k", 42 | "SIZE": "100%", 43 | "DEPTH": 128 44 | } 45 | } 46 | } 47 | } 48 | } 49 | 50 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/fio_internal_flush/dirty_power_cycle_3.json: -------------------------------------------------------------------------------- 1 | { 2 | "drive_type": "ssd", 3 | "drive_interface": "nvme", 4 | "nvme_flush": false, 5 | "power_trigger": true, 6 | "iteration_count": 3, 7 | "cycle_type": "warm", 8 | "workloads":{ 9 | "nvme_flush_write": { 10 | "nvme_flush": { 11 | "template": "basic_write.fio", 12 | "args": { 13 | "BLKSIZE": "4k", 14 | "SIZE": "100%", 15 | "DEPTH": 128, 16 | "RUNTIME": "1h", 17 | "VERIFY": "md5", 18 | "RW": "randwrite" 19 | } 20 | } 21 | }, 22 | "nvme_flush_read": 
{ 23 | "nvme_flush": { 24 | "template": "basic_read.fio", 25 | "args": { 26 | "BLKSIZE": "4k", 27 | "SIZE": "100%", 28 | "DEPTH": 128, 29 | "RUNTIME": "1h", 30 | "VERIFY": "md5", 31 | "RW": "randread" 32 | } 33 | } 34 | }, 35 | "nvme_flush_verify": { 36 | "nvme_flush": { 37 | "template": "basic_verify.fio", 38 | "args": { 39 | "RW": "randread", 40 | "VERIFY": "md5", 41 | "BLKSIZE": "4k", 42 | "SIZE": "100%", 43 | "DEPTH": 128 44 | } 45 | } 46 | } 47 | } 48 | } 49 | 50 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/fio_internal_flush/dirty_power_cycle_3_boot.json: -------------------------------------------------------------------------------- 1 | { 2 | "nvme_flush": true, 3 | "drive_type": "ssd", 4 | "drive_interface": "nvme", 5 | "only_boot_drive": true, 6 | "power_trigger": true, 7 | "iteration_count": 3, 8 | "cycle_type": "warm", 9 | "workloads": { 10 | "nvme_flush_write": { 11 | "nvme_flush": { 12 | "template": "basic_write.fio", 13 | "args": { 14 | "RW": "write", 15 | "VERIFY": "md5", 16 | "BLKSIZE": "4k", 17 | "RUNTIME": "10m", 18 | "SIZE": "100G", 19 | "DEPTH": 128 20 | } 21 | } 22 | }, 23 | "nvme_flush_read": { 24 | "nvme_flush": { 25 | "template": "basic_read.fio", 26 | "args": { 27 | "RW": "read", 28 | "VERIFY": "md5", 29 | "BLKSIZE": "4k", 30 | "RUNTIME": "10m", 31 | "SIZE": "100G", 32 | "DEPTH": 128 33 | } 34 | } 35 | }, 36 | "nvme_flush_verify": { 37 | "nvme_flush": { 38 | "template": "basic_verify.fio", 39 | "args": { 40 | "RW": "read", 41 | "VERIFY": "md5", 42 | "BLKSIZE": "4k", 43 | "SIZE": "100G", 44 | "DEPTH": 128 45 | } 46 | } 47 | } 48 | } 49 | } 50 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/fio_internal_flush/dirty_power_cycle_3_fio_internal_flush.json: -------------------------------------------------------------------------------- 1 | { 2 | "nvme_flush": false, 3 | "power_trigger": true, 4 | "iteration_count": 3, 5 | 
"cycle_type": "warm", 6 | "workloads": { 7 | "nvme_flush_write": { 8 | "nvme_flush": { 9 | "template": "basic_write.fio", 10 | "args": { 11 | "RW": "write", 12 | "VERIFY": "md5", 13 | "BLKSIZE": "4k", 14 | "RUNTIME": "10m", 15 | "SIZE": "100G", 16 | "DEPTH": 128 17 | } 18 | } 19 | }, 20 | "nvme_flush_read": { 21 | "nvme_flush": { 22 | "template": "basic_read.fio", 23 | "args": { 24 | "RW": "read", 25 | "VERIFY": "md5", 26 | "BLKSIZE": "4k", 27 | "RUNTIME": "10m", 28 | "SIZE": "100G", 29 | "DEPTH": 128 30 | } 31 | } 32 | }, 33 | "nvme_flush_verify": { 34 | "nvme_flush": { 35 | "template": "basic_verify.fio", 36 | "args": { 37 | "RW": "read", 38 | "VERIFY": "md5", 39 | "BLKSIZE": "4k", 40 | "SIZE": "100G", 41 | "DEPTH": 128 42 | } 43 | } 44 | } 45 | } 46 | } 47 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/fio_internal_flush/dirty_power_cycle_3_fio_internal_flush_with_fs.json: -------------------------------------------------------------------------------- 1 | { 2 | "nvme_flush": false, 3 | "power_trigger": true, 4 | "iteration_count": 3, 5 | "cycle_type": "warm", 6 | "workloads": { 7 | "nvme_flush_write": { 8 | "nvme_flush": { 9 | "template": "basic_write.fio", 10 | "filesystem": true, 11 | "args": { 12 | "RW": "write", 13 | "VERIFY": "md5", 14 | "RUNTIME": "10m", 15 | "BLKSIZE": "4k", 16 | "SIZE": "100G", 17 | "DEPTH": 128 18 | } 19 | } 20 | }, 21 | "nvme_flush_read": { 22 | "nvme_flush": { 23 | "template": "basic_read.fio", 24 | "filesystem": true, 25 | "skip_fs": true, 26 | "args": { 27 | "RW": "read", 28 | "VERIFY": "md5", 29 | "RUNTIME": "10m", 30 | "BLKSIZE": "4k", 31 | "SIZE": "100G", 32 | "DEPTH": 128 33 | } 34 | } 35 | }, 36 | "nvme_flush_verify": { 37 | "nvme_flush": { 38 | "template": "basic_verify.fio", 39 | "filesystem": true, 40 | "skip_fs": true, 41 | "args": { 42 | "RW": "read", 43 | "VERIFY": "md5", 44 | "BLKSIZE": "4k", 45 | "SIZE": "100G", 46 | "DEPTH": 128 47 | } 48 | } 49 | } 50 | } 51 
| } 52 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/fio_internal_flush/dirty_power_cycle_3_fio_internal_flush_with_fs_ext4.json: -------------------------------------------------------------------------------- 1 | { 2 | "nvme_flush": false, 3 | "power_trigger": true, 4 | "iteration_count": 3, 5 | "cycle_type": "warm", 6 | "workloads": { 7 | "nvme_flush_write": { 8 | "nvme_flush": { 9 | "template": "basic_write.fio", 10 | "filesystem_type" : "ext4", 11 | "filesystem_options" : "", 12 | "filesystem": true, 13 | "args": { 14 | "RW": "write", 15 | "VERIFY": "md5", 16 | "BLKSIZE": "4k", 17 | "RUNTIME": "10m", 18 | "SIZE": "100G", 19 | "DEPTH": 128 20 | } 21 | } 22 | }, 23 | "nvme_flush_read": { 24 | "nvme_flush": { 25 | "template": "basic_read.fio", 26 | "filesystem_type" : "ext4", 27 | "filesystem_options" : "", 28 | "filesystem": true, 29 | "skip_fs": true, 30 | "args": { 31 | "RW": "read", 32 | "VERIFY": "md5", 33 | "BLKSIZE": "4k", 34 | "RUNTIME": "10m", 35 | "SIZE": "100G", 36 | "DEPTH": 128 37 | } 38 | } 39 | }, 40 | "nvme_flush_verify": { 41 | "nvme_flush": { 42 | "template": "basic_verify.fio", 43 | "filesystem_type" : "ext4", 44 | "filesystem_options" : "", 45 | "filesystem": true, 46 | "skip_fs": true, 47 | "args": { 48 | "RW": "read", 49 | "VERIFY": "md5", 50 | "BLKSIZE": "4k", 51 | "SIZE": "100G", 52 | "DEPTH": 128 53 | } 54 | } 55 | } 56 | } 57 | } 58 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/fio_internal_flush/dirty_power_cycle_3_with_fs.json: -------------------------------------------------------------------------------- 1 | { 2 | "drive_type": "ssd", 3 | "drive_interface": "nvme", 4 | "nvme_flush": false, 5 | "power_trigger": true, 6 | "iteration_count": 3, 7 | "cycle_type": "warm", 8 | "workloads":{ 9 | "nvme_flush_write": { 10 | "nvme_flush": { 11 | "template": "basic_write.fio", 12 | "filesystem": true, 13 | "args": { 14 | 
"BLKSIZE": "4k", 15 | "SIZE": "100G", 16 | "DEPTH": 128, 17 | "RUNTIME": "1h", 18 | "VERIFY": "md5", 19 | "RW": "randwrite" 20 | } 21 | } 22 | }, 23 | "nvme_flush_read": { 24 | "nvme_flush": { 25 | "template": "basic_read.fio", 26 | "filesystem": true, 27 | "skip_fs": true, 28 | "args": { 29 | "BLKSIZE": "4k", 30 | "SIZE": "100G", 31 | "DEPTH": 128, 32 | "RUNTIME": "1h", 33 | "VERIFY": "md5", 34 | "RW": "randread" 35 | } 36 | } 37 | }, 38 | "nvme_flush_verify": { 39 | "nvme_flush": { 40 | "template": "basic_verify.fio", 41 | "filesystem": true, 42 | "skip_fs": true, 43 | "args": { 44 | "RW": "randread", 45 | "VERIFY": "md5", 46 | "BLKSIZE": "4k", 47 | "SIZE": "100G", 48 | "DEPTH": 128 49 | } 50 | } 51 | } 52 | } 53 | } 54 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/fio_internal_flush/dirty_power_cycle_3_with_fs_ext4.json: -------------------------------------------------------------------------------- 1 | { 2 | "drive_type": "ssd", 3 | "drive_interface": "nvme", 4 | "nvme_flush": false, 5 | "power_trigger": true, 6 | "iteration_count": 3, 7 | "cycle_type": "warm", 8 | "workloads":{ 9 | "nvme_flush_write": { 10 | "nvme_flush": { 11 | "template": "basic_write.fio", 12 | "filesystem_type" : "ext4", 13 | "filesystem_options" : "", 14 | "filesystem": true, 15 | "skip_fs": true, 16 | "args": { 17 | "BLKSIZE": "4k", 18 | "SIZE": "100G", 19 | "DEPTH": 128, 20 | "RUNTIME": "1h", 21 | "VERIFY": "md5", 22 | "RW": "randwrite" 23 | } 24 | } 25 | }, 26 | "nvme_flush_read": { 27 | "nvme_flush": { 28 | "template": "basic_read.fio", 29 | "filesystem_type" : "ext4", 30 | "filesystem_options" : "", 31 | "filesystem": true, 32 | "skip_fs": true, 33 | "args": { 34 | "BLKSIZE": "4k", 35 | "SIZE": "100G", 36 | "DEPTH": 128, 37 | "RUNTIME": "1h", 38 | "VERIFY": "md5", 39 | "RW": "randread" 40 | } 41 | } 42 | }, 43 | "nvme_flush_verify": { 44 | "nvme_flush": { 45 | "template": "basic_verify.fio", 46 | "filesystem_type" : "ext4", 
47 | "filesystem_options" : "", 48 | "filesystem": true, 49 | "args": { 50 | "RW": "randread", 51 | "VERIFY": "md5", 52 | "BLKSIZE": "4k", 53 | "SIZE": "100G", 54 | "DEPTH": 128 55 | } 56 | } 57 | } 58 | } 59 | } 60 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/fio_internal_flush/fio_internal_flush.rst: -------------------------------------------------------------------------------- 1 | ========================= 2 | Fio Internal Flush 3 | ========================= 4 | * **Test Module** - fio_internal_flush.fio_internal_flush 5 | * **Test Control file** - Control file differs based on the drive type, file system, cycle count used to run the fio internal flush. 6 | 7 | ---------------- 8 | Test Description 9 | ---------------- 10 | **The primary purpose of this test case is to maintain data integrity by guaranteeing that all cached data is committed to non-volatile memory before any potential power loss events or system shutdowns occur. This is particularly important for SSDs that use write caching to improve performance because it reduces the risk of data loss or corruption due to incomplete writes in case of unexpected power interruptions.** 11 | 12 | --------------------------------------------------------- 13 | Test execution and steps involved 14 | --------------------------------------------------------- 15 | * Write the data into the drives using basic_write.fio job file 16 | * Do the nvme flush using the below command. 17 | 18 | EX: 19 | nvme flush /dev/nvme0n1 20 | * Do the cycle operation. 
21 | * Do the fio read using basic_read.fio job file 22 | * Do the verify using the basic_verify.fio job file 23 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/fio_internal_flush/power_cycle_1_boot.json: -------------------------------------------------------------------------------- 1 | { 2 | "drive_type": "ssd", 3 | "drive_interface": "nvme", 4 | "only_boot_drive": true, 5 | "nvme_flush": true, 6 | "power_trigger": false, 7 | "iteration_count": 1, 8 | "workloads":{ 9 | "nvme_flush_write": { 10 | "nvme_flush": { 11 | "template": "basic_write.fio", 12 | "args": { 13 | "BLKSIZE": "4k", 14 | "SIZE": "100G", 15 | "DEPTH": 128, 16 | "RUNTIME": "10m", 17 | "VERIFY": "md5", 18 | "RW": "randwrite" 19 | } 20 | } 21 | }, 22 | "nvme_flush_read": { 23 | "nvme_flush": { 24 | "template": "basic_read.fio", 25 | "args": { 26 | "BLKSIZE": "4k", 27 | "SIZE": "100G", 28 | "DEPTH": 128, 29 | "RUNTIME": "10m", 30 | "VERIFY": "md5", 31 | "RW": "randread" 32 | } 33 | } 34 | }, 35 | "nvme_flush_verify": { 36 | "nvme_flush": { 37 | "template": "basic_verify.fio", 38 | "args": { 39 | "RW": "randread", 40 | "BLKSIZE": "4k", 41 | "SIZE": "100G", 42 | "DEPTH": 128, 43 | "VERIFY": "md5" 44 | } 45 | } 46 | } 47 | } 48 | } 49 | 50 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/fio_internal_flush/power_cycle_1_with_flush.json: -------------------------------------------------------------------------------- 1 | { 2 | "drive_type": "ssd", 3 | "drive_interface": "nvme", 4 | "nvme_flush": true, 5 | "power_trigger": false, 6 | "iteration_count": 1, 7 | "workloads":{ 8 | "nvme_flush_write": { 9 | "nvme_flush": { 10 | "template": "basic_write.fio", 11 | "args": { 12 | "BLKSIZE": "4k", 13 | "SIZE": "100%", 14 | "DEPTH": 128, 15 | "RUNTIME": "10m", 16 | "VERIFY": "md5", 17 | "RW": "randwrite" 18 | } 19 | } 20 | }, 21 | "nvme_flush_read": { 22 | "nvme_flush": { 23 | "template": 
"basic_read.fio", 24 | "args": { 25 | "BLKSIZE": "4k", 26 | "SIZE": "100%", 27 | "DEPTH": 128, 28 | "RUNTIME": "10m", 29 | "VERIFY": "md5", 30 | "RW": "randread" 31 | } 32 | } 33 | }, 34 | "nvme_flush_verify": { 35 | "nvme_flush": { 36 | "template": "basic_verify.fio", 37 | "args": { 38 | "RW": "randread", 39 | "BLKSIZE": "4k", 40 | "SIZE": "100%", 41 | "DEPTH": "128", 42 | "VERIFY": "md5" 43 | } 44 | } 45 | } 46 | } 47 | } 48 | 49 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/fio_internal_flush/power_cycle_1_with_flush_with_fs.json: -------------------------------------------------------------------------------- 1 | { 2 | "drive_type": "ssd", 3 | "drive_interface": "nvme", 4 | "nvme_flush": true, 5 | "power_trigger": false, 6 | "iteration_count": 1, 7 | "workloads":{ 8 | "nvme_flush_write": { 9 | "nvme_flush": { 10 | "template": "basic_write.fio", 11 | "filesystem": true, 12 | "args": { 13 | "BLKSIZE": "4k", 14 | "SIZE": "100%", 15 | "DEPTH": 128, 16 | "RUNTIME": "10m", 17 | "VERIFY": "md5", 18 | "RW": "randwrite" 19 | } 20 | } 21 | }, 22 | "nvme_flush_read": { 23 | "nvme_flush": { 24 | "template": "basic_read.fio", 25 | "filesystem": true, 26 | "skip_fs": true, 27 | "args": { 28 | "BLKSIZE": "4k", 29 | "SIZE": "100%", 30 | "DEPTH": 128, 31 | "RUNTIME": "10m", 32 | "VERIFY": "md5", 33 | "RW": "randread" 34 | } 35 | } 36 | }, 37 | "nvme_flush_verify": { 38 | "nvme_flush": { 39 | "template": "basic_verify.fio", 40 | "filesystem": true, 41 | "skip_fs": true, 42 | "args": { 43 | "RW": "randread", 44 | "BLKSIZE": "4k", 45 | "SIZE": "100%", 46 | "DEPTH": "128", 47 | "VERIFY": "md5" 48 | } 49 | } 50 | } 51 | } 52 | } 53 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/fio_internal_flush/power_cycle_1_with_flush_with_fs_ext4.json: -------------------------------------------------------------------------------- 1 | { 2 | "drive_type": "ssd", 3 | 
"drive_interface": "nvme", 4 | "nvme_flush": true, 5 | "skip_iops_validation": true, 6 | "power_trigger": false, 7 | "iteration_count": 1, 8 | "workloads":{ 9 | "nvme_flush_write": { 10 | "nvme_flush": { 11 | "template": "basic_write.fio", 12 | "filesystem_type" : "ext4", 13 | "filesystem_options" : "", 14 | "filesystem": true, 15 | "args": { 16 | "BLKSIZE": "4k", 17 | "SIZE": "100%", 18 | "DEPTH": 128, 19 | "RUNTIME": "10m", 20 | "VERIFY": "md5", 21 | "RW": "randwrite" 22 | } 23 | } 24 | }, 25 | "nvme_flush_read": { 26 | "nvme_flush": { 27 | "template": "basic_read.fio", 28 | "filesystem_type" : "ext4", 29 | "filesystem_options" : "", 30 | "filesystem": true, 31 | "skip_fs": true, 32 | "args": { 33 | "BLKSIZE": "4k", 34 | "SIZE": "100%", 35 | "DEPTH": 128, 36 | "RUNTIME": "10m", 37 | "VERIFY": "md5", 38 | "RW": "randread" 39 | } 40 | } 41 | }, 42 | "nvme_flush_verify": { 43 | "nvme_flush": { 44 | "template": "basic_verify.fio", 45 | "filesystem_type" : "ext4", 46 | "filesystem_options" : "", 47 | "filesystem": true, 48 | "skip_fs": true, 49 | "args": { 50 | "RW": "randread", 51 | "BLKSIZE": "4k", 52 | "SIZE": "100%", 53 | "DEPTH": "128", 54 | "VERIFY": "md5" 55 | } 56 | } 57 | } 58 | } 59 | } 60 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/fio_internal_flush/power_cycle_1_without_flush.json: -------------------------------------------------------------------------------- 1 | { 2 | "drive_type": "ssd", 3 | "drive_interface": "nvme", 4 | "nvme_flush": false, 5 | "power_trigger": false, 6 | "iteration_count": 1, 7 | "workloads":{ 8 | "nvme_flush_write": { 9 | "nvme_flush": { 10 | "template": "basic_write.fio", 11 | "args": { 12 | "BLKSIZE": "4k", 13 | "SIZE": "100%", 14 | "DEPTH": 128, 15 | "RUNTIME": "2m", 16 | "VERIFY": "md5", 17 | "RW": "randwrite" 18 | } 19 | } 20 | }, 21 | "nvme_flush_read": { 22 | "nvme_flush": { 23 | "template": "basic_read.fio", 24 | "args": { 25 | "BLKSIZE": "4k", 26 | "SIZE": 
"100%", 27 | "DEPTH": 128, 28 | "RUNTIME": "2m", 29 | "VERIFY": "md5", 30 | "RW": "randread" 31 | } 32 | } 33 | }, 34 | "nvme_flush_verify": { 35 | "nvme_flush": { 36 | "template": "basic_verify.fio", 37 | "args": { 38 | "RW": "randread", 39 | "BLKSIZE": "4k", 40 | "SIZE": "100%", 41 | "DEPTH": 128, 42 | "VERIFY": "md5" 43 | } 44 | } 45 | } 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/fio_internal_flush/power_cycle_3_with_flush.json: -------------------------------------------------------------------------------- 1 | { 2 | "drive_type": "ssd", 3 | "drive_interface": "nvme", 4 | "nvme_flush": true, 5 | "power_trigger": false, 6 | "iteration_count": 3, 7 | "workloads":{ 8 | "nvme_flush_write": { 9 | "nvme_flush": { 10 | "template": "basic_write.fio", 11 | "args": { 12 | "BLKSIZE": "4k", 13 | "SIZE": "100G", 14 | "DEPTH": 128, 15 | "RUNTIME": "1h", 16 | "VERIFY": "md5", 17 | "RW": "randwrite" 18 | } 19 | } 20 | }, 21 | "nvme_flush_read": { 22 | "nvme_flush": { 23 | "template": "basic_read.fio", 24 | "args": { 25 | "BLKSIZE": "4k", 26 | "SIZE": "100G", 27 | "DEPTH": 128, 28 | "RUNTIME": "1h", 29 | "VERIFY": "md5", 30 | "RW": "randread" 31 | } 32 | } 33 | }, 34 | "nvme_flush_verify": { 35 | "nvme_flush": { 36 | "template": "basic_verify.fio", 37 | "args": { 38 | "RW": "randread", 39 | "BLKSIZE": "4k", 40 | "SIZE": "100G", 41 | "DEPTH": "128", 42 | "VERIFY": "md5" 43 | } 44 | } 45 | } 46 | } 47 | } 48 | 49 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/fio_internal_flush/power_cycle_3_with_flush_with_fs.json: -------------------------------------------------------------------------------- 1 | { 2 | "drive_type": "ssd", 3 | "drive_interface": "nvme", 4 | "nvme_flush": true, 5 | "power_trigger": false, 6 | "iteration_count": 3, 7 | "workloads":{ 8 | "nvme_flush_write": { 9 | "nvme_flush": { 10 | "template": "basic_write.fio", 11 | 
"filesystem": true, 12 | "args": { 13 | "BLKSIZE": "4k", 14 | "SIZE": "100G", 15 | "DEPTH": 128, 16 | "RUNTIME": "1h", 17 | "VERIFY": "md5", 18 | "RW": "randwrite" 19 | } 20 | } 21 | }, 22 | "nvme_flush_read": { 23 | "nvme_flush": { 24 | "template": "basic_read.fio", 25 | "filesystem": true, 26 | "skip_fs": true, 27 | "args": { 28 | "BLKSIZE": "4k", 29 | "SIZE": "100G", 30 | "DEPTH": 128, 31 | "RUNTIME": "1h", 32 | "VERIFY": "md5", 33 | "RW": "randread" 34 | } 35 | } 36 | }, 37 | "nvme_flush_verify": { 38 | "nvme_flush": { 39 | "template": "basic_verify.fio", 40 | "filesystem": true, 41 | "skip_fs": true, 42 | "args": { 43 | "RW": "randread", 44 | "BLKSIZE": "4k", 45 | "SIZE": "100G", 46 | "RUNTIME": 0, 47 | "DEPTH": "128", 48 | "VERIFY": "md5" 49 | } 50 | } 51 | } 52 | } 53 | } 54 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/fio_internal_flush/power_cycle_3_with_flush_with_fs_ext4.json: -------------------------------------------------------------------------------- 1 | { 2 | "drive_type": "ssd", 3 | "drive_interface": "nvme", 4 | "nvme_flush": true, 5 | "power_trigger": false, 6 | "iteration_count": 3, 7 | "workloads":{ 8 | "nvme_flush_write": { 9 | "nvme_flush": { 10 | "template": "basic_write.fio", 11 | "filesystem_type" : "ext4", 12 | "filesystem_options" : "", 13 | "filesystem": true, 14 | "args": { 15 | "BLKSIZE": "4k", 16 | "SIZE": "100G", 17 | "DEPTH": 128, 18 | "RUNTIME": "1h", 19 | "VERIFY": "md5", 20 | "RW": "randwrite" 21 | } 22 | } 23 | }, 24 | "nvme_flush_read": { 25 | "nvme_flush": { 26 | "template": "basic_read.fio", 27 | "filesystem_type" : "ext4", 28 | "filesystem_options" : "", 29 | "filesystem": true, 30 | "skip_fs": true, 31 | "args": { 32 | "BLKSIZE": "4k", 33 | "SIZE": "100G", 34 | "DEPTH": 128, 35 | "RUNTIME": "1h", 36 | "VERIFY": "md5", 37 | "RW": "randread" 38 | } 39 | } 40 | }, 41 | "nvme_flush_verify": { 42 | "nvme_flush": { 43 | "template": "basic_verify.fio", 44 | 
"filesystem_type" : "ext4", 45 | "filesystem_options" : "", 46 | "filesystem": true, 47 | "skip_fs": true, 48 | "args": { 49 | "RW": "randread", 50 | "BLKSIZE": "4k", 51 | "SIZE": "100G", 52 | "RUNTIME": 0, 53 | "DEPTH": "128", 54 | "VERIFY": "md5" 55 | } 56 | } 57 | } 58 | } 59 | } 60 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/fio_internal_flush/power_cycle_3_without_flush.json: -------------------------------------------------------------------------------- 1 | { 2 | "drive_type": "ssd", 3 | "drive_interface": "nvme", 4 | "nvme_flush": false, 5 | "power_trigger": false, 6 | "iteration_count": 3, 7 | "workloads":{ 8 | "nvme_flush_write": { 9 | "nvme_flush": { 10 | "template": "basic_write.fio", 11 | "args": { 12 | "BLKSIZE": "4k", 13 | "SIZE": "100G", 14 | "DEPTH": 128, 15 | "RUNTIME": "1h", 16 | "VERIFY": "md5", 17 | "RW": "randwrite" 18 | } 19 | } 20 | }, 21 | "nvme_flush_read": { 22 | "nvme_flush": { 23 | "template": "basic_read.fio", 24 | "args": { 25 | "BLKSIZE": "4k", 26 | "SIZE": "100G", 27 | "DEPTH": 128, 28 | "RUNTIME": "1h", 29 | "VERIFY": "md5", 30 | "RW": "randread" 31 | } 32 | } 33 | }, 34 | "nvme_flush_verify": { 35 | "nvme_flush": { 36 | "template": "basic_verify.fio", 37 | "args": { 38 | "RW": "randread", 39 | "BLKSIZE": "4k", 40 | "SIZE": "100G", 41 | "DEPTH": 128, 42 | "VERIFY": "md5" 43 | } 44 | } 45 | } 46 | } 47 | } 48 | 49 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/fio_internal_flush/power_cycle_3_without_flush_with_fs.json: -------------------------------------------------------------------------------- 1 | { 2 | "drive_type": "ssd", 3 | "drive_interface": "nvme", 4 | "nvme_flush": false, 5 | "power_trigger": false, 6 | "iteration_count": 3, 7 | "workloads":{ 8 | "nvme_flush_write": { 9 | "nvme_flush": { 10 | "template": "basic_write.fio", 11 | "filesystem": true, 12 | "args": { 13 | "BLKSIZE": "4k", 14 | "SIZE": 
"100G", 15 | "DEPTH": 128, 16 | "RUNTIME": "1h", 17 | "VERIFY": "md5", 18 | "RW": "randwrite" 19 | } 20 | } 21 | }, 22 | "nvme_flush_read": { 23 | "nvme_flush": { 24 | "template": "basic_read.fio", 25 | "filesystem": true, 26 | "skip_fs": true, 27 | "args": { 28 | "BLKSIZE": "4k", 29 | "SIZE": "100G", 30 | "DEPTH": 128, 31 | "RUNTIME": "1h", 32 | "VERIFY": "md5", 33 | "RW": "randread" 34 | } 35 | } 36 | }, 37 | "nvme_flush_verify": { 38 | "nvme_flush": { 39 | "template": "basic_verify.fio", 40 | "filesystem": true, 41 | "skip_fs": true, 42 | "args": { 43 | "RW": "randread", 44 | "BLKSIZE": "4k", 45 | "SIZE": "100G", 46 | "RUNTIME": 0, 47 | "DEPTH": 128, 48 | "VERIFY": "md5" 49 | } 50 | } 51 | } 52 | } 53 | } 54 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/fio_internal_flush/power_cycle_3_without_flush_with_fs_ext4.json: -------------------------------------------------------------------------------- 1 | { 2 | "drive_type": "ssd", 3 | "drive_interface": "nvme", 4 | "nvme_flush": false, 5 | "power_trigger": false, 6 | "iteration_count": 3, 7 | "workloads":{ 8 | "nvme_flush_write": { 9 | "nvme_flush": { 10 | "template": "basic_write.fio", 11 | "filesystem_type" : "ext4", 12 | "filesystem_options" : "", 13 | "filesystem": true, 14 | "args": { 15 | "BLKSIZE": "4k", 16 | "SIZE": "100G", 17 | "DEPTH": 128, 18 | "RUNTIME": "1h", 19 | "VERIFY": "md5", 20 | "RW": "randwrite" 21 | } 22 | } 23 | }, 24 | "nvme_flush_read": { 25 | "nvme_flush": { 26 | "template": "basic_read.fio", 27 | "filesystem_type" : "ext4", 28 | "filesystem_options" : "", 29 | "filesystem": true, 30 | "skip_fs": true, 31 | "args": { 32 | "BLKSIZE": "4k", 33 | "SIZE": "100G", 34 | "DEPTH": 128, 35 | "RUNTIME": "1h", 36 | "VERIFY": "md5", 37 | "RW": "randread" 38 | } 39 | } 40 | }, 41 | "nvme_flush_verify": { 42 | "nvme_flush": { 43 | "template": "basic_verify.fio", 44 | "filesystem_type" : "ext4", 45 | "filesystem_options" : "", 46 | "filesystem": 
true, 47 | "skip_fs": true, 48 | "args": { 49 | "RW": "randread", 50 | "BLKSIZE": "4k", 51 | "SIZE": "100G", 52 | "RUNTIME": 0, 53 | "DEPTH": 128, 54 | "VERIFY": "md5" 55 | } 56 | } 57 | } 58 | } 59 | } 60 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/fio_synth_flash/UBOOTT_Workload_loop.json: -------------------------------------------------------------------------------- 1 | { 2 | "test_drive_filter": true, 3 | "drive_type": "ssd", 4 | "only_boot_drive": true, 5 | "workload": ["UBOOTT_Workload_loop"], 6 | "fio_synth_params": { 7 | "synth_verify": true, 8 | "ignore_error": true, 9 | "parallel": true 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/fio_synth_flash/UBOOTT_workload_stress.rst: -------------------------------------------------------------------------------- 1 | =========================== 2 | UBOOTT Workload Loop Stress 3 | =========================== 4 | * **Test Module** - fio_synth_flash.fio_synth_flash 5 | * **Test Control file path ** 6 | - autoval_ssd/tests/fio_synth_flash/UBOOTT_Workload_loop.json 7 | - autoval_ssd/tests/fio_synth_flash/USSDT_Workload_loop.json 8 | 9 | ----------------- 10 | Test Description 11 | ----------------- 12 | *Test measures the drive performance with the specified Workload(UBOOTT_Workload_Loop, USSDT_Workload_loop.json) and compares the basic fio parameters such as bandwidth, latency with the reference values* 13 | 14 | ------------------------ 15 | Test Control parameters 16 | ------------------------ 17 | 18 | ``{ 19 | "test_drive_filter": true, 20 | "drive_type": "ssd", 21 | "only_boot_drive": true, 22 | "workload": ["UBOOTT_Workload_loop"], 23 | "fio_synth_params": { 24 | "synth_verify": true, 25 | "ignore_error": true, 26 | "parallel": true 27 | } 28 | }`` 29 | 30 | ----------------------------------------------------------------------------------- 31 | Phases of Test execution and 
steps involved in each phase with UBOOTT_Workload_Loop 32 | ----------------------------------------------------------------------------------- 33 | 34 | SetUp Phase 35 | ----------- 36 | 37 | ========================================= ==================================================================================== 38 | Step Description Commands 39 | ========================================= ==================================================================================== 40 | ``Check if DUT is accessible`` ping6 -c 3 -i 0.2 41 | 42 | ``Install the rpms required sudo dnf -y --allowerasing --disablerepo=\* --enablerepo=fava install -b 43 | such as fio, fb-FioSynthFlash`` 44 | 45 | ``Identify Boot drive`` ls -la /sys/block/nvme0n1, file -s /dev/nvme0n1, lsblk -J 46 | 47 | ``Create SMART log directory`` NA 48 | 49 | ``Get list of drives based on NA 50 | type and interface`` 51 | 52 | ``Collect SMART and Config data`` nvme smart-log /dev/nvme0n1 -o json 53 | 54 | ``Set up the fiosynthflash results directory`` NA 55 | ============================================= ====================================================================================== 56 | 57 | Execution Phase 58 | --------------- 59 | 60 | ========================================== =================================================================================== 61 | Step Description Commands 62 | ========================================== =================================================================================== 63 | ``Format the drive if formatting is enabled`` nvme format /dev/ -s -r 64 | 65 | 66 | ``Backup workload loop stress json for restore NA 67 | at test end`` 68 | 69 | 70 | ``Enable latency monitor settings`` NA 71 | 72 | 73 | ``Start precondition workload loop stress fb-FioSynthFlash -x -w UBOOTT_Workload_loop -f -d /dev/ 74 | fiosynth job`` 75 | 76 | 77 | ``Collect post latency monitor logs`` NA 78 | 79 | 80 | ``Parse and validate lm_parser results`` NA 81 | 82 | 83 | 
``Collect drive performance data`` NA 84 | 85 | 86 | ``Disable latency monitor settings`` NA 87 | 88 | 89 | ``Enable latency monitor settings`` NA 90 | 91 | 92 | ``Start workload loop stress fiosynth job`` fb-FioSynthFlash -x -w UBOOTT_Workload_loop -f -d /dev/ 93 | 94 | 95 | ``Collect post latency monitor logs`` NA 96 | 97 | 98 | ``Parse and validate lm_parser results`` NA 99 | 100 | 101 | ``Collect drive performance data`` NA 102 | 103 | 104 | ``Disable latency monitor settings`` NA 105 | ============================================ ===================================================================================== 106 | 107 | CleanUp Phase 108 | ------------- 109 | 110 | ========================================== ============================================================================= 111 | Step Description Commands 112 | ========================================== ============================================================================= 113 | ``Restore workload stress json file`` NA 114 | 115 | ``Disable latency monitor settings`` NA 116 | 117 | ``Change nvme io timeout in cleanup phase`` /sys/module/nvme_core/parameters/io_timeout will be executed as a file script 118 | 119 | ``Collect SMART and Config data post test`` nvme smart-log /dev/nvme0n1 -o json 120 | 121 | ``Append storage_test_base config into the NA 122 | config_results file from test base`` 123 | =========================================== =============================================================================== 124 | 125 | ---------------- 126 | Expected Result 127 | ---------------- 128 | * The DUT is still accessible at the end of the test. 129 | * The system configuration should not show any changes. 130 | * Fio Synth workload should have been run without error. 131 | * Collect and check fio test result to see if any value out of expected ones. 132 | * Each fio output should meet the target file minimum-maximum values. 
133 | * The system log info should not have any error/failure logs. 134 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/fio_synth_flash/USSDT_Workload_loop.json: -------------------------------------------------------------------------------- 1 | { 2 | "test_drive_filter": true, 3 | "drive_type": "ssd", 4 | "dc_lm_validation": true, 5 | "workload": ["USSDT_Workload_loop"], 6 | "fio_synth_params":{ 7 | "synth_verify": true, 8 | "ignore_error": true, 9 | "parallel": true 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/fsync/control.json: -------------------------------------------------------------------------------- 1 | { 2 | "testname": "fsync_test", 3 | "drive_type": "ssd", 4 | "template": "prep_flash.fio", 5 | "pre_condition_cycle": 1, 6 | "run_definition": { 7 | "BLKSIZE": "128K", 8 | "DEPTH": 32, 9 | "LOOPS": 0 10 | } 11 | } 12 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/iogo/control.json: -------------------------------------------------------------------------------- 1 | 2 | { 3 | "fstype": "xfs", 4 | "drive_type": "ssd", 5 | "drive_interface": "nvme", 6 | "max_latency": 15, 7 | "precondition_loops": 2, 8 | "ocp_lm_commands": "True" 9 | } 10 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/namespace_utilization_test/namespace_utilization.json: -------------------------------------------------------------------------------- 1 | { 2 | "drive_type" : "ssd", 3 | "cycle" : 3, 4 | "expected_nuse_size": 2621440, 5 | "nvme_format_timeout": 1200, 6 | "run_definition": { 7 | "write": { 8 | "template": "basic_write.fio", 9 | "args": { 10 | "BLKSIZE": "256k", 11 | "SIZE": "10GB", 12 | "DEPTH": 32, 13 | "RUNTIME": "3", 14 | "VERIFY": "md5", 15 | "DO_VERIFY": 0, 16 | "RW": "write" 17 | } 18 | } 19 | } 20 | } 21 | 
-------------------------------------------------------------------------------- /src/autoval_ssd/tests/namespace_utilization_test/namespace_utilization.rst: -------------------------------------------------------------------------------- 1 | ========================== 2 | Namespace Utilization Test 3 | ========================== 4 | * **Test Module** - namespace_utilization_test.namespace_utilization_test 5 | * **Test Control file** 6 | 7 | *expected_nuse_size is 2621440 - /autoval_ssd/tests/namespace_utilization_test/namespace_utilization.json* 8 | 9 | ---------------- 10 | Test Description 11 | ---------------- 12 | **This test validates the namespace utilization size by running a fio job and checking the size using the 'nvme id-ns /dev/nvmex' command.** 13 | 14 | --------------------------------------------------------- 15 | Test execution and steps involved 16 | --------------------------------------------------------- 17 | * Filter the drives with crypto erase supported options 18 | * Filter the drives with nuse supported drives 19 | * Format the drive with secure erase option 20 | * Read nuse from id-ns and check that it == 0 21 | * Sequentially Write 10GB of data to the drive 22 | * Read nuse from id-ns and check that it equals 2621440(0x280000) 23 | - indicating 10GB of namespace has been used 24 | * Format the drive with crypto-erase option 25 | * Repeat the steps 3-5 for the given cycle_count 26 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/namespace_utilization_test/namespace_utilization_test.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | # Copyright (c) 2019-present, Facebook, Inc. 3 | # All rights reserved. 4 | # 5 | # Description : This test validates the namespace utilization size 6 | # by running fio job and check the size using the 7 | # 'nvme id-ns /dev/nvmex' command. 
8 | 9 | # pyre-unsafe 10 | import time 11 | 12 | from autoval.lib.host.component.component import COMPONENT 13 | from autoval.lib.utils.autoval_errors import ErrorType 14 | from autoval.lib.utils.autoval_log import AutovalLog 15 | 16 | from autoval_ssd.lib.utils.fio_runner import FioRunner 17 | from autoval_ssd.lib.utils.storage.nvme.nvme_utils import NVMeUtils 18 | from autoval_ssd.lib.utils.storage.storage_test_base import StorageTestBase 19 | 20 | 21 | class NamespaceUtilizationTest(StorageTestBase): 22 | """ 23 | This script is used to ensure that namespace utilization size 24 | by running the fio job and check the size using the 25 | 'nvme id-ns /dev/nvmex' command. 26 | """ 27 | 28 | def __init__(self, *args, **kwargs) -> None: 29 | super().__init__(*args, **kwargs) 30 | self.cycle_count = self.test_control.get("cycle", 3) 31 | self.expected_nuse_size = self.test_control.get("expected_nuse_size", 2621440) 32 | self.nvme_format_timeout = self.test_control.get("nvme_format_timeout", 1200) 33 | self.nvme_format_block_size = self.test_control.get( 34 | "nvme_format_block_size", None 35 | ) 36 | 37 | def execute(self) -> None: 38 | """ 39 | Test Flow: 40 | 1. Filter the drives with crypto erase supported options 41 | 2. Filter the drives with nuse supported drives 42 | 3. Format the drive with secure erase option 43 | 4. Read nuse from id-ns and check that it == 0 44 | 5. Sequentially Write 10GB of data to the drive 45 | 6. Read nuse from id-ns and check that it equals 2621440(0x280000) 46 | - indicating 10GB of namespace has been used 47 | 7. Format the drive with crypto-erase option 48 | 8. 
Repeat the steps 3-5 for the given cycle_count 49 | """ 50 | nuse_test_drives = self.get_nuse_test_drives() 51 | if nuse_test_drives: 52 | self.test_control["drives"] = nuse_test_drives 53 | self.fio = FioRunner(self.host, self.test_control) 54 | self.validate_no_exception( 55 | self.fio.test_setup, 56 | [], 57 | "Fio setup()", 58 | component=COMPONENT.STORAGE_DRIVE, 59 | error_type=ErrorType.TOOL_ERR, 60 | ) 61 | AutovalLog.log_info(f"Drives for namespace utilization test {nuse_test_drives}") 62 | for i in range(self.cycle_count): 63 | AutovalLog.log_info("Cycle Count: %d" % (i + 1)) 64 | for drive in nuse_test_drives: 65 | self.validate_no_exception( 66 | NVMeUtils.format_nvme, 67 | [self.host, drive, 2, self.nvme_format_block_size], 68 | f"{drive}: NVME formatting using Cryptographic erase option 2", 69 | component=COMPONENT.STORAGE_DRIVE, 70 | error_type=ErrorType.NVME_ERR, 71 | ) 72 | timeout = time.time() + self.nvme_format_timeout 73 | nuse_all_zero = 0 74 | while time.time() < timeout: 75 | # reset nuse_all_zero to 0 for every checking cycle 76 | nuse_all_zero = 0 77 | for drive in nuse_test_drives: 78 | # check if any drive's nuse equal 0 79 | nuse_all_zero = nuse_all_zero or drive.get_size("nuse") 80 | # if nuse_all_zero quit while loop, no need to wait for timeout 81 | if nuse_all_zero == 0: 82 | break 83 | time.sleep(30) 84 | self.validate_equal( 85 | nuse_all_zero, 86 | 0, 87 | "nuse size of all drive after drive erase operation", 88 | component=COMPONENT.STORAGE_DRIVE, 89 | error_type=ErrorType.DRIVE_ERR, 90 | ) 91 | self.validate_no_exception( 92 | self.fio.start_test, 93 | [], 94 | "Fio start_test()", 95 | component=COMPONENT.STORAGE_DRIVE, 96 | error_type=ErrorType.TOOL_ERR, 97 | ) 98 | for drive in nuse_test_drives: 99 | nuse_size = drive.get_size("nuse") 100 | self.validate_equal( 101 | nuse_size, 102 | self.expected_nuse_size, 103 | f"nuse size of {drive} after 10GB write operation", 104 | component=COMPONENT.STORAGE_DRIVE, 105 | 
error_type=ErrorType.DRIVE_ERR, 106 | ) 107 | time.sleep(20) 108 | 109 | def get_nuse_test_drives(self): 110 | """ 111 | Get nuse Test Drives. 112 | 113 | This method is used to filter the drives with crypto erase and 114 | nuse supported drives. 115 | """ 116 | nuse_test_drives = [] 117 | for drive in self.test_drives: 118 | out = drive.get_crypto_erase_support_status() 119 | if out: 120 | nsze = drive.get_size("nsze") 121 | try: 122 | NVMeUtils.format_nvme(self.host, drive, 1) 123 | AutovalLog.log_info( 124 | f"{drive}: NVME Formatting using User Data Erase option 1" 125 | ) 126 | except Exception: 127 | self.validate_condition( 128 | False, 129 | f"{drive}: NVME formatting with User Data Erase option 1 not supported", 130 | raise_on_fail=False, 131 | component=COMPONENT.STORAGE_DRIVE, 132 | error_type=ErrorType.NVME_ERR, 133 | ) 134 | nuse = drive.get_size("nuse") 135 | AutovalLog.log_info(f"{drive}: Nuse size after User Data Erase: {nuse}") 136 | if nsze != nuse: 137 | nuse_test_drives.append(drive) 138 | else: 139 | AutovalLog.log_info( 140 | f"Skipping {drive} for nuse test, since nuse and nsze" 141 | f"values are same even after User Data Erase operation" 142 | ) 143 | self.validate_non_empty_list( 144 | nuse_test_drives, 145 | "Validating crypto erase supported drives", 146 | component=COMPONENT.STORAGE_DRIVE, 147 | error_type=ErrorType.NVME_ERR, 148 | ) 149 | return nuse_test_drives 150 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/nvme_cli/control.json: -------------------------------------------------------------------------------- 1 | { 2 | "drive_type": "ssd", 3 | "drive_interface": "nvme", 4 | "include_boot_drive": true, 5 | "boot_drive_physical_location": "0000:01:00.0" 6 | } 7 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/nvme_cli/control_disable_boot_drive.json: 
-------------------------------------------------------------------------------- 1 | { 2 | "drive_type": "ssd", 3 | "drive_interface": "nvme", 4 | "include_boot_drive": false 5 | } 6 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/nvme_cli/control_no_crypto_erase.json: -------------------------------------------------------------------------------- 1 | { 2 | "drive_type": "ssd", 3 | "drive_interface": "nvme", 4 | "check_crypto_erase": false, 5 | "include_boot_drive": true 6 | } 7 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/nvme_cli/nvme_cli.rst: -------------------------------------------------------------------------------- 1 | ========================= 2 | Nvme Cli 3 | ========================= 4 | * **Test Module** - nvme_cli.nvme_cli 5 | * **Test Control file** 6 | - *all drives - /autoval_ssd/tests/nvme_cli/control.json* 7 | - *only data drives - /autoval_ssd/tests/nvme_cli/control_disable_boot_drive.json* 8 | - *disable crypto_erase check - /autoval_ssd/tests/nvme_cli/control_no_crypto_erase.json* 9 | - *nvme version - /autoval_ssd/tests/nvme_cli/nvme_cli_with_nvme_version.json* 10 | 11 | ---------------- 12 | Test Description 13 | ---------------- 14 | ** Test to validate if NVMe spec commands are supported by the NVMe Drives. 15 | Get the controller properties, 16 | Get the Firmware Log, 17 | Check Crypto Erase Support, 18 | Get Error Log Entries, 19 | Log the properties of the specified namespace, 20 | Get the operating parameters of the specified controller, 21 | identified by the Feature Identifier, 22 | Get Vendor Specific Internal Logs, 23 | Retrieve Command Effects Log. 24 | Get Vendor Specific drive up time, 25 | Get Smart log, 26 | Get/Set Power mode. 
27 | validate capacity 28 | ** 29 | 30 | Common Objective of Nvme_cli test 31 | --------------------------------------------- 32 | Verify that a DUT with SSD NVMe can support the NVMe Spec commands. 33 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/nvme_cli/nvme_cli_with_nvme_version.json: -------------------------------------------------------------------------------- 1 | { 2 | "drive_type": "ssd", 3 | "drive_interface": "nvme", 4 | "include_boot_drive": true, 5 | "boot_drive_physical_location": "0000:01:00.0", 6 | "nvme_version" : "nvme-cli-2.8-1.hs.el9" 7 | } 8 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/nvme_format/control_nvme.json: -------------------------------------------------------------------------------- 1 | { 2 | "fstype": "ext4", 3 | "drive_type": "ssd", 4 | "drive_interface": "nvme", 5 | "skip_iops_validation": true, 6 | "format_cycles": 1, 7 | "cycle_count": 1, 8 | "ignore_smart": true, 9 | "secure_erase_option": [0, 1, 2], 10 | "run_definition": { 11 | "filesystem_io": { 12 | "template": "nvme_format_template.fio", 13 | "args": { 14 | "NAME": "fio_nvme", 15 | "RW": "write", 16 | "BLKSIZE": "4K", 17 | "RUNTIME": "3m", 18 | "DEPTH": 128, 19 | "DO_VERIFY": 1, 20 | "BUFFER_PATTERN": "0xA5" 21 | } 22 | } 23 | } 24 | } 25 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/nvme_format/control_nvme_crypto_erase.json: -------------------------------------------------------------------------------- 1 | { 2 | "fstype": "ext4", 3 | "format_cycles": 10, 4 | "cycle_count": 1, 5 | "drive_type": "ssd", 6 | "drive_interface": "nvme", 7 | "skip_iops_validation": true, 8 | "secure_erase_option": [2], 9 | "run_definition": { 10 | "filesystem_io": { 11 | "template": "nvme_format_template.fio", 12 | "args": { 13 | "NAME": "fio_nvme", 14 | "RW": "write", 15 | "BLKSIZE": "4K", 16 | "RUNTIME": "3m", 
17 | "DEPTH": 128, 18 | "DO_VERIFY": 1, 19 | "BUFFER_PATTERN": "0xA5" 20 | } 21 | } 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/nvme_format/control_nvme_no_secure_erase.json: -------------------------------------------------------------------------------- 1 | { 2 | "fstype": "ext4", 3 | "format_cycles": 10, 4 | "cycle_count": 1, 5 | "stop_on_error": true, 6 | "drive_type": "ssd", 7 | "drive_interface": "nvme", 8 | "skip_iops_validation": true, 9 | "secure_erase_option": [0], 10 | "run_definition": { 11 | "filesystem_io": { 12 | "template": "nvme_format_template.fio", 13 | "args": { 14 | "NAME": "fio_nvme", 15 | "RW": "write", 16 | "BLKSIZE": "4K", 17 | "RUNTIME": "3m", 18 | "DEPTH": 128, 19 | "DO_VERIFY": 1, 20 | "BUFFER_PATTERN": "0xA5" 21 | } 22 | } 23 | } 24 | } 25 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/nvme_format/control_nvme_sanity_check.json: -------------------------------------------------------------------------------- 1 | { 2 | "fstype": "ext4", 3 | "drive_type": "ssd", 4 | "drive_interface": "nvme", 5 | "skip_iops_validation": true, 6 | "format_cycles": 1, 7 | "cycle_count": 1, 8 | "secure_erase_option": [0, 1], 9 | "run_definition": { 10 | "filesystem_io": { 11 | "template": "filesystem_template.job", 12 | "args": { 13 | "NAME": "fio_nvme", 14 | "RW": "write", 15 | "BLKSIZE": "4K", 16 | "RUNTIME": "3m", 17 | "DEPTH": 128, 18 | "DO_VERIFY": 1, 19 | "BUFFER_PATTERN": "0xA5" 20 | } 21 | } 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/nvme_format/control_nvme_user_data_erase.json: -------------------------------------------------------------------------------- 1 | { 2 | "fstype": "ext4", 3 | "format_cycles": 10, 4 | "cycle_count": 1, 5 | "drive_type": "ssd", 6 | "drive_interface": "nvme", 7 | "skip_iops_validation": true, 8 | 
"secure_erase_option": [1], 9 | "run_definition": { 10 | "filesystem_io": { 11 | "template": "nvme_format_template.fio", 12 | "args": { 13 | "NAME": "fio_nvme", 14 | "RW": "write", 15 | "BLKSIZE": "4K", 16 | "RUNTIME": "3m", 17 | "DEPTH": 128, 18 | "DO_VERIFY": 1, 19 | "BUFFER_PATTERN": "0xA5" 20 | } 21 | } 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/nvme_format/nvme_format.rst: -------------------------------------------------------------------------------- 1 | =============== 2 | Nvme Format Test 3 | =============== 4 | This test runs Fio on SSD drives, performs NVME formatting on mounted drives, and verifies the 0x00 pattern on drives by running FIO verify. 5 | 6 | * **Test Module** - nvme_format.nvme_format 7 | 8 | * **Test Control file** 9 | ------------- 10 | * `control_nvme_crypto_erase`: Control file for NVMe crypto erase test. 11 | * `no_secure_erase`: Control file for formatting without secure erase. 12 | * `sanity_check`: Control file for sanity checking the test environment. 13 | * `user_data_erase`: Control file for erasing user data from the drives. 14 | * `control_nvme`: Control file for controlling NVMe operations. 15 | 16 | --------------- 17 | Test Description: 18 | --------------- 19 | 20 | The NVMe format test case is designed to evaluate the drive's capability of being securely erased and formatted. 21 | This is achieved by utilizing three distinct options for erasing data on the drive: 22 | 23 | No secure erase option - This choice will only format the drive without implementing 24 | any additional security measures. 25 | User Data Erase - This choice is used for formatting the drive with option 2 for user data erase, 26 | without implementing additional security measures. 27 | Cryptographic Erase - This choice removes all user data from the drive and also implements 28 | additional security measures to guarantee that the data is completely unrecoverable. 
29 | 30 | By testing these various options, the test case can help confirm that the drive is able to be securely formatted and meets the required security standards for use in a business setting. 31 | --------------------------------------------------------- 32 | Test execution and steps involved 33 | --------------------------------------------------------- 34 | 35 | * In the storage_test_setup method, the host dictionary and mount point are set up. 36 | Then, the execute method is called, which performs the following steps: 37 | * If test drives are available, they are used; otherwise, Fio installation is checked and performed if necessary. 38 | * For each format cycle, secure erase options are iterated upon. 39 | If the secure erase option is 2 (crypto erase), the script checks whether all drives support crypto erase; if not, this option is skipped for the current cycle. 40 | * Previous Fio sessions are cleaned up. 41 | * Fio starts the test. 42 | * Drives are formatted in parallel using the format_nvme function, which takes care of 43 | formatting namespaces for each drive. 44 | * After formatting, the verify_pattern function is called to check if the pattern on the drives is 0x00. 45 | * Additional functions included in the script are: 46 | * check_crypto_erase_support: Checks whether all test drives support crypto erase. 47 | * format_nvme: Formats an NVMe drive based on the provided secure erase option, file system type, and block name. It also verifies the pattern on the drive after formatting. 48 | * _format_nvme: A private function to format the NVMe drive with the given secure erase option, file system type, and device. It also verifies the pattern on the drive after formatting. 49 | * _mount_drive: Mounts the drive with the specified file system type. 50 | * _verify_pattern: Verifies the pattern on the drive. 51 | * get_test_params: Returns a string containing the test parameters for this run. 
52 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/nvme_format/nvme_format_1_cycle.json: -------------------------------------------------------------------------------- 1 | { 2 | "fstype": "ext4", 3 | "drive_type": "ssd", 4 | "drive_interface": "nvme", 5 | "skip_iops_validation": true, 6 | "format_cycles": 1, 7 | "cycle_count": 1, 8 | "secure_erase_option": [0, 1, 2], 9 | "run_definition": { 10 | "filesystem_io": { 11 | "template": "nvme_format_template.fio", 12 | "args": { 13 | "NAME": "fio_nvme", 14 | "RW": "write", 15 | "BLKSIZE": "4K", 16 | "RUNTIME": "3m", 17 | "DEPTH": 128, 18 | "DO_VERIFY": 1, 19 | "BUFFER_PATTERN": "0xA5" 20 | } 21 | } 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/nvme_ns_resize/nvme_ns_resize.rst: -------------------------------------------------------------------------------- 1 | ========================= 2 | Nvme_ns_resize 3 | ========================= 4 | * **Test Module** - nvme_ns_resize.nvme_ns_resize 5 | * **Test Control file** - op_pct_sweep_control 6 | 7 | ---------------- 8 | 9 | Test Description 10 | ---------------- 11 | **The purpose of the test is to create namespace using NVMe create-ns command with a variety of sizes and 12 | run FIO to ensure IOs can be issued to the new namespace. 13 | 14 | --------------------------------------------------------- 15 | Test execution and steps involved 16 | --------------------------------------------------------- 17 | * Verify the DUT is accessible. 18 | * Collect only the nvme SSD drives on the DUT based on drive type and interface. 19 | * Get the drives which support namespace management. 20 | * For each of drive, different userspace will be created. 21 | * Once the namespace is created, fio is run to ensure that namespace is properly created. 
22 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/nvme_ns_resize/op_pct_sweep_control.json: -------------------------------------------------------------------------------- 1 | { 2 | "drive_type": "ssd", 3 | "drive_interface": "nvme", 4 | "sweep_param_key": "overprovisioning", 5 | "sweep_param_unit": "percent", 6 | "sweep_param_values": [10, 20, 28, 50], 7 | "cycle_count": 1, 8 | "skip_iops_validation": true, 9 | "run_definition": { 10 | "filesystem_io": { 11 | "template": "filesystem_template.job", 12 | "args": { 13 | "NAME": "fio_nvme", 14 | "RW": "write", 15 | "BLKSIZE": "4K", 16 | "SIZE": "10G", 17 | "RUNTIME": 60, 18 | "DEPTH": 128, 19 | "MIXWRITE": 50, 20 | "MIXREAD": 50 21 | } 22 | } 23 | } 24 | } 25 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/nvme_ns_resize/usercap_TB_sweep_500G_control.json: -------------------------------------------------------------------------------- 1 | { 2 | "drive_type": "ssd", 3 | "drive_interface": "nvme", 4 | "sweep_param_key": "usercapacity", 5 | "sweep_param_unit": "num_TB", 6 | "sweep_param_values": [0.5], 7 | "nvme_id_ctrl_filter": "nvme_id_ctrl[\"tnvmcap\"] >= 536870912000", 8 | "cycle_count": 1, 9 | "skip_iops_validation": true, 10 | "run_definition": { 11 | "filesystem_io": { 12 | "template": "filesystem_template.job", 13 | "args": { 14 | "NAME": "fio_nvme", 15 | "RW": "write", 16 | "BLKSIZE": "4K", 17 | "SIZE": "10G", 18 | "RUNTIME": 60, 19 | "DEPTH": 128, 20 | "MIXWRITE": 50, 21 | "MIXREAD": 50 22 | } 23 | } 24 | } 25 | } 26 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/sed_check/sed_check.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | # pyre-unsafe 4 | """ 5 | Test validates if the Self Encrypting Drive supports OPAL 2.0 spec 6 | """ 7 | from 
autoval.lib.host.component.component import COMPONENT 8 | from autoval.lib.utils.autoval_errors import ErrorType 9 | from autoval_ssd.lib.utils.sed_util import SedUtils 10 | from autoval_ssd.lib.utils.storage.drive import DriveType 11 | from autoval_ssd.lib.utils.storage.nvme.nvme_drive import OwnershipStatus 12 | from autoval_ssd.lib.utils.storage.storage_test_base import StorageTestBase 13 | 14 | 15 | class SedCheck(StorageTestBase): 16 | """ 17 | This script is used to validate if the Self Encrypting drive is based 18 | on OPAL 2.0 specification. 19 | """ 20 | 21 | def __init__(self, *args, **kwargs) -> None: 22 | super().__init__(*args, **kwargs) 23 | self.opal_drive_objs = [] 24 | self.test_control["drive_type"] = DriveType.SSD.value 25 | self.test_control["include_boot_drive"] = True 26 | self.include_boot_drive = True 27 | 28 | def setup(self, *args, **kwargs) -> None: 29 | self.storage_test_tools.extend(["sedutil"]) 30 | self.test_control["drive_type"] = DriveType.SSD.value 31 | self.test_control["include_boot_drive"] = True 32 | self.include_boot_drive = True 33 | super().setup(*args, **kwargs) 34 | 35 | def execute(self) -> None: 36 | """ 37 | Test Flow: 38 | 1. Check information - issue "sedutil-cli --scan" 39 | command to check drive information. 40 | 2. Check SED support. 41 | 3. if SED is not supported, pass test. 42 | 4. check if ownership is taken for all the SED supported 43 | if not fail the test if validate_take_ownership is true. 
44 | """ 45 | opal_list, non_opal_list = SedUtils.opal_support_scan(self.host) 46 | self.validate_non_empty_list( 47 | opal_list, 48 | "Validate if SED drives are present", 49 | warning=True, 50 | component=COMPONENT.STORAGE_DRIVE, 51 | error_type=ErrorType.DRIVE_ERR, 52 | ) 53 | if opal_list: 54 | self.log_info(f"SED Supported Drive list is {opal_list}") 55 | self.opal_drive_objs = [ 56 | drive 57 | for drive in self.test_drives 58 | if drive.get_drive_name() in opal_list 59 | ] 60 | self.log_info( 61 | "SED Supported Drive list is " 62 | f"{[f'Serial no:{drive.serial_number} block name:{drive.block_name})' for drive in self.opal_drive_objs]}" 63 | ) 64 | if self.test_control.get("valdiate_drive_ownership", True): 65 | self.log_info( 66 | "Validating, if ownership is taken for the opal supported drives." 67 | ) 68 | for drive in self.opal_drive_objs: 69 | self.validate_in( 70 | str(drive.get_tcg_ownership_status()), 71 | [ 72 | str(OwnershipStatus.SET), 73 | str(OwnershipStatus.BLOCKED_AND_SET), 74 | ], 75 | "validating the drive ownership status" 76 | f" {drive.block_name} {drive.serial_number}", 77 | component=COMPONENT.STORAGE_DRIVE, 78 | error_type=ErrorType.DRIVE_ERR, 79 | ) 80 | if non_opal_list: 81 | self.log_info(f"SED Un-Supported Drive list is {non_opal_list}") 82 | -------------------------------------------------------------------------------- /src/autoval_ssd/tests/sed_check/sed_take_ownership.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | # pyre-unsafe 4 | """ 5 | Test validates if the Self Encrypting Drive supports OPAL 2.0 spec and 6 | ownership of the drive can be correctly set and verified. 
7 | """ 8 | from autoval.lib.host.component.component import COMPONENT 9 | from autoval.lib.utils.autoval_errors import ErrorType 10 | from autoval_ssd.lib.utils.sed_util import SedUtils 11 | from autoval_ssd.lib.utils.storage.nvme.nvme_drive import OwnershipStatus 12 | from autoval_ssd.tests.sed_check.sed_check import SedCheck 13 | 14 | 15 | class SedTakeOwnership(SedCheck): 16 | """ 17 | The intent of the sed_take_ownership test is to validate if the SED is based 18 | on the OPAL 2.0 specification and to ensure that the ownership of the drive 19 | can be correctly set and verified. This test is crucial for maintaining 20 | the security and proper management of the drives in use. 21 | """ 22 | 23 | def __init__(self, *args, **kwargs) -> None: 24 | super().__init__(*args, **kwargs) 25 | self.test_control["valdiate_drive_ownership"] = False 26 | self.ownership_taken_drives = [] 27 | 28 | def execute(self) -> None: 29 | """ 30 | Test Flow: 31 | 1. Check information - issue "sedutil-cli --scan" 32 | command to check drive information. 33 | 2. Check SED support. 34 | 3. if SED is not supported, pass test. 35 | 4. Check Locked = N with sedutil-cli --query $drive. 36 | 5. Check ownership state 37 | 6. 
If ownership is taken, attempt to revert with a tper revert 38 | """ 39 | super().execute() 40 | if not self.opal_drive_objs: 41 | self.validate_condition( 42 | False, 43 | "Validate if SED drives are present", 44 | warning=True, 45 | component=COMPONENT.STORAGE_DRIVE, 46 | error_type=ErrorType.DRIVE_ERR, 47 | ) 48 | return 49 | self.log_info("Validating the take ownership for opal supported drives.") 50 | for drive in self.opal_drive_objs: 51 | msid = SedUtils.get_msid(self.host, drive.block_name) 52 | lock_status = SedUtils.check_locked_status(self.host, drive.block_name) 53 | ownership_status = drive.get_tcg_ownership_status() 54 | self.log_info(f"+++Performing Take ownership check on {drive.block_name}") 55 | self.log_info( 56 | f"Locked Status for drive {drive.block_name} is {lock_status}" 57 | ) 58 | self.log_info( 59 | f"TCG Ownership for drive {drive.block_name} is {ownership_status}" 60 | ) 61 | if not lock_status: 62 | if ownership_status == OwnershipStatus.NOT_SET: 63 | SedUtils.take_ownership(self.host, drive, password=msid) 64 | self.ownership_taken_drives.append(drive) 65 | elif ownership_status == OwnershipStatus.SET: 66 | if ( 67 | drive.tooling_owned_models 68 | and drive.model in drive.tooling_owned_models 69 | ): 70 | self.log_info( 71 | "+++ Model owned by tooling. 
Cannot revert ownership" 72 | ) 73 | return 74 | self.log_info( 75 | "+++Reverting the ownership and proceeding" 76 | " with the take_ownership" 77 | ) 78 | SedUtils.revert_take_ownership( 79 | self.host, drive, password="facebook" 80 | ) 81 | if ownership_status == OwnershipStatus.NOT_SET: 82 | SedUtils.take_ownership(self.host, drive, password=msid) 83 | self.ownership_taken_drives.append(drive) 84 | else: 85 | self.validate_condition( 86 | False, 87 | "Validate if Ownership can be taken", 88 | warning=True, 89 | component=COMPONENT.STORAGE_DRIVE, 90 | error_type=ErrorType.DRIVE_ERR, 91 | ) 92 | self.log_info( 93 | "Ownership can not be taken as the drive " 94 | f"{drive.serial_number} is locked." 95 | ) 96 | 97 | def cleanup(self, *args, **kwargs) -> None: 98 | for drive in self.ownership_taken_drives: 99 | SedUtils.revert_take_ownership(self.host, drive, password="facebook") 100 | super().cleanup(*args, **kwargs) 101 | -------------------------------------------------------------------------------- /src/autoval_ssd/tools/fsync.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include 6 | #include 7 | #include 8 | #include 9 | #include 10 | #include 11 | 12 | typedef long long longlong; 13 | 14 | double gettimeofday_sec() { 15 | struct timeval tv; 16 | gettimeofday(&tv, NULL); 17 | return tv.tv_sec + (double)tv.tv_usec * 1e-6; 18 | } 19 | 20 | void now(struct timeval* tv) { 21 | assert(!gettimeofday(tv, NULL)); 22 | } 23 | 24 | long now_minus_then_usecs( 25 | struct timeval const* now, 26 | struct timeval const* then) { 27 | longlong now_usecs = (now->tv_sec * 1000000) + now->tv_usec; 28 | longlong then_usecs = (then->tv_sec * 1000000) + then->tv_usec; 29 | 30 | if (now_usecs >= then_usecs) 31 | return (long)(now_usecs - then_usecs); 32 | else 33 | return -1; 34 | } 35 | 36 | int longcmp(const void* aa, const void* bb) { 37 | const long *a = (const long*)aa, *b = (const 
long*)bb; 38 | return (*a < *b) ? -1 : (*a > *b); 39 | } 40 | 41 | int main(int argc, char** argv) { 42 | double t1, t2, t3, t4; 43 | unsigned int i = 0; 44 | int fd; 45 | 46 | if (argc < 3) { 47 | printf("Missing arguments. \n"); 48 | return 1; 49 | } 50 | 51 | char* filepath = argv[1]; 52 | long total_writes = atoi(argv[2]); 53 | int block_size = atoi(argv[3]); 54 | 55 | printf("Fsync %d bytes x %d times.\n", block_size, total_writes); 56 | time_t timer; 57 | struct tm* t_st; 58 | time(&timer); 59 | printf("Current Time: %s", ctime(&timer)); 60 | 61 | char* str = malloc(block_size * sizeof(str)); 62 | memset(str, 1, (size_t)block_size); 63 | 64 | fd = open64(filepath, O_WRONLY); 65 | if (fd == -1) { 66 | printf("error\n"); 67 | return 1; 68 | } 69 | 70 | struct timeval start, stop; 71 | long latency; 72 | 73 | t1 = gettimeofday_sec(); 74 | 75 | long fsync_stats[total_writes]; 76 | for (i = 0; i < total_writes; i++) { 77 | now(&start); 78 | write(fd, str, block_size); 79 | fsync(fd); 80 | now(&stop); 81 | latency = now_minus_then_usecs(&stop, &start); 82 | fsync_stats[i] = latency; 83 | /*printf("%ld ", latency); 84 | */ 85 | } 86 | close(fd); 87 | printf("\n"); 88 | t2 = gettimeofday_sec(); 89 | 90 | int total_time = t2 - t1; 91 | printf( 92 | "block_size: %d, %d fsync/sec\n", block_size, total_writes / total_time); 93 | qsort(fsync_stats, total_writes, sizeof(long), longcmp); 94 | long sum = 0; 95 | for (i = 0; i < total_writes; i++) { 96 | sum += fsync_stats[i]; 97 | } 98 | long avg_lat = (long)(sum / total_writes); 99 | long p95_lat = fsync_stats[(int)(total_writes * .95)]; 100 | long p99_lat = fsync_stats[(int)(total_writes * .99)]; 101 | long max_lat = fsync_stats[total_writes - 1]; 102 | printf( 103 | "Latency\nAvg: %ld, P95: %ld, P99: %ld, Max: %ld\n", 104 | avg_lat, 105 | p95_lat, 106 | p99_lat, 107 | max_lat); 108 | 109 | free(str); 110 | 111 | return 0; 112 | } 113 | -------------------------------------------------------------------------------- 
/src/autoval_ssd/tools/ioT6.go: -------------------------------------------------------------------------------- 1 | package main 2 | 3 | import ( 4 | "flag" 5 | "fmt" 6 | "log" 7 | "math/rand" 8 | "os" 9 | "os/signal" 10 | "runtime" 11 | "sync/atomic" 12 | "syscall" 13 | "time" 14 | ) 15 | 16 | var ioc chan int 17 | var stats chan time.Duration 18 | var file *os.File 19 | var size = flag.Int64("size", 1, "arena size in gigabytes") 20 | var hz = flag.Float64("rate", 384, "rate in Hz") 21 | var block = flag.Int64("block", 64, "block size in kilobytes") 22 | var total int64 23 | var totalDuration time.Duration 24 | var maxDuration time.Duration 25 | var maxPending int32 26 | var pending int32 27 | 28 | func worker() { 29 | buf := make([]byte, *block*1024) 30 | boundary := *size * 1024 * 1024 * 1024 31 | for range ioc { 32 | pos := rand.Int63n(boundary/4096) * 4096 33 | p := atomic.AddInt32(&pending, 1) 34 | if p > maxPending { 35 | maxPending = p 36 | } 37 | start := time.Now() 38 | _, err := file.WriteAt(buf, pos) 39 | if err != nil { 40 | log.Println(err) 41 | } 42 | elapsed := time.Since(start) 43 | atomic.AddInt32(&pending, -1) 44 | stats <- elapsed 45 | } 46 | } 47 | 48 | func generator() { 49 | duration := time.Duration(0) 50 | if *hz != 0 { 51 | duration = time.Second / time.Duration(*hz) 52 | } 53 | overhead := time.Duration(0) 54 | 55 | start := time.Now() 56 | previous := start 57 | 58 | for { 59 | ioc <- 1 60 | 61 | elapsed := start.Sub(previous) 62 | 63 | overhead += (elapsed - duration) 64 | 65 | if overhead > duration { 66 | overhead -= duration 67 | } else { 68 | time.Sleep(duration) 69 | } 70 | 71 | previous = start 72 | start = time.Now() 73 | } 74 | } 75 | 76 | func aggregate() { 77 | for d := range stats { 78 | total++ 79 | totalDuration += d 80 | if maxDuration < d { 81 | maxDuration = d 82 | } 83 | } 84 | } 85 | 86 | func statistics() { 87 | last := int64(0) 88 | lastDuration := time.Duration(0) 89 | 90 | for { 91 | time.Sleep(time.Second) 92 | 
sampleDuration := totalDuration - lastDuration 93 | if maxDuration > (time.Duration(10)*time.Millisecond) || maxPending > 10 { 94 | fmt.Println(time.Now(), " ", maxDuration, sampleDuration/time.Duration(total-last), maxPending) 95 | } 96 | last = total 97 | lastDuration = totalDuration 98 | maxDuration = time.Duration(0) 99 | maxPending = 0 100 | } 101 | } 102 | 103 | func init() { 104 | flag.Parse() 105 | runtime.GOMAXPROCS(32) 106 | } 107 | 108 | func main() { 109 | var err error 110 | ioc = make(chan int, 10000) 111 | stats = make(chan time.Duration, 100) 112 | flag.Parse() 113 | file, err = os.OpenFile(flag.Arg(0), syscall.O_DIRECT|os.O_RDWR, 0) 114 | if err != nil { 115 | panic(err) 116 | } 117 | for i := 0; i < 1000; i++ { 118 | go worker() 119 | } 120 | go aggregate() 121 | go generator() 122 | go statistics() 123 | c := make(chan os.Signal, 1) 124 | signal.Notify(c, os.Interrupt, syscall.SIGTERM) 125 | <-c 126 | // panic("interrupted") 127 | } 128 | -------------------------------------------------------------------------------- /src/autoval_ssd/unittest/mock/lib/mock_autoval.py: -------------------------------------------------------------------------------- 1 | # pyre-unsafe 2 | 3 | import csv 4 | import logging 5 | import unittest.mock as mock 6 | 7 | from autoval.lib.transport.ssh import SSHConn 8 | from autoval.lib.utils.autoval_exceptions import CmdError 9 | from autoval.lib.utils.autoval_log import AutovalLog 10 | from autoval.lib.utils.autoval_utils import AutovalUtils, CmdResult 11 | from autoval.lib.utils.file_actions import FileActions 12 | from autoval_ssd.unittest.mock.lib.mock_host import MockHost 13 | 14 | MOCK_INPUT_PATH = "autoval_ssd/unittest/mock/util_outputs/" 15 | MOCK_COMMAND_MAP_PATH = "autoval_ssd/unittest/mock/testbed/cmd_map" 16 | 17 | 18 | class MockAutovalUtils(MockHost): 19 | def __init__(self, cmd_map=None): 20 | self.autovalLog = AutovalLog._init_logs() 21 | self.logger = logging.getLogger("cmdlog") 22 | self.cmd_output_dict = 
None 23 | self.get_result_obj_rc = 0 24 | if cmd_map: 25 | self.cmd_map = cmd_map 26 | else: 27 | self.cmd_map = self.generate_cmp_map() 28 | super(MockAutovalUtils, self).__init__(self.cmd_map) 29 | 30 | @staticmethod 31 | def generate_cmp_map(): 32 | """This function will convert the cmd_map file into the list of 33 | dict with in format [{"cmd"="cmd","file"="file_path"},..]""" 34 | try: 35 | file_path = FileActions.get_resource_file_path(MOCK_COMMAND_MAP_PATH[14:]) 36 | with open(file_path, "r") as file_context: 37 | cmd_map_reader = csv.reader( 38 | file_context, delimiter=":", quoting=csv.QUOTE_ALL 39 | ) 40 | """in case cmd has the delimiter part of it, csv reader 41 | will consider the last element as "file" and will join 42 | the rest of elements to command""" 43 | cmd_map = [ 44 | { 45 | "cmd": ":".join(each_cmd_map[0:-1]).strip(), 46 | "file": each_cmd_map[-1].strip(), 47 | } 48 | for each_cmd_map in cmd_map_reader 49 | ] 50 | return cmd_map 51 | except Exception: 52 | raise Exception( 53 | f"Failed to generate the cmd_map from file {MOCK_COMMAND_MAP_PATH}" 54 | ) 55 | 56 | def run(self, *params, **kparams): 57 | """Function is side effect of mocking run method and 58 | will be give a mock output based on the cmd_map 59 | *params will contain values of cmd from run method 60 | **kparams will contain the key argument values of get_result_obj, 61 | ignore_status,custom_logfile cmd_output_dict is used 62 | in case cmd_map is not to be referred which would be and optimised way 63 | in case we have single line output instead of creating file 64 | get_result_obj_rc return code of command run by default set to 0 65 | """ 66 | data = None 67 | cmd = params[0] 68 | get_result_obj = kparams.get("get_result_obj") 69 | ignore_status = kparams.get("ignore_status") 70 | if self.cmd_output_dict and cmd in self.cmd_output_dict: 71 | data = self.cmd_output_dict[cmd] 72 | if isinstance(data, Exception): 73 | raise data 74 | else: 75 | if get_result_obj: 76 | data = 
self.run_get_result(cmd, ignore_status) 77 | else: 78 | data = super(MockAutovalUtils, self).run(cmd, ignore_status) 79 | if get_result_obj: 80 | data = CmdResult(cmd, data, "", self.get_result_obj_rc) 81 | if self.get_result_obj_rc and not ignore_status: 82 | raise CmdError(cmd, data, "command failed") 83 | return data 84 | 85 | def get_mock_data(self, funct, *args, **kwargs): 86 | """Function will mock the methods which should run on Dut 87 | such as run""" 88 | self.cmd_output_dict = kwargs.pop("cmd_output_dict", None) 89 | self.get_result_obj_rc = kwargs.pop("get_result_obj_rc", 0) 90 | with mock.patch.object( 91 | SSHConn, "scp_file", return_value="pass" 92 | ), mock.patch.object(SSHConn, "run", side_effect=self.run), mock.patch.object( 93 | AutovalUtils, "run_get_output", side_effect=self.run 94 | ), mock.patch.object( 95 | AutovalLog, 96 | "log_info", 97 | side_effect=self.logger.info, 98 | ): 99 | return funct(*args, **kwargs) 100 | -------------------------------------------------------------------------------- /src/autoval_ssd/unittest/mock/lib/mock_connection_dispatcher.py: -------------------------------------------------------------------------------- 1 | # pyre-unsafe 2 | 3 | from autoval.lib.transport.ssh import SSHConn 4 | 5 | MOCK_HOSTS = { 6 | "hostname": "using.fake.host", 7 | "ipv6": "abcd:db00:0012:700e:face:0000:0023:0000", 8 | "oob_addr": "using-oob.fake.host", 9 | "rack_sub_position_slot": 1, 10 | "is_container": False, 11 | } 12 | 13 | 14 | class MockConnectionDispatcher: 15 | def __init__(self): 16 | self.oob_only = None 17 | self.host_connection = SSHConn(None) 18 | self.bmc_connections = [SSHConn(None)] 19 | self._bmc_connections = [SSHConn(None)] 20 | self.oob_addr = MOCK_HOSTS.get("oob_addr") 21 | self.rack_sub_position = MOCK_HOSTS.get("rack_sub_position") 22 | self.rack_sub_position_slot = MOCK_HOSTS.get("rack_sub_position_slot") 23 | self.hostname = MOCK_HOSTS.get("hostname") 24 | self.localhost = None 25 | self.host_dict = 
MOCK_HOSTS 26 | -------------------------------------------------------------------------------- /src/autoval_ssd/unittest/mock/lib/mock_openbmc.py: -------------------------------------------------------------------------------- 1 | # pyre-unsafe 2 | 3 | import os 4 | import unittest.mock as mock 5 | 6 | from autoval.lib.host.bmc import BMC 7 | from autoval.lib.utils.file_actions import FileActions 8 | 9 | 10 | MOCK_INPUT_PATH = "autoval_ssd/unittest/mock/util_outputs/" 11 | 12 | 13 | class MockOpenBMC: 14 | def __init__(self, host, cmd_map): 15 | self.host = host 16 | self.cmd_map = cmd_map 17 | self.bmc_host = host 18 | 19 | def run(self, cmd, ignore_status=True): 20 | data = None 21 | for c in self.cmd_map: 22 | if cmd == c["cmd"]: 23 | data = self._read_file(c["file"]) 24 | break 25 | return data 26 | 27 | def _get_openbmc(self): 28 | with mock.patch.object(BMC, "__init__", lambda a, b, c, d: None): 29 | openbmc = BMC(None, None, None) 30 | openbmc.slot_info = "slot4" 31 | openbmc.host = self.host 32 | openbmc.bmc_host = self.host 33 | openbmc.config_filter = {} 34 | return openbmc 35 | 36 | def _read_file(self, _file): 37 | _file = os.path.join(MOCK_INPUT_PATH, _file) 38 | file_path = FileActions.get_resource_file_path(_file) 39 | return FileActions.read_data(file_path) 40 | -------------------------------------------------------------------------------- /src/autoval_ssd/unittest/mock/lib/mock_test_base_init.py: -------------------------------------------------------------------------------- 1 | # (c) Meta Platforms, Inc. and affiliates. Confidential and proprietary. 
2 | # pyre-unsafe 3 | from unittest.mock import patch 4 | 5 | from autoval.lib.test_args import TEST_CONTROL 6 | 7 | 8 | @patch.dict(TEST_CONTROL, {}) 9 | def mock_testbase_init(self, *_): 10 | self.test_control = TEST_CONTROL 11 | -------------------------------------------------------------------------------- /src/autoval_ssd/unittest/mock/lib/mock_threadpool_executor.py: -------------------------------------------------------------------------------- 1 | # pyre-unsafe 2 | from concurrent import futures 3 | 4 | 5 | class MockExecutor(futures.Executor): 6 | def submit(self, f, *args, **kwargs): 7 | future = futures.Future() 8 | future.set_result(f(*args, **kwargs)) 9 | return future 10 | 11 | def wait(self, futures=None): 12 | pass 13 | 14 | def shutdown(self, wait=True): 15 | pass 16 | -------------------------------------------------------------------------------- /src/autoval_ssd/unittest/mock/testbed/testbed.json: -------------------------------------------------------------------------------- 1 | 2 | { 3 | "hosts": [ 4 | { 5 | "hostname" : "using.fake.host", 6 | "ipv6": "abcd:db00:0012:700e:face:0000:0023:0000", 7 | "oob_addr" : "using-oob.fake.host", 8 | "rack_sub_position_slot" : 1 9 | } 10 | ] 11 | } 12 | -------------------------------------------------------------------------------- /src/autoval_ssd/unittest/mock/util_outputs/empty: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/opencomputeproject/ocp-diag-autoval-ssd/4d47fc5fe5c4c236275539c791c5b5a3995e01ff/src/autoval_ssd/unittest/mock/util_outputs/empty -------------------------------------------------------------------------------- /src/autoval_ssd/unittest/mock/util_outputs/fstrim: -------------------------------------------------------------------------------- 1 | /mnt/autoval_nvme0n1: 290.1 GiB (311452086272 bytes) trimmed 2 | -------------------------------------------------------------------------------- 
/src/autoval_ssd/unittest/mock/util_outputs/id_ctrl: -------------------------------------------------------------------------------- 1 | { 2 | "vid" : 7260, 3 | "ssvid" : 7260, 4 | "sn" : "MN95N637910306E2I ", 5 | "mn" : "HFS256GD9TNG-62A0A ", 6 | "fr" : "80003E00", 7 | "rab" : 2, 8 | "ieee" : 11330606, 9 | "cmic" : 0, 10 | "mdts" : 5, 11 | "cntlid" : 1, 12 | "ver" : 66048, 13 | "rtd3r" : 500000, 14 | "rtd3e" : 2000000, 15 | "oaes" : 512, 16 | "ctratt" : 0, 17 | "rrls" : 0, 18 | "oacs" : 23, 19 | "acl" : 7, 20 | "aerl" : 3, 21 | "frmw" : 20, 22 | "lpa" : 2, 23 | "elpe" : 255, 24 | "npss" : 4, 25 | "avscc" : 1, 26 | "apsta" : 1, 27 | "wctemp" : 352, 28 | "cctemp" : 354, 29 | "mtfa" : 0, 30 | "hmpre" : 0, 31 | "hmmin" : 0, 32 | "tnvmcap" : 0, 33 | "unvmcap" : 0, 34 | "rpmbs" : 0, 35 | "edstt" : 10, 36 | "dsto" : 0, 37 | "fwug" : 0, 38 | "kas" : 0, 39 | "hctma" : 0, 40 | "mntmt" : 0, 41 | "mxtmt" : 0, 42 | "sanicap" : 0, 43 | "hmminds" : 0, 44 | "hmmaxd" : 0, 45 | "nsetidmax" : 0, 46 | "anatt" : 0, 47 | "anacap" : 0, 48 | "anagrpmax" : 0, 49 | "nanagrpid" : 0, 50 | "sqes" : 102, 51 | "cqes" : 68, 52 | "maxcmd" : 0, 53 | "nn" : 1, 54 | "oncs" : 31, 55 | "fuses" : 1, 56 | "fna" : 0, 57 | "vwc" : 1, 58 | "awun" : 7, 59 | "awupf" : 7, 60 | "nvscc" : 1, 61 | "nwpc" : 0, 62 | "acwu" : 7, 63 | "sgls" : 0, 64 | "ioccsz" : 0, 65 | "iorcsz" : 0, 66 | "icdoff" : 0, 67 | "ctrattr" : 0, 68 | "msdbd" : 0, 69 | "psds" : [ 70 | { 71 | "max_power" : 600, 72 | "flags" : 0, 73 | "entry_lat" : 5, 74 | "exit_lat" : 5, 75 | "read_tput" : 0, 76 | "read_lat" : 0, 77 | "write_tput" : 0, 78 | "write_lat" : 0, 79 | "idle_power" : 0, 80 | "idle_scale" : 0, 81 | "active_power" : 0, 82 | "active_work_scale" : 0 83 | }, 84 | { 85 | "max_power" : 380, 86 | "flags" : 0, 87 | "entry_lat" : 30, 88 | "exit_lat" : 30, 89 | "read_tput" : 1, 90 | "read_lat" : 1, 91 | "write_tput" : 1, 92 | "write_lat" : 1, 93 | "idle_power" : 0, 94 | "idle_scale" : 0, 95 | "active_power" : 0, 96 | "active_work_scale" : 0 
97 | }, 98 | { 99 | "max_power" : 240, 100 | "flags" : 0, 101 | "entry_lat" : 100, 102 | "exit_lat" : 100, 103 | "read_tput" : 2, 104 | "read_lat" : 2, 105 | "write_tput" : 2, 106 | "write_lat" : 2, 107 | "idle_power" : 0, 108 | "idle_scale" : 0, 109 | "active_power" : 0, 110 | "active_work_scale" : 0 111 | }, 112 | { 113 | "max_power" : 700, 114 | "flags" : 3, 115 | "entry_lat" : 1000, 116 | "exit_lat" : 1000, 117 | "read_tput" : 3, 118 | "read_lat" : 3, 119 | "write_tput" : 3, 120 | "write_lat" : 3, 121 | "idle_power" : 0, 122 | "idle_scale" : 0, 123 | "active_power" : 0, 124 | "active_work_scale" : 0 125 | }, 126 | { 127 | "max_power" : 70, 128 | "flags" : 3, 129 | "entry_lat" : 1000, 130 | "exit_lat" : 5000, 131 | "read_tput" : 3, 132 | "read_lat" : 3, 133 | "write_tput" : 3, 134 | "write_lat" : 3, 135 | "idle_power" : 0, 136 | "idle_scale" : 0, 137 | "active_power" : 0, 138 | "active_work_scale" : 0 139 | } 140 | ] 141 | } 142 | -------------------------------------------------------------------------------- /src/autoval_ssd/unittest/mock/util_outputs/id_ctrl.json: -------------------------------------------------------------------------------- 1 | { 2 | "vid" : 5197, 3 | "ssvid" : 5197, 4 | "sn" : "S43ANA0M405593 ", 5 | "mn" : "Unknown MZ1LB960HAJQ-000FB ", 6 | "fr" : "EDA76F2Q", 7 | "rab" : 2, 8 | "ieee" : 9528, 9 | "cmic" : 0, 10 | "mdts" : 9, 11 | "cntlid" : 4, 12 | "ver" : 66048, 13 | "rtd3r" : 8000000, 14 | "rtd3e" : 8000000, 15 | "oaes" : 0, 16 | "ctratt" : 0, 17 | "rrls" : 0, 18 | "oacs" : 15, 19 | "acl" : 7, 20 | "aerl" : 3, 21 | "frmw" : 23, 22 | "lpa" : 15, 23 | "elpe" : 63, 24 | "npss" : 0, 25 | "avscc" : 1, 26 | "apsta" : 0, 27 | "wctemp" : 353, 28 | "cctemp" : 365, 29 | "mtfa" : 0, 30 | "hmpre" : 0, 31 | "hmmin" : 0, 32 | "tnvmcap" : 900185481216, 33 | "unvmcap" : 0, 34 | "rpmbs" : 0, 35 | "edstt" : 0, 36 | "dsto" : 0, 37 | "fwug" : 0, 38 | "kas" : 0, 39 | "hctma" : 0, 40 | "mntmt" : 0, 41 | "mxtmt" : 0, 42 | "sanicap" : 0, 43 | "hmminds" : 0, 44 
| "hmmaxd" : 0, 45 | "nsetidmax" : 0, 46 | "anatt" : 0, 47 | "anacap" : 0, 48 | "anagrpmax" : 0, 49 | "nanagrpid" : 0, 50 | "sqes" : 102, 51 | "cqes" : 68, 52 | "maxcmd" : 0, 53 | "nn" : 1, 54 | "oncs" : 95, 55 | "fuses" : 0, 56 | "fna" : 4, 57 | "vwc" : 0, 58 | "awun" : 127, 59 | "awupf" : 0, 60 | "nvscc" : 1, 61 | "nwpc" : 0, 62 | "acwu" : 0, 63 | "sgls" : 0, 64 | "ioccsz" : 0, 65 | "iorcsz" : 0, 66 | "icdoff" : 0, 67 | "ctrattr" : 0, 68 | "msdbd" : 0, 69 | "psds" : [ 70 | { 71 | "max_power" : 800, 72 | "flags" : 0, 73 | "entry_lat" : 0, 74 | "exit_lat" : 0, 75 | "read_tput" : 0, 76 | "read_lat" : 0, 77 | "write_tput" : 0, 78 | "write_lat" : 0, 79 | "idle_power" : 0, 80 | "idle_scale" : 0, 81 | "active_power" : 0, 82 | "active_work_scale" : 0 83 | } 84 | ] 85 | } 86 | -------------------------------------------------------------------------------- /src/autoval_ssd/unittest/mock/util_outputs/id_ctrlH: -------------------------------------------------------------------------------- 1 | NVME Identify Controller: 2 | vid : 0x1344 3 | ssvid : 0x1344 4 | sn : 1921220DDF47 5 | mn : MTFDHBA256TCK-1AS15ABYY 6 | fr : P1MU002 7 | rab : 0 8 | ieee : 00a075 9 | cmic : 0 10 | [2:2] : 0 PCI 11 | [1:1] : 0 Single Controller 12 | [0:0] : 0 Single Port 13 | 14 | mdts : 7 15 | cntlid : 0 16 | ver : 10201 17 | rtd3r : 1e8480 18 | rtd3e : 1e8480 19 | oaes : 0 20 | [8:8] : 0 Namespace Attribute Changed Event Not Supported 21 | 22 | oacs : 0x17 23 | [15:4] : 0x1 Reserved 24 | [3:3] : 0 NS Management and Attachment Not Supported 25 | [2:2] : 0x1 FW Commit and Download Supported 26 | [1:1] : 0x1 Format NVM Supported 27 | [0:0] : 0x1 Sec. 
Send and Receive Supported 28 | 29 | acl : 3 30 | aerl : 7 31 | frmw : 0x2 32 | [4:4] : 0 Firmware Activate Without Reset Not Supported 33 | [3:1] : 0x1 Number of Firmware Slots 34 | [0:0] : 0 Firmware Slot 1 Read/Write 35 | 36 | lpa : 0x3 37 | [1:1] : 0x1 Command Effects Log Page Supported 38 | [0:0] : 0x1 SMART/Health Log Page per NS Supported 39 | 40 | elpe : 255 41 | npss : 4 42 | avscc : 0x1 43 | [0:0] : 0x1 Admin Vendor Specific Commands uses NVMe Format 44 | 45 | apsta : 0x1 46 | [0:0] : 0x1 Autonomous Power State Transitions Supported 47 | 48 | wctemp : 355 49 | cctemp : 358 50 | mtfa : 0 51 | hmpre : 0 52 | hmmin : 0 53 | tnvmcap : 0 54 | unvmcap : 0 55 | rpmbs : 0 56 | [31:24]: 0 Access Size 57 | [23:16]: 0 Total Size 58 | [5:3] : 0 Authentication Method 59 | [2:0] : 0 Number of RPMB Units 60 | 61 | sqes : 0x66 62 | [7:4] : 0x6 Max SQ Entry Size (64) 63 | [3:0] : 0x6 Min SQ Entry Size (64) 64 | 65 | cqes : 0x44 66 | [7:4] : 0x4 Max CQ Entry Size (16) 67 | [3:0] : 0x4 Min CQ Entry Size (16) 68 | 69 | nn : 1 70 | oncs : 0x17 71 | [5:5] : 0 Reservations Not Supported 72 | [4:4] : 0x1 Save and Select Supported 73 | [3:3] : 0 Write Zeroes Not Supported 74 | [2:2] : 0x1 Data Set Management Supported 75 | [1:1] : 0x1 Write Uncorrectable Supported 76 | [0:0] : 0x1 Compare Supported 77 | 78 | fuses : 0 79 | [0:0] : 0 Fused Compare and Write Not Supported 80 | 81 | fna : 0x7 82 | [2:2] : 0x1 Crypto Erase Supported as part of Secure Erase 83 | [1:1] : 0x1 Crypto Erase Applies to All Namespace(s) 84 | [0:0] : 0x1 Format Applies to All Namespace(s) 85 | 86 | vwc : 0x1 87 | [0:0] : 0x1 Volatile Write Cache Present 88 | 89 | awun : 7 90 | awupf : 0 91 | nvscc : 1 92 | [0:0] : 0x1 NVM Vendor Specific Commands uses NVMe Format 93 | 94 | acwu : 0 95 | sgls : 0 96 | [0:0] : 0 Scatter-Gather Lists Not Supported 97 | 98 | subnqn : nqn.2014-08.org.nvmexpress:13441344 1921220DDF47MTFDHBA256TCK-1AS15ABYY 99 | ps 0 : mp:8.25W operational enlat:0 exlat:0 rrt:0 rrl:0 100 | rwt:0 
rwl:0 idle_power:- active_power:- 101 | ps 1 : mp:2.40W operational enlat:0 exlat:0 rrt:1 rrl:1 102 | rwt:1 rwl:1 idle_power:- active_power:- 103 | ps 2 : mp:1.90W operational enlat:0 exlat:0 rrt:2 rrl:2 104 | rwt:2 rwl:2 idle_power:- active_power:- 105 | ps 3 : mp:0.0800W non-operational enlat:10000 exlat:2500 rrt:3 rrl:3 106 | rwt:3 rwl:3 idle_power:- active_power:- 107 | ps 4 : mp:0.0050W non-operational enlat:5000 exlat:44000 rrt:4 rrl:4 108 | rwt:4 rwl:4 idle_power:- active_power:- 109 | -------------------------------------------------------------------------------- /src/autoval_ssd/unittest/mock/util_outputs/lsblk: -------------------------------------------------------------------------------- 1 | NAME TYPE ROTA SIZE MODEL MOUNTPOINT 2 | loop0 loop 1 100G /var/oss/autoval/agent/control 3 | sda disk 0 238.5G Vendor_1100_MTFD 4 | sda1 part 0 243M /boot/efi 5 | sda2 part 0 488M /boot 6 | sda3 part 0 1.9G [SWAP] 7 | sda4 part 0 235.9G / 8 | sdb disk 1 12.8T WUH721414AL4204 9 | sdc disk 1 12.8T WUH721414AL4204 10 | sdd disk 1 12.8T WUH721414AL4204 11 | sde disk 1 12.8T WUH721414AL4204 12 | sdf disk 1 12.8T WUH721414AL4204 13 | sdg disk 1 12.8T WUH721414AL4204 14 | sdh disk 1 12.8T WUH721414AL4204 15 | sdi disk 1 12.8T WUH721414AL4204 16 | sdj disk 1 12.8T WUH721414AL4204 17 | sdk disk 1 12.8T WUH721414AL4204 18 | sdl disk 1 12.8T WUH721414AL4204 19 | sdm disk 1 12.8T WUH721414AL4204 20 | sdn disk 1 12.8T WUH721414AL4204 21 | sdo disk 1 12.8T WUH721414AL4204 22 | sdp disk 1 12.8T WUH721414AL4204 23 | sdq disk 1 12.8T WUH721414AL4204 24 | sdr disk 1 12.8T WUH721414AL4204 25 | sds disk 1 12.8T WUH721414AL4204 26 | sdt disk 1 12.8T WUH721414AL4204 27 | sdu disk 1 12.8T WUH721414AL4204 28 | sdv disk 1 12.8T WUH721414AL4204 29 | sdw disk 1 12.8T WUH721414AL4204 30 | sdx disk 1 12.8T WUH721414AL4204 31 | sdy disk 1 12.8T WUH721414AL4204 32 | sdz disk 1 12.8T WUH721414AL4204 33 | sdaa disk 1 12.8T WUH721414AL4204 34 | sdab disk 1 12.8T WUH721414AL4204 35 | sdac disk 1 
12.8T WUH721414AL4204 36 | sdad disk 1 12.8T WUH721414AL4204 37 | sdae disk 1 12.8T WUH721414AL4204 38 | sdaf disk 1 12.8T WUH721414AL4204 39 | sdag disk 1 12.8T WUH721414AL4204 40 | sdah disk 1 12.8T WUH721414AL4204 41 | sdai disk 1 12.8T WUH721414AL4204 42 | sdaj disk 1 12.8T WUH721414AL4204 43 | sdak disk 1 12.8T WUH721414AL4204 44 | nvme1n1 disk 0 838.4G Vendor MZ1LB960HAJQ-000 45 | nvme0n1 disk 0 838.4G Vendor MZ1LB960HAJQ-000 46 | -------------------------------------------------------------------------------- /src/autoval_ssd/unittest/mock/util_outputs/lsblk_J: -------------------------------------------------------------------------------- 1 | { 2 | "blockdevices": [ 3 | {"name": "loop0", "maj:min": "7:0", "rm": "0", "size": "100G", "ro": "0", "type": "loop", "mountpoint": "/var/oss/autoval/agent/control"}, 4 | {"name": "sda", "maj:min": "8:0", "rm": "0", "size": "1.8T", "ro": "0", "type": "disk", "mountpoint": null, 5 | "children": [ 6 | {"name": "sda1", "maj:min": "8:1", "rm": "0", "size": "243M", "ro": "0", "type": "part", "mountpoint": "/boot/efi"}, 7 | {"name": "sda2", "maj:min": "8:2", "rm": "0", "size": "488M", "ro": "0", "type": "part", "mountpoint": "/boot"}, 8 | {"name": "sda3", "maj:min": "8:3", "rm": "0", "size": "1.9G", "ro": "0", "type": "part", "mountpoint": "[SWAP]"}, 9 | {"name": "sda4", "maj:min": "8:4", "rm": "0", "size": "1.8T", "ro": "0", "type": "part", "mountpoint": "/"} 10 | ] 11 | }, 12 | {"name": "nvme0n1", "maj:min": "259:0", "rm": "0", "size": "1.7T", "ro": "0", "type": "disk", "mountpoint": null} 13 | ] 14 | } 15 | -------------------------------------------------------------------------------- /src/autoval_ssd/unittest/mock/util_outputs/lspci_vvv_1: -------------------------------------------------------------------------------- 1 | 00:00.0 Host bridge: Unknown Vendor DMI3 Registers (rev 04) 2 | Subsystem: Unknown Vendor Device 0000 3 | Control: I/O- Mem- BusMaster- SpecCycle- MemWINV- VGASnoop- ParErr+ Stepping- SERR+ 4 | 
FastB2B- DisINTx+ 5 | Status: Cap+ 66MHz- UDF- FastB2B- ParErr- DEVSEL=fast >TAbort- SERR- 6 | None: 66 | """Unittest for expander soft reset.""" 67 | m_sleep.return_value = True 68 | ScrtnyCli.expander_soft_reset(self.mock_host) # type: ignore 69 | m_run.assert_called_once_with( 70 | "scrtnycli -i 2 reset -e", 71 | ) 72 | 73 | @patch("time.sleep") 74 | @patch.object(MockHost, "run") 75 | def test_phy_link_reset(self, m_run, m_sleep) -> None: 76 | """Unittest for drive PHY link reset of a drive.""" 77 | m_sleep.return_value = True 78 | phy_addr = 2 79 | ScrtnyCli.phy_link_reset(self.mock_host, phy_addr) # type: ignore 80 | m_run.assert_called_once_with("scrtnycli -i 2 reset -pl 2", True) 81 | 82 | @patch("time.sleep") 83 | @patch.object(MockHost, "run") 84 | def test_phy_link_reset_all(self, m_run, m_sleep) -> None: 85 | """Unittest for PHY link reset of all drive.""" 86 | m_sleep.return_value = True 87 | ScrtnyCli.phy_link_reset_all(self.mock_host) # type: ignore 88 | m_run.assert_called_once_with("scrtnycli -i 2 reset -pla", True) 89 | 90 | @patch("time.sleep") 91 | @patch.object(MockHost, "run") 92 | def test_phy_hard_reset(self, m_run, m_sleep) -> None: 93 | """Unittest for PHY hard reset of a drive.""" 94 | m_sleep.return_value = True 95 | phy_addr = 2 96 | ScrtnyCli.phy_hard_reset(self.mock_host, phy_addr) # type: ignore 97 | m_run.assert_called_once_with("scrtnycli -i 2 reset -ph 2", True) 98 | 99 | @patch("time.sleep") 100 | @patch.object(MockHost, "run") 101 | def test_phy_hard_reset_all(self, m_run, m_sleep) -> None: 102 | """Unittest for PHY hard reset of all drives.""" 103 | m_sleep.return_value = True 104 | ScrtnyCli.phy_hard_reset_all(self.mock_host) # type: ignore 105 | m_run.assert_called_once_with("scrtnycli -i 2 reset -pha", True) 106 | 107 | @patch("time.sleep") 108 | @patch.object(MockHost, "run") 109 | def test_turn_phy_on(self, m_run, m_sleep) -> None: 110 | """Unittest for turning the PHY ON.""" 111 | m_sleep.return_value = True 112 | 
mock_phy_value = 4 113 | ScrtnyCli.turn_phy_on(self.mock_host, mock_phy_value) # type: ignore 114 | m_run.assert_called_once_with("scrtnycli -i 2 phy -on 4", True) 115 | 116 | @patch("time.sleep") 117 | @patch.object(MockHost, "run") 118 | def test_turn_phy_off(self, m_run, m_sleep) -> None: 119 | """Unittest for turning the PHY OFF.""" 120 | m_sleep.return_value = True 121 | mock_phy_value = 4 122 | ScrtnyCli.turn_phy_off(self.mock_host, mock_phy_value) # type: ignore 123 | m_run.assert_called_once_with("scrtnycli -i 2 phy -off 4", True) 124 | -------------------------------------------------------------------------------- /src/autoval_ssd/unittest/test_sdparm_utils.py: -------------------------------------------------------------------------------- 1 | # pyre-unsafe 2 | import unittest 3 | 4 | from autoval.lib.utils.autoval_exceptions import TestError 5 | 6 | from autoval_ssd.lib.utils.sdparm_utils import SdparmUtils 7 | from autoval_ssd.unittest.mock.lib.mock_host import MockHost 8 | 9 | cmd_map = [ 10 | {"cmd": "sdparm --get WCE /dev/sda", "result": "WCE 1 [cha: y, def: 1]"}, 11 | {"cmd": "ls /sys/block/sda/device/scsi_disk", "result": "0:0:0:0"}, 12 | { 13 | "cmd": "echo 'write back' > /sys/block/sda/device/scsi_disk/0:0:0:0/cache_type", 14 | "result": "True", 15 | }, 16 | { 17 | "cmd": "sdparm --set WCE=1 --save /dev/sda", 18 | "result": "/dev/sda: ATA SanDisk SD9SN8W2 1020", 19 | }, 20 | { 21 | "cmd": "sdparm --set WCE=1 /dev/sda", 22 | "result": "/dev/sda: ATA SanDisk SD9SN8W2 1020", 23 | }, 24 | { 25 | "cmd": "echo 'write through' > /sys/block/sda/device/scsi_disk/0:0:0:0/cache_type", 26 | "result": "True", 27 | }, 28 | { 29 | "cmd": "sdparm --set WCE=0 --save /dev/sda", 30 | "result": "/dev/sda: ATA SanDisk SD9SN8W2 1020", 31 | }, 32 | { 33 | "cmd": "sdparm --set WCE=0 --save /dev/sda", 34 | "result": "/dev/sda: ATA SanDisk SD9SN8W2 1020", 35 | }, 36 | {"cmd": "sdparm --get DRA /dev/sda", "result": "DRA 0 [cha: n, def: 0]"}, 37 | { 38 | "cmd": "sdparm 
--set DRA=1 --save /dev/sda", 39 | "result": "/dev/sda: ATA SanDisk SD9SN8W2 1020", 40 | }, 41 | { 42 | "cmd": "sdparm --set DRA=1 /dev/sda", 43 | "result": "/dev/sda: ATA SanDisk SD9SN8W2 1020", 44 | }, 45 | { 46 | "cmd": "sdparm --set DRA=0 --save /dev/sda", 47 | "result": "/dev/sda: ATA SanDisk SD9SN8W2 1020", 48 | }, 49 | { 50 | "cmd": "sdparm --set DRA=0 /dev/sda", 51 | "result": "/dev/sda: ATA SanDisk SD9SN8W2 1020", 52 | }, 53 | ] 54 | 55 | 56 | class SdparmUtilsUnitTest(unittest.TestCase): 57 | def setUp(self): 58 | self.mock_host = MockHost(cmd_map) 59 | self.sdparm_utils = SdparmUtils() 60 | 61 | def test_get_write_cache(self): 62 | """unit test for get_write_cache""" 63 | SdparmUtils.get_write_cache(self.mock_host, "sda") 64 | 65 | # Validate exception case 66 | cmd = "sdparm --get WCE /dev/sda" 67 | mock_result = "WCE [cha: y, def: 1]" 68 | self.mock_host.update_cmd_map(cmd, mock_result) 69 | self.assertRaises(TestError, SdparmUtils.get_write_cache, self.mock_host, "sda") 70 | 71 | def test_enable_write_cache(self): 72 | """unit test for the enable_write_cache""" 73 | SdparmUtils.enable_write_cache(self.mock_host, "sda") 74 | SdparmUtils.enable_write_cache(self.mock_host, "sda", True) 75 | 76 | def test_disable_write_cache(self): 77 | """unit test for the disable_write_cache""" 78 | SdparmUtils.disable_write_cache(self.mock_host, "sda") 79 | SdparmUtils.enable_write_cache(self.mock_host, "sda", True) 80 | 81 | def test_get_read_lookahead(self): 82 | """unit test for the get_read_lookahead""" 83 | SdparmUtils.get_read_lookahead(self.mock_host, "sda") 84 | 85 | # Validate exception case 86 | cmd = "sdparm --get DRA /dev/sda" 87 | mock_result = "DRA [cha: n, def: 0]" 88 | self.mock_host.update_cmd_map(cmd, mock_result) 89 | self.assertRaises( 90 | TestError, SdparmUtils.get_read_lookahead, self.mock_host, "sda" 91 | ) 92 | 93 | def test_enable_read_lookahead(self): 94 | """unit test for the enable_read_lookahead""" 95 | 
SdparmUtils.enable_read_lookahead(self.mock_host, "sda") 96 | SdparmUtils.enable_read_lookahead(self.mock_host, "sda", True) 97 | 98 | def test_disable_read_lookahead(self): 99 | """unit test for the disable_read_lookahead""" 100 | SdparmUtils.disable_read_lookahead(self.mock_host, "sda") 101 | SdparmUtils.enable_read_lookahead(self.mock_host, "sda", True) 102 | -------------------------------------------------------------------------------- /src/autoval_ssd/unittest/test_sed_util.py: -------------------------------------------------------------------------------- 1 | # pyre-unsafe 2 | from unittest import mock, TestCase 3 | from unittest.mock import call 4 | 5 | from autoval_ssd.lib.utils.sed_util import SedUtils 6 | 7 | from autoval_ssd.unittest.mock.lib.mock_host import MockHost 8 | 9 | 10 | CMD_MAP = [{"cmd": None, "file": None}] 11 | 12 | 13 | class SedUtilUnittest(TestCase): 14 | """unitest for the sedutil-cli library""" 15 | 16 | def setUp(self) -> None: 17 | """initializing the required variables""" 18 | self.mock_host = MockHost(CMD_MAP) 19 | self.block_name = "/dev/mock" 20 | 21 | @mock.patch.object(MockHost, "run") 22 | def test_get_sed_support_status(self, mock_host): 23 | """Unittest of get_sed_support_status function""" 24 | supported_out = f"/dev/{self.block_name} SED -2- WDC CL SN720 SDAQNTW-512G-2000 10103122" 25 | nonsuported_out = f"/dev/{self.block_name} NO --- KXG50ZNV256G TOSHIBA AAGA4102" 26 | # Supported 27 | mock_host.return_value = supported_out 28 | sup_result = SedUtils.get_sed_support_status(self.mock_host, self.block_name) 29 | self.assertTrue(sup_result) 30 | mock_host.assert_has_calls( 31 | calls=[call("sedutil-cli --isValidSED /dev//dev/mock")] 32 | ) 33 | # Not Supported 34 | mock_host.reset_mock(return_value=True) 35 | mock_host.return_value = nonsuported_out 36 | nonsup_result = SedUtils.get_sed_support_status(self.mock_host, self.block_name) 37 | self.assertFalse(nonsup_result) 38 | mock_host.assert_has_calls( 39 | 
calls=[call("sedutil-cli --isValidSED /dev//dev/mock")] 40 | ) 41 | 42 | @mock.patch.object(MockHost, "run") 43 | def test_opal_support_scan(self, mock_host): 44 | """Unittest of opal_support_scan function""" 45 | mock_output = ( 46 | "Scanning for Opal compliant disks\n" 47 | "/dev/nvme0 2 SAMSUNG MZ1LB960HAJQ-000FB EDA75F2Q\n" 48 | "The Kernel flag libata.allow_tpm is not set correctly\n" 49 | "Please see the readme note about setting the libata.allow_tpm\n" 50 | "/dev/sda No SanDisk SD9SN8W256G1020 X6101020\n" 51 | "The Kernel flag libata.allow_tpm is not set correctly\n" 52 | "Please see the readme note about setting the libata.allow_tpm\n" 53 | "/dev/sdb No\n" 54 | ) 55 | opal_complaint = ["nvme0"] 56 | opal_non_complaint = ["sda", "sdb"] 57 | mock_host.return_value = mock_output 58 | opal_list, non_opal_list = SedUtils.opal_support_scan(self.mock_host) 59 | self.assertListEqual(opal_complaint, opal_list) 60 | self.assertListEqual(opal_non_complaint, non_opal_list) 61 | mock_host.assert_has_calls(calls=[call("sedutil-cli --scan")]) 62 | 63 | @mock.patch.object(MockHost, "run") 64 | def test_get_msid(self, mock_host): 65 | """Unittest for get_msid.""" 66 | mock_out = "MSID: MSIDMSIDMSIDMSIDMSIDMSIDMSIDMSID" 67 | mock_host.return_value = mock_out 68 | out = SedUtils.get_msid(self.mock_host, self.block_name) 69 | self.assertEqual(out, "MSIDMSIDMSIDMSIDMSIDMSIDMSIDMSID") 70 | mock_host.assert_has_calls( 71 | calls=[call("sedutil-cli --printDefaultPassword /dev//dev/mock")] 72 | ) 73 | 74 | @mock.patch.object(MockHost, "run") 75 | def test_check_locked_status(self, mock_host): 76 | """Unittest for check_locked_status.""" 77 | mock_out = ( 78 | " Locked = N, LockingEnabled = N, LockingSupported = Y, " 79 | "MBRDone = N, MBREnabled = N, MediaEncrypt = Y" 80 | ) 81 | mock_host.return_value = mock_out 82 | out = SedUtils.check_locked_status(self.mock_host, self.block_name) 83 | self.assertEqual(out, False) 84 | mock_host.assert_has_calls(calls=[call("sedutil-cli 
--query /dev//dev/mock")]) 85 | -------------------------------------------------------------------------------- /src/autoval_ssd/unittest/test_storage_device_factory.py: -------------------------------------------------------------------------------- 1 | # pyre-unsafe 2 | import unittest 3 | from collections import namedtuple 4 | from unittest.mock import patch 5 | 6 | from autoval_ssd.lib.utils.storage.storage_device_factory import StorageDeviceFactory 7 | 8 | from autoval_ssd.unittest.mock.lib.mock_host import MockHost 9 | 10 | CMD_MAP = [ 11 | {"cmd": "nvme list", "file": "nvme_list"}, 12 | {"cmd": "lsscsi", "file": "lsscsi"}, 13 | {"cmd": "smartctl -x /dev/sdd", "file": "smartctl_sas_drive"}, 14 | ] 15 | 16 | MOCK_NVME = namedtuple("NVMeDrive", ["host", "block_name"]) 17 | MOCK_SATA = namedtuple("SATADrive", ["host", "block_name"]) 18 | MOCK_SAS = namedtuple("SASDrive", ["host", "block_name"]) 19 | MOCK_DRIVE = namedtuple("Drive", ["host", "block_name"]) 20 | 21 | 22 | class StorageDeviceFactoryUnitTest(unittest.TestCase): 23 | def setUp(self) -> None: 24 | self.host = MockHost(cmd_map=CMD_MAP) 25 | self.invalid_drive = "xyz" 26 | 27 | @patch("autoval_ssd.lib.utils.storage.storage_device_factory.NVMeDriveFactory") 28 | def test_create_nvme(self, mock_nmve): 29 | """ 30 | This method validates the drive object returned is the object 31 | of NVMeDrive class. 
32 | """ 33 | nvme0n1 = MOCK_NVME(self.host, "nvme0n1") 34 | nvme1n1 = MOCK_NVME(self.host, "nvme1n1") 35 | nvme2n1 = MOCK_NVME(self.host, "nvme2n1") 36 | nvme3n1 = MOCK_NVME(self.host, "nvme3n1") 37 | nvme4n1 = MOCK_NVME(self.host, "nvme4n1") 38 | nvme5n1 = MOCK_NVME(self.host, "nvme5n1") 39 | nvme6n1 = MOCK_NVME(self.host, "nvme6n1") 40 | block_names = [ 41 | "nvme0n1", 42 | "nvme1n1", 43 | "nvme2n1", 44 | "nvme3n1", 45 | "nvme4n1", 46 | "nvme5n1", 47 | "nvme6n1", 48 | ] 49 | with patch.object(StorageDeviceFactory, "_get_host", return_value=self.host): 50 | mock_nmve.create.side_effect = ( 51 | nvme0n1, 52 | nvme1n1, 53 | nvme2n1, 54 | nvme3n1, 55 | nvme4n1, 56 | nvme5n1, 57 | nvme6n1, 58 | ) 59 | sdf = StorageDeviceFactory(self.host, block_names) 60 | drive_list = sdf.create() 61 | self.assertEqual(sdf.nvme_list, block_names) 62 | self.assertEqual( 63 | set(drive_list), 64 | {nvme0n1, nvme1n1, nvme2n1, nvme3n1, nvme4n1, nvme5n1, nvme6n1}, 65 | ) 66 | 67 | @patch("autoval_ssd.lib.utils.storage.storage_device_factory.SATADrive") 68 | def test_create_sata(self, mock_object): 69 | """ 70 | This method validates the drive object returned is the object 71 | of SATADrive class. 72 | """ 73 | sda = MOCK_SATA(self.host, "sda") 74 | block_name = ["sda"] 75 | with patch.object(StorageDeviceFactory, "_get_host", return_value=self.host): 76 | mock_object.return_value = sda 77 | sdf = StorageDeviceFactory(self.host, block_name) 78 | drive_list = sdf.create() 79 | self.assertEqual(sdf.sata_drive_list, block_name) 80 | self.assertListEqual(drive_list, [sda]) 81 | 82 | @patch("autoval_ssd.lib.utils.storage.storage_device_factory.SASDrive") 83 | def test_create_sas(self, mock_object): 84 | """ 85 | This method validates the drive object returned is the object 86 | of SASDrive class. 
87 | """ 88 | sdd = MOCK_SAS(self.host, "sdd") 89 | block_name = ["sdd"] 90 | with patch.object(StorageDeviceFactory, "_get_host", return_value=self.host): 91 | mock_object.return_value = sdd 92 | sdf = StorageDeviceFactory(self.host, block_name) 93 | drive_list = sdf.create() 94 | self.assertListEqual(drive_list, [sdd]) 95 | 96 | @patch("autoval_ssd.lib.utils.storage.storage_device_factory.Drive") 97 | def test_create_no_drive_interface(self, mock_object): 98 | """ 99 | This method validates the drive object returned is the object 100 | of Drive class. 101 | """ 102 | invalid_drive = MOCK_DRIVE(self.host, self.invalid_drive) 103 | block_name = [self.invalid_drive] 104 | with patch.object(StorageDeviceFactory, "_get_host", return_value=self.host): 105 | mock_object.return_value = invalid_drive 106 | sdf = StorageDeviceFactory(self.host, block_name) 107 | drive_list = sdf.create() 108 | self.assertListEqual(drive_list, [invalid_drive]) 109 | 110 | @patch("autoval_ssd.lib.utils.storage.storage_device_factory.NVMeDriveFactory") 111 | @patch("autoval_ssd.lib.utils.storage.storage_device_factory.SATADrive") 112 | @patch("autoval_ssd.lib.utils.storage.storage_device_factory.SASDrive") 113 | @patch("autoval_ssd.lib.utils.storage.storage_device_factory.Drive") 114 | def test_create_all_interface(self, mock_object, mock_sas, mock_sata, mock_nvme): 115 | """ 116 | This method validates the list of drive object returned is the object 117 | of NVMeDrive, SATADrive, SASDrive and Drive class. 
118 | """ 119 | nvme0n1 = MOCK_NVME(self.host, "nvme0n1") 120 | nvme1n1 = MOCK_NVME(self.host, "nvme1n1") 121 | sda = MOCK_SATA(self.host, "sda") 122 | sdd = MOCK_SAS(self.host, "sdd") 123 | invalid_drive = MOCK_DRIVE(self.host, self.invalid_drive) 124 | block_names = [ 125 | "mmcblk0", 126 | "nvme0n1", 127 | "sda", 128 | "sdd", 129 | self.invalid_drive, 130 | "nvme1n1", 131 | ] 132 | with patch.object(StorageDeviceFactory, "_get_host", return_value=self.host): 133 | mock_nvme.create.side_effect = (nvme0n1, nvme1n1) 134 | mock_sata.return_value = sda 135 | mock_sas.return_value = sdd 136 | mock_object.return_value = invalid_drive 137 | sdf = StorageDeviceFactory(self.host, block_names) 138 | drive_list = sdf.create() 139 | self.assertEqual( 140 | set(drive_list), 141 | {nvme0n1, sda, sdd, invalid_drive, nvme1n1}, 142 | ) 143 | --------------------------------------------------------------------------------