├── docs ├── img │ ├── .gitkeep │ ├── Test_Setup.JPG │ └── Trident_SW_Architecture.JPG └── UserGuide.md ├── __init__.py ├── lib ├── __init__.py ├── thread.py ├── threadable_node.py ├── logger.py ├── proc.py ├── profiler.py ├── pos_config.py ├── tags.py ├── pos.py └── composable │ ├── system_management.py │ ├── io_management.py │ └── composable_core.py ├── testcase ├── config_files │ ├── trident_mapping.json │ ├── static.json │ ├── trident_config.json │ ├── topology.json │ ├── hetero_setup.json │ ├── multi_initiator_ops.json │ ├── pos_config.json │ └── system_management.json ├── array │ ├── config.json │ ├── test_save_restore_sanity.py │ ├── test_cmd_prgrs_sanity.py │ ├── test_hetero_multi_sanity.py │ ├── test_array_states.py │ ├── test_array_disk_replace_sanity.py │ ├── test_array_sanity.py │ └── test_raid6_array_sanity.py ├── pytest.ini ├── telemetry │ └── test_telemetry.py ├── subsystem │ └── test_subsystem_sanity.py ├── system_daemon │ └── test_system_daemon_sanity.py ├── userio │ └── test_gc.py ├── qos │ └── test_qos.py ├── cli │ ├── test_cli.py │ ├── test_uuid_sanity.py │ └── test_crud_sanity.py ├── volume │ └── test_volume_sanity.py ├── multiverse │ └── test_multi_initiator_ops.py └── conftest.py ├── requirements.txt ├── .gitignore ├── LICENSE └── README.md /docs/img/.gitkeep: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /__init__.py: -------------------------------------------------------------------------------- 1 | import pos 2 | -------------------------------------------------------------------------------- /lib/__init__.py: -------------------------------------------------------------------------------- 1 | import pos 2 | -------------------------------------------------------------------------------- /testcase/config_files/trident_mapping.json: -------------------------------------------------------------------------------- 1 | {} -------------------------------------------------------------------------------- /docs/img/Test_Setup.JPG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/poseidonos/trident/HEAD/docs/img/Test_Setup.JPG -------------------------------------------------------------------------------- /testcase/array/config.json: -------------------------------------------------------------------------------- 1 | {"test_ArrayStates": {"runtime": "10"}, "test_ArraySanity": {"runtime": 10}} 2 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | pytest 2 | pytest-html 3 | setuptools_rust 4 | paramiko 5 | lxml 6 | prometheus_api_client 7 | -------------------------------------------------------------------------------- /docs/img/Trident_SW_Architecture.JPG: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/poseidonos/trident/HEAD/docs/img/Trident_SW_Architecture.JPG -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | logs/* 2 | */__pycache__/* 3 | .idea/* 4 | *.log 5 | *.pyc 6 | */.cache/ 7 | testcase/system_test/* 8 | testcase/config_files/topology.json 9 | .scap/ 10 | output/ 11 | sam_cli.cfg
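The per-test runtime values in testcase/array/config.json above are consumed by the array tests. A minimal sketch of the loading pattern (it mirrors what test_array_states.py does later in this dump; `dir_path` and the key names come from that test):

```python
import json
import os

# Locate config.json next to the test module.
dir_path = os.path.dirname(os.path.realpath(__file__))

with open("{}/config.json".format(dir_path)) as f:
    config_dict = json.load(f)

# "runtime" is stored either as a string ("10") or an int (10);
# int() normalizes both to a minute count.
run_time = int(config_dict["test_ArrayStates"]["runtime"])
```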
-------------------------------------------------------------------------------- /testcase/config_files/static.json: -------------------------------------------------------------------------------- 1 | { 2 | "Project Info":{ 3 | "Project name": "Trident" 4 | }, 5 | 6 | "Test Cycle Info": { 7 | "Validation Type": "Validation", 8 | "Release": "Dec 2022", 9 | "Version":"POS 0.12.0"} 10 | } 11 | 12 | -------------------------------------------------------------------------------- /testcase/pytest.ini: -------------------------------------------------------------------------------- 1 | [pytest] 2 | markers = 3 | sanity: mark for sanity suite 4 | regression: mark for regression suite 5 | bamboo: mark for bamboo poc 6 | rebuild_perf: mark for rebuild perf tests 7 | hetero_setup: mark for hetero tests 8 | -------------------------------------------------------------------------------- /testcase/telemetry/test_telemetry.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import logger 3 | logger = logger.get_logger(__name__) 4 | 5 | @pytest.mark.sanity 6 | def test_telemetry(system_fixture): 7 | try: 8 | pos = system_fixture 9 | if pos.pos_as_service == False: 10 | pytest.skip("POS should run as a service for telemetry to work") 11 | 12 | assert pos.target_utils.pos_bring_up() == True 13 | assert pos.prometheus.update_config() == True 14 | assert pos.prometheus.set_telemetry_configs() == True 15 | assert pos.prometheus.get_all_metrics() == True 16 | except Exception as e: 17 | logger.error(e) 18 | assert 0 19 | -------------------------------------------------------------------------------- /testcase/config_files/trident_config.json: -------------------------------------------------------------------------------- 1 | { 2 | "elk_log_stage": { 3 | "enable": true 4 | }, 5 | "copy_pos_log": { 6 | "test_fail": true, 7 | "test_pass": false 8 | }, 9 | "dump_pos_core": { 10 | "enable": false 11 | }, 12 | "telemetry_logs": { 13 | "start": true, 14 | "upload": false 15 | }, 16 | "pos_as_a_service": { 17 | "enable" : true 18 | }, 19 | "forced_fio_config": { 20 | "enable" : false, 21 | "fio_config" : { 22 | "size_based" : true, 23 | "max_size" : "10G", 24 | "time_based" : true, 25 | "max_runtime" : 120 26 | } 27 | } 28 | } 29 | -------------------------------------------------------------------------------- /testcase/subsystem/test_subsystem_sanity.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import logger 3 | from pos import POS 4 | 5 | logger = logger.get_logger(__name__) 6 | 7 | 8 | @pytest.mark.sanity 9 | def test_sanitySubsystem(array_fixture): 10 | try: 11 | pos = array_fixture 12 | assert pos.target_utils.get_subsystems_list() == True 13 | pos.data_dict["volume"]["pos_volumes"][0]["num_vol"] = 256 14 | pos.data_dict["array"]["num_array"] = 1 15 | assert pos.target_utils.bringup_array(data_dict=pos.data_dict) == True 16 | assert pos.target_utils.bringup_volume(data_dict=pos.data_dict) == True 17 | 18 | assert pos.target_utils.get_subsystems_list() == True 19 | 20 | except Exception as e: 21 | logger.error(f"TC failed due to {e}") 22 | assert 0 23 | -------------------------------------------------------------------------------- /testcase/config_files/topology.json: -------------------------------------------------------------------------------- 1 | { 2 | "login": 3 | { 4 | "target": 5 | { 6 | "number": 1, 7 | "server": 8 | [ 9 | { 10 | "ip":"127.0.0.1", 11 | "username":"root", 12 |
"password":"password", 13 | "Data_Ip": "127.0.0.1" 14 | } 15 | ] 16 | }, 17 | "initiator": 18 | { 19 | "number": 1, 20 | "client": 21 | [ 22 | { 23 | "ip":"127.0.0.1", 24 | "username":"root", 25 | "password":"password", 26 | "Data_Ip": "127.0.0.1" 27 | } 28 | 29 | ] 30 | } 31 | }, 32 | "paths": 33 | { 34 | "pos_path": "path of compiled POS source code" 35 | } 36 | } 37 | -------------------------------------------------------------------------------- /testcase/config_files/hetero_setup.json: -------------------------------------------------------------------------------- 1 | { 2 | "enable": "false", 3 | "num_test_device": 2, 4 | "test_devices": [ 5 | { 6 | "dev_name": "nvme0", 7 | "ns_config": [ 8 | { 9 | "num_namespace": 1, 10 | "ns_size": "20GiB", 11 | "attach": "true" 12 | }, 13 | { 14 | "num_namespace": 1, 15 | "ns_size": "100GiB", 16 | "attach": "true" 17 | } 18 | ] 19 | }, 20 | { 21 | "dev_name": "nvme1", 22 | "ns_config": [ 23 | { 24 | "num_namespace": 1, 25 | "ns_size": "19GiB", 26 | "attach": "true" 27 | }, 28 | { 29 | "num_namespace": 1, 30 | "ns_size": "20GiB", 31 | "attach": "true" 32 | } 33 | ] 34 | } 35 | ], 36 | "recovery": { 37 | "magic_number": "9877", 38 | "file_name": "target_hetero_setup.pickle", 39 | "dir_name": "/root/" 40 | } 41 | } 42 | -------------------------------------------------------------------------------- /testcase/system_daemon/test_system_daemon_sanity.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import traceback 3 | import time 4 | 5 | import logger 6 | logger = logger.get_logger(__name__) 7 | 8 | 9 | @pytest.mark.sanity 10 | def test_pos_start_after_reboot(system_fixture): 11 | """ 12 | The purpose of this test case is to verify POS is running after system 13 | reboot. 
14 | """ 15 | logger.info( 16 | " ================ Test : test_pos_start_after_reboot =============" 17 | ) 18 | try: 19 | pos = system_fixture 20 | if pos.pos_as_service == False: 21 | pytest.skip("POS should run as a service for telemetry to work") 22 | 23 | assert pos.target_utils.reboot_and_reconnect() == True 24 | 25 | # Wait for 3 minutes 26 | logger.info("Wait for 3 minutes after system start") 27 | time.sleep(180) 28 | 29 | # Return False if pos is running 30 | assert pos.target_utils.helper.check_pos_exit() == False 31 | 32 | logger.info(" ===================== Test ENDs ===================== ") 33 | except Exception as e: 34 | logger.error(f"Test script failed due to {e}") 35 | pos.exit_handler(expected=False) 36 | traceback.print_exc() 37 | -------------------------------------------------------------------------------- /testcase/config_files/multi_initiator_ops.json: -------------------------------------------------------------------------------- 1 | { 2 | "test_vol_lc_stress_io_stress_io_sanity_system_sanity_6_initiator": 3 | { 4 | "config": 5 | { 6 | "target":1, 7 | "initiator":1 8 | }, 9 | "validation": 10 | { 11 | "totalphase":8, 12 | "totaltime":5, 13 | "testcase": 14 | [ 15 | { "lib":"io_management", "name":"test_io_sanity_iteration_io_verify_random_pattern"}, 16 | { "lib":"io_management", "name":"test_io_sanity_set_get_threashold_io_gc"}, 17 | { "lib":"system_management", "name":"test_system_sanity_detach_attach_device_iteration_io_verify"}, 18 | { "lib":"vol_management", "name":"test_vol_lc_io_sanity_create_mount_io_unmount_mount_verifyio_umount_delete"}, 19 | { "lib":"vol_management", "name":"test_vol_lc_io_sanity_create_mount_verifyqos_unmount_delete"}, 20 | { "lib":"vol_management", "name":"test_vol_lc_stress_unmount_delete_create_mount_io"} 21 | ], 22 | "por": 23 | { 24 | "ibof": 25 | { 26 | "npor": { "valid": false, "phase":"5,7" }, 27 | "spor": { "valid": false } 28 | }, 29 | "device": 30 | { 31 | "npor": { "valid": false }, 32 | "spor": { "valid": false } 33 | } 34 | } 35 | } 36 | } 37 | } 38 | 39 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | BSD LICENSE 2 | 3 | Copyright (c) 2021 Samsung Electronics Corporation 4 | All rights reserved. 5 | 6 | Redistribution and use in source and binary forms, with or without 7 | modification, are permitted provided that the following conditions 8 | are met: 9 | 10 | * Redistributions of source code must retain the above copyright 11 | notice, this list of conditions and the following disclaimer. 12 | * Redistributions in binary form must reproduce the above copyright 13 | notice, this list of conditions and the following disclaimer in 14 | the documentation and/or other materials provided with the 15 | distribution. 16 | * Neither the name of Samsung Electronics Corporation nor the names of 17 | its contributors may be used to endorse or promote products derived 18 | from this software without specific prior written permission. 19 | 20 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 21 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 22 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 23 | A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT 24 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 25 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 26 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 27 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 28 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 29 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 30 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 31 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Trident 2 | Trident is a framework to test and explore Poseidon OS (POS). It provides Python APIs for all the POS CLI commands, so users can develop their own test suites. 3 | It is built on top of the pytest framework. The tool contains the following test cases to cover base scenarios: 4 | - Array, Volume management (create, delete, rename) 5 | - CLI test suite 6 | - Subsystem, QoS management 7 | - GC and flush 8 | - Telemetry and Prometheus 9 | - Array Disk Replace and Hot Remove 10 | 11 | A setup tool is provided to check whether the setup is ready for test execution. 12 | 13 | # Table of contents 14 | - [Download the Source Code](#download-the-source-code) 15 | - [Install Prerequisites](#install-prerequisites) 16 | - [Download and Build POS](#download-and-build-pos) 17 | - [Updating Trident config](#updating-trident-config) 18 | - [Run Test cases](#run-test-cases) 19 | - [Notes](#notes) 20 | # Download the Source Code 21 | 22 | `$git clone https://github.com/poseidonos/trident.git` 23 | 24 | # Install Prerequisites 25 | `$pip3 install --upgrade pip` 26 | 27 | `$pip3 install -r requirements.txt` 28 | 29 | # Download and Build POS 30 | Please refer to https://github.com/poseidonos/poseidonos/blob/main/README.md 31 | 32 | # Updating Trident config 33 | Update testcase/config_files/topology.json with system details such as IP addresses and the POS path 34 | 35 | Test the system by executing the setup tool from utils 36 | 37 | `$python3 utils/setup_tool.py all` 38 | 39 | Please refer to docs/UserGuide.md for details 40 | 41 | # Run Test cases 42 | `$python3 -m pytest -v -s testcase/ -m 'sanity' ` 43 | 44 | Please refer to docs/UserGuide.md for details 45 | 46 | # Notes 47 | Trident currently supports the 1.0.x version of Poseidon OS 48 | 49 | -------------------------------------------------------------------------------- /lib/thread.py: -------------------------------------------------------------------------------- 1 | """ 2 | BSD LICENSE 3 | 4 | Copyright (c) 2021 Samsung Electronics Corporation 5 | All rights reserved. 6 | 7 | Redistribution and use in source and binary forms, with or without 8 | modification, are permitted provided that the following conditions 9 | are met: 10 | 11 | * Redistributions of source code must retain the above copyright 12 | notice, this list of conditions and the following disclaimer. 13 | * Redistributions in binary form must reproduce the above copyright 14 | notice, this list of conditions and the following disclaimer in 15 | the documentation and/or other materials provided with the 16 | distribution. 17 | * Neither the name of Samsung Electronics Corporation nor the names of 18 | its contributors may be used to endorse or promote products derived 19 | from this software without specific prior written permission.
20 | 21 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 24 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 25 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 26 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 27 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 31 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 32 | """ 33 | 34 | import threading 35 | 36 | 37 | class ThreadReturnable(threading.Thread): 38 | def __init__(self, group=None, target=None, name=None, 39 | args=(), kwargs=None): 40 | threading.Thread.__init__(self, group, target, name, args, kwargs) 41 | self._return = None 42 | 43 | def run(self): 44 | if self._target is not None: 45 | self._return = self._target(*self._args, **self._kwargs) 46 | 47 | def join(self, *args): 48 | threading.Thread.join(self, *args) 49 | return self._return 50 | -------------------------------------------------------------------------------- /lib/threadable_node.py: -------------------------------------------------------------------------------- 1 | """ 2 | BSD LICENSE 3 | 4 | Copyright (c) 2021 Samsung Electronics Corporation 5 | All rights reserved. 6 | 7 | Redistribution and use in source and binary forms, with or without 8 | modification, are permitted provided that the following conditions 9 | are met: 10 | 11 | * Redistributions of source code must retain the above copyright 12 | notice, this list of conditions and the following disclaimer. 13 | * Redistributions in binary form must reproduce the above copyright 14 | notice, this list of conditions and the following disclaimer in 15 | the documentation and/or other materials provided with the 16 | distribution. 17 | * Neither the name of Samsung Electronics Corporation nor the names of 18 | its contributors may be used to endorse or promote products derived 19 | from this software without specific prior written permission. 20 | 21 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 24 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 25 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 26 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 27 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 31 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 | """ 33 | 34 | from concurrent import futures 35 | from typing import Union, List 36 | import threading 37 | 38 | 39 | def threaded(fn): 40 | def wrapper(*args, **kwargs): 41 | thread = threading.Thread(target=fn, args=args, kwargs=kwargs) 42 | thread.start() 43 | return thread 44 | return wrapper 45 | def sync_parallel_run(node_obj,cmd_list: List[Union[str, list]],) -> List[str]: 46 | 47 | 48 | results = [] 49 | with futures.ThreadPoolExecutor() as executor: 50 | tasks = [executor.submit(node_obj.execute, cmd) 51 | for cmd in cmd_list] 52 | for task in futures.as_completed(tasks): 53 | results.append(task.result()) 54 | return results 55 | -------------------------------------------------------------------------------- /testcase/userio/test_gc.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | # from lib.pos import POS 4 | import random 5 | from common_libs import * 6 | 7 | import logger 8 | logger = logger.get_logger(__name__) 9 | 10 | 11 | 12 | @pytest.mark.sanity 13 | def test_do_gc_emptyarray(array_fixture): 14 | try: 15 | """GC is expected to fail on 100% Free array""" 16 | pos = array_fixture 17 | status = pos.cli.wbt_do_gc("array1") 18 | assert status[0] == False 19 | logger.info(f"Expected failure for do gc") 20 | except Exception as e: 21 | logger.error(e) 22 | pos.exit_handler() 23 | 24 | 25 | @pytest.mark.sanity 26 | @pytest.mark.parametrize( 27 | "raid_type, nr_data_drives", 28 | [("RAID0", 2), ("RAID10", 4), ("RAID10", 2), ("no-raid", 1), ("RAID10", 8)],) 29 | def test_gcMaxvol(array_fixture, raid_type, nr_data_drives): 30 | """Trigger garbage collection with longevity of I/O""" 31 | try: 32 | pos = array_fixture 33 | 34 | assert pos.cli.device_list()[0] == True 35 | system_disks = pos.cli.system_disks 36 | if (nr_data_drives * 2) > len(system_disks): 37 | logger.warning("Insufficient system disks to test array create") 38 | pytest.skip("Required disk condition is not met") 39 | 40 | array_name = pos.data_dict["array"]["pos_array"][0]["array_name"] 41 | pos.data_dict["array"]["pos_array"][0]["raid_type"] = raid_type 42 | pos.data_dict["array"]["pos_array"][1]["raid_type"] = raid_type 43 | pos.data_dict["array"]["pos_array"][0]["write_back"] = random.choice( 44 | [True, False] 45 | ) 46 | pos.data_dict["array"]["pos_array"][1]["write_back"] = random.choice( 47 | [True, False] 48 | ) 49 | pos.data_dict["array"]["pos_array"][0]["data_device"] = nr_data_drives 50 | pos.data_dict["array"]["pos_array"][1]["data_device"] = nr_data_drives 51 | pos.data_dict["array"]["pos_array"][0]["spare_device"] = 0 52 | pos.data_dict["array"]["pos_array"][1]["spare_device"] = 0 53 | pos.data_dict["volume"]["pos_volumes"][0]["num_vol"] = 256 54 | pos.data_dict["volume"]["pos_volumes"][1]["num_vol"] = 256 55 | 56 | assert pos.target_utils.bringup_array(data_dict=pos.data_dict) == True 57 | assert pos.target_utils.bringup_volume(data_dict=pos.data_dict) == True 58 | run_io(pos) 59 | pos.cli.wbt_do_gc(array_name=array_name) 60 | pos.cli.wbt_get_gc_status(array_name=array_name) 61 | 62 | 63 | logger.info( 64 | " ============================= Test ENDs ======================================" 65 | ) 66 | 67 | except Exception as e: 68 | logger.error(e) 69 | pos.exit_handler() 70 | -------------------------------------------------------------------------------- /testcase/qos/test_qos.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import pos 3 | from common_libs import * 4 | 5 | 
import logger 6 | logger = logger.get_logger(__name__) 7 | 8 | 9 | @pytest.mark.sanity 10 | def test_qos_happy_path(array_fixture): 11 | try: 12 | pos = array_fixture 13 | pos.data_dict["volume"]["pos_volumes"][0]["num_vol"] = 1 14 | pos.data_dict["array"]["num_array"] = 1 15 | assert pos.target_utils.bringup_array(data_dict=pos.data_dict) == True 16 | assert pos.target_utils.bringup_volume(data_dict=pos.data_dict) == True 17 | run_io(pos) 18 | except Exception as e: 19 | logger.error(e) 20 | pos.exit_handler() 21 | assert 0 22 | 23 | 24 | @pytest.mark.sanity 25 | @pytest.mark.parametrize("num_vol", [1, 256]) 26 | def test_qos_set_reset(array_fixture, num_vol): 27 | try: 28 | pos = array_fixture 29 | pos.data_dict["volume"]["pos_volumes"][0]["num_vol"] = num_vol 30 | pos.data_dict["array"]["num_array"] = 1 31 | assert pos.target_utils.bringup_array(data_dict=pos.data_dict) == True 32 | assert pos.target_utils.bringup_volume(data_dict=pos.data_dict) == True 33 | run_io(pos) 34 | assert pos.cli.volume_list(array_name="array1")[0] == True 35 | for vol in pos.cli.vols: 36 | assert ( 37 | pos.cli.qos_reset_volume_policy(volumename=vol, arrayname="array1")[0] 38 | == True 39 | ) 40 | 41 | except Exception as e: 42 | logger.error(e) 43 | pos.exit_handler() 44 | assert 0 45 | 46 | 47 | @pytest.mark.sanity 48 | def test_qos_rebuilding_Array(array_fixture): 49 | try: 50 | pos = array_fixture 51 | pos.data_dict["volume"]["pos_volumes"][0]["num_vol"] = 1 52 | pos.data_dict["array"]["num_array"] = 1 53 | pos.data_dict["array"]["pos_array"][0]["spare_device"] = 1 54 | assert pos.target_utils.bringup_array(data_dict=pos.data_dict) == True 55 | assert pos.target_utils.bringup_volume(data_dict=pos.data_dict) == True 56 | run_io(pos) 57 | 58 | assert pos.cli.array_list()[0] == True 59 | for index, array in enumerate(list(pos.cli.array_dict.keys())): 60 | assert pos.cli.array_info(array_name=array)[0] == True 61 | assert ( 62 | pos.target_utils.device_hot_remove( 63 | device_list=[pos.cli.array_data[array]["data_list"][0]] 64 | ) 65 | == True 66 | ) 67 | assert pos.target_utils.array_rebuild_wait(array_name=array) == True 68 | assert pos.cli.volume_list(array_name="array1")[0] == True 69 | for vol in pos.cli.vols: 70 | assert ( 71 | pos.cli.qos_reset_volume_policy(volumename=vol, arrayname="array1")[0] 72 | == True 73 | ) 74 | 75 | except Exception as e: 76 | logger.error(e) 77 | pos.exit_handler() 78 | assert 0 79 | -------------------------------------------------------------------------------- /testcase/array/test_save_restore_sanity.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import traceback 3 | 4 | import logger 5 | logger = logger.get_logger(__name__) 6 | 7 | @pytest.fixture(scope="function") 8 | def save_restore_fixture(system_fixture): 9 | pos = system_fixture 10 | assert pos.pos_conf.save_restore(enable=True, update_now=True) == True 11 | assert pos.target_utils.remove_restore_file() == True 12 | assert pos.target_utils.pos_bring_up() == True 13 | 14 | yield pos 15 | 16 | assert pos.pos_conf.save_restore(enable=False, update_now=True) == True 17 | 18 | 19 | @pytest.mark.sanity 20 | def test_save_restore_spor(save_restore_fixture): 21 | """ 22 | The purpose of this test case is to create and mount arrays, then 23 | create and mount volumes.
Do SPOR and verify the POS save-restore feature 24 | """ 25 | logger.info( 26 | " ================ Test : test_array_save_restore_spor =============" 27 | ) 28 | try: 29 | pos = save_restore_fixture 30 | assert pos.cli.array_list()[0] == True 31 | for array_name in pos.cli.array_dict.keys(): 32 | array_data = pos.cli.array_dict[array_name] 33 | logger.info(f"Array - Name:{array_name}, Data:{array_data}") 34 | assert pos.cli.volume_list(array_name=array_name)[0] == True 35 | #for vol_dict.keys() 36 | 37 | assert pos.target_utils.spor(save_restore=True, 38 | restore_verify=True) == True 39 | 40 | assert pos.cli.array_list()[0] == True 41 | 42 | logger.info(" ===================== Test ENDs ===================== ") 43 | except Exception as e: 44 | logger.error(f"Test script failed due to {e}") 45 | pos.exit_handler(expected=False) 46 | traceback.print_exc() 47 | 48 | 49 | @pytest.mark.sanity 50 | def test_save_restore_spor_clean_bringup(save_restore_fixture): 51 | """ 52 | The purpose of this test case is to create and mount arrays, then create 53 | and mount volumes. Do SPOR, delete the restore json, and then bring up POS. 54 | """ 55 | logger.info( 56 | " ================ Test : test_save_restore_spor_clean_bringup =============" 57 | ) 58 | try: 59 | pos = save_restore_fixture 60 | assert pos.cli.array_list()[0] == True 61 | for array_name in pos.cli.array_dict.keys(): 62 | array_data = pos.cli.array_dict[array_name] 63 | logger.info(f"Array - Name:{array_name}, Data:{array_data}") 64 | assert pos.cli.volume_list(array_name=array_name)[0] == True 65 | 66 | assert pos.target_utils.remove_restore_file() == True 67 | assert pos.target_utils.spor(save_restore=False, 68 | restore_verify=False) == True 69 | 70 | assert pos.cli.array_list()[0] == True 71 | 72 | logger.info(" ===================== Test ENDs ===================== ") 73 | except Exception as e: 74 | logger.error(f"Test script failed due to {e}") 75 | pos.exit_handler(expected=False) 76 | traceback.print_exc() 77 | 78 | -------------------------------------------------------------------------------- /testcase/array/test_cmd_prgrs_sanity.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | from common_libs import * 3 | import logger 4 | logger = logger.get_logger(__name__) 5 | 6 | @pytest.mark.sanity 7 | def test_cmd_prgrs_sanity(array_fixture): 8 | ''' 9 | The purpose of the test is to verify 10 | command progress of mounting the array and unmounting the array 11 | ''' 12 | try: 13 | logger.info( 14 | f" ============== Test : start of test_cmd_prgrs_sanity =============" 15 | ) 16 | pos = array_fixture 17 | #creating array 18 | array_name = pos.data_dict["array"]["pos_array"][0]["array_name"] 19 | single_array_data_setup(pos.data_dict, "RAID5", 4, 0, "NO", False) 20 | assert pos.target_utils.bringup_array(data_dict=pos.data_dict) == True 21 | assert pos.cli.array_list()[0] == True 22 | assert pos.cli.array_info(array_name=array_name)[0] == True 23 | #verifying array mount progress in report.log 24 | mount_progress = pos.target_utils.report_log_array("array_mount") 25 | logger.info(mount_progress) 26 | if array_name in mount_progress: 27 | logger.info( 28 | f" ============== array mount command progress verified successfully =============" 29 | ) 30 | #verifying volume mount progress in report.log 31 | assert pos.cli.volume_create(array_name=array_name,volumename="vol1",size='1gb')[0] == True 32 | assert pos.cli.volume_mount(array_name=array_name,volumename="vol1")[0] == True 33 | assert
pos.cli.volume_info(array_name=array_name,vol_name="vol1")[0] == True 34 | cmd_mount_out = pos.target_utils.report_log_volume("volume_mount") 35 | logger.info(cmd_mount_out) 36 | if "vol1" in cmd_mount_out: 37 | logger.info( 38 | f" ============== volume mount command progress verified successfully =============" 39 | ) 40 | #verifying volume unmount progress in report.log 41 | assert pos.cli.volume_unmount(array_name=array_name,volumename="vol1")[0] == True 42 | cmd_unmount_out = pos.target_utils.report_log_volume("volume_unmount") 43 | logger.info(cmd_unmount_out) 44 | if "vol1" in cmd_unmount_out: 45 | logger.info( 46 | f" ============== volume unmount command progress verified successfully =============" 47 | ) 48 | #verifying array unmount progress in report.log 49 | assert pos.cli.array_unmount(array_name=array_name)[0] == True 50 | unmount_progress = pos.target_utils.report_log_array("array_unmount") 51 | logger.info(unmount_progress) 52 | if array_name in unmount_progress: 53 | logger.info( 54 | f" ============== array unmount command progress verified successfully =============" 55 | ) 56 | logger.info( 57 | f" ============== Test : end of test_cmd_prgrs_sanity =============" 58 | ) 59 | 60 | except Exception as e: 61 | logger.error(f"Test script failed due to {e}") 62 | pos.exit_handler(expected=False) -------------------------------------------------------------------------------- /testcase/config_files/pos_config.json: -------------------------------------------------------------------------------- 1 | { 2 | "system": { 3 | "phase": "true", 4 | "pos_as_service": "false" 5 | }, 6 | "device": { 7 | "phase": "true", 8 | "uram": [ 9 | { 10 | "uram_name": "uram0", 11 | "bufer_size": "8388608", 12 | "strip_size": "512", 13 | "numa_node": "0" 14 | }, 15 | { 16 | "uram_name": "uram1", 17 | "bufer_size": "8388608", 18 | "strip_size": "512", 19 | "numa_node": "0" 20 | } 21 | ] 22 | }, 23 | "array": { 24 | "phase": "true", 25 | "num_array": 2, 26 | "pos_array": [ 27 | { 28 | "array_name": "array1", 29 | "data_device": 3, 30 | "spare_device": 0, 31 | "raid_type": "RAID5", 32 | "uram": "uram0", 33 | "auto_create": "true", 34 | "mount": "true", 35 | "write_back": "false" 36 | }, 37 | { 38 | "array_name": "array2", 39 | "data_device": 3, 40 | "spare_device": 0, 41 | "raid_type": "RAID5", 42 | "uram": "uram1", 43 | "auto_create": "true", 44 | "mount": "true", 45 | "write_back": "false" 46 | } 47 | ] 48 | }, 49 | "volume": { 50 | "phase": "true", 51 | "pos_volumes": [ 52 | { 53 | "num_vol": 2, 54 | "vol_name_pre": "pos_vol", 55 | "array_name": "array1", 56 | "size": "None", 57 | "qos": { 58 | "maxbw": 100000, 59 | "minbw": 1000, 60 | "maxiops": 10000000, 61 | "miniops": 1000 62 | }, 63 | "mount": { 64 | "phase": "true", 65 | "nqn_pre": "nqn.2022-10-array1.pos:subsystem", 66 | "subsystem_range": "1-1" 67 | } 68 | }, 69 | 70 | { 71 | "num_vol": 2, 72 | "vol_name_pre": "pos_vol", 73 | "array_name": "array2", 74 | "size": "None", 75 | "qos": { 76 | "maxbw": 100000, 77 | "minbw": 1000, 78 | "maxiops": 10000000, 79 | "miniops": 1000 80 | }, 81 | "mount": { 82 | "phase": "true", 83 | "nqn_pre": "nqn.2022-10-array2.pos:subsystem", 84 | "subsystem_range": "1-1" 85 | } 86 | } 87 | ] 88 | }, 89 | "subsystem": { 90 | "phase": "true", 91 | "pos_subsystems": [ 92 | { 93 | "nr_subsystems": 1, 94 | "base_nqn_name": "nqn.2022-10.pos-array1", 95 | "ns_count": "512", 96 | "serial_number": "POS000000000001", 97 | "model_name": "POS_VOLUME_array1" 98 | }, 99 | { 100 | "nr_subsystems": 1, 101 | "base_nqn_name": 
"nqn.2022-10.pos-array2", 102 | "ns_count": "512", 103 | "serial_number": "POS000000000001", 104 | "model_name": "POS_VOLUME_array2" 105 | } 106 | ] 107 | } 108 | } 109 | -------------------------------------------------------------------------------- /testcase/cli/test_cli.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | from pos import POS 3 | from common_libs import * 4 | 5 | import logger 6 | logger = logger.get_logger(__name__) 7 | 8 | 9 | def device(pos): 10 | logger.info(" ================= DEVICE =================") 11 | assert pos.cli.device_list()[0] == True 12 | # assert pos.cli.device_smart(pos.cli.dev_type['SSD'][0])[0] == True 13 | assert pos.cli.device_smart_log(pos.cli.dev_type["SSD"][0])[0] == True 14 | 15 | 16 | def qos(pos): 17 | logger.info(" ================= qos ===================") 18 | assert pos.cli.volume_list(array_name="array1")[0] == True 19 | assert ( 20 | pos.cli.qos_create_volume_policy( 21 | volumename=pos.cli.vols[0], 22 | arrayname="array1", 23 | maxiops="1000000000000", 24 | maxbw="21313123113", 25 | )[0] 26 | == True 27 | ) 28 | assert ( 29 | pos.cli.qos_list_volume_policy(volumename=pos.cli.vols[0], arrayname="array1")[ 30 | 0 31 | ] 32 | == True 33 | ) 34 | assert ( 35 | pos.cli.qos_reset_volume_policy(volumename=pos.cli.vols[0], arrayname="array1")[ 36 | 0 37 | ] 38 | == True 39 | ) 40 | 41 | 42 | def array(pos): 43 | logger.info(" ================= ARRAY ===================") 44 | assert pos.cli.array_list()[0] == True 45 | volume(pos) 46 | assert ( 47 | pos.cli.array_addspare( 48 | device_name=pos.cli.system_disks[0], array_name="array1" 49 | )[0] 50 | == True 51 | ) 52 | assert pos.cli.array_info(array_name="array1")[0] == True 53 | assert ( 54 | pos.cli.array_rmspare( 55 | device_name=pos.cli.array_data["array1"]["spare_list"][0], 56 | array_name="array1", 57 | )[0] 58 | == True 59 | ) 60 | for array in list(pos.cli.array_dict.keys()): 61 | assert pos.cli.array_unmount(array_name=array)[0] == True 62 | assert pos.cli.array_delete(array_name=array)[0] == True 63 | 64 | assert pos.cli.telemetry_stop()[0] == True 65 | assert pos.cli.devel_eventwrr_reset()[0] == True 66 | 67 | 68 | def volume(pos): 69 | logger.info(" ==================== Volume ===============") 70 | assert pos.cli.volume_info(array_name="array1", vol_name=pos.cli.vols[0])[0] == True 71 | assert pos.cli.volume_list(array_name="array1")[0] == True 72 | # assert pos.cli.volume_rename("newvol", pos.cli.vols[0], "array1")[0] == True 73 | assert pos.cli.volume_unmount(pos.cli.vols[0], "array1")[0] == True 74 | assert pos.cli.volume_info(array_name="array1", vol_name=pos.cli.vols[0])[0] == True 75 | assert pos.cli.volume_rename("newvol", pos.cli.vols[0], "array1")[0] == True 76 | assert pos.cli.volume_delete("newvol", "array1")[0] == True 77 | 78 | 79 | @pytest.mark.sanity 80 | def test_cli_happypath(array_fixture): 81 | 82 | try: 83 | pos = array_fixture 84 | assert pos.target_utils.bringup_array(data_dict=pos.data_dict) == True 85 | assert pos.target_utils.bringup_volume(data_dict=pos.data_dict) == True 86 | run_io(pos) 87 | device(pos) 88 | qos(pos) 89 | 90 | logger.info("====================GC=====================") 91 | pos.cli.wbt_do_gc(array_name="array1") 92 | pos.cli.wbt_get_gc_status(array_name="array1") 93 | 94 | logger.info(" ================== logger ================") 95 | assert pos.cli.logger_get_log_level()[0] == True 96 | assert pos.cli.logger_info()[0] == True 97 | assert 
pos.cli.logger_set_log_level(level="debug")[0] == True 98 | # assert pos.cli.logger_apply_log_filter()[0] == True 99 | array(pos) 100 | 101 | logger.info("================== telemetry ===============") 102 | assert pos.cli.telemetry_start()[0] == True 103 | logger.info("================== devel ==================") 104 | assert pos.cli.devel_eventwrr_update("rebuild", "1")[0] == True 105 | # pos.exit_handler(expected = True) 106 | except Exception as e: 107 | logger.error(f"Test script failed due to {e}") 108 | pos.exit_handler() 109 | assert 0 110 | -------------------------------------------------------------------------------- /lib/logger.py: -------------------------------------------------------------------------------- 1 | """ 2 | BSD LICENSE 3 | 4 | Copyright (c) 2021 Samsung Electronics Corporation 5 | All rights reserved. 6 | 7 | Redistribution and use in source and binary forms, with or without 8 | modification, are permitted provided that the following conditions 9 | are met: 10 | 11 | * Redistributions of source code must retain the above copyright 12 | notice, this list of conditions and the following disclaimer. 13 | * Redistributions in binary form must reproduce the above copyright 14 | notice, this list of conditions and the following disclaimer in 15 | the documentation and/or other materials provided with the 16 | distribution. 17 | * Neither the name of Samsung Electronics Corporation nor the names of 18 | its contributors may be used to endorse or promote products derived 19 | from this software without specific prior written permission. 20 | 21 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 24 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 25 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 26 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 27 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 31 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
32 | """ 33 | 34 | import logging 35 | import sys 36 | import os 37 | import datetime 38 | import platform 39 | from logging.handlers import TimedRotatingFileHandler 40 | 41 | path = ( 42 | os.path.dirname(os.path.dirname(os.path.abspath(__file__))) 43 | + "/logs/POS_" 44 | + datetime.datetime.now().strftime("%Y-%m-%d_%H_%M") 45 | ) 46 | FORMATTER = logging.Formatter( 47 | "%(asctime)s %(filename)s %(funcName)s %(lineno)s %(levelname)s :: %(message)s" 48 | ) 49 | LOG_FILE = "{}/pos_execution".format(path) + ".log" 50 | 51 | 52 | def mkdir_p(path): 53 | """ 54 | Method: to create log dir 55 | :param path: path of log dir 56 | :return: None 57 | """ 58 | 59 | if not os.path.exists(path): 60 | os.makedirs(path) 61 | 62 | 63 | def get_console_handler(): 64 | """ 65 | Method to get console handler 66 | :return: console handler 67 | """ 68 | console_handler = logging.StreamHandler(sys.stdout) 69 | console_handler.setFormatter(FORMATTER) 70 | return console_handler 71 | 72 | 73 | def get_file_handler(): 74 | 75 | """ 76 | Method: to get file handler 77 | :return: file handler 78 | """ 79 | mkdir_p(path) 80 | file_handler = TimedRotatingFileHandler(LOG_FILE, when="H", interval=48) 81 | file_handler.setFormatter(FORMATTER) 82 | return file_handler 83 | 84 | 85 | def get_logger(logger_name): 86 | """ 87 | Method : to get logger 88 | :param logger_name: Logger name 89 | :return: logger 90 | 91 | Usage: 92 | logger= logger.get_logger(__name__) 93 | 94 | """ 95 | 96 | logger = logging.getLogger(logger_name) 97 | if not getattr(logger, "handler_set", None): 98 | logger.setLevel(logging.DEBUG) # better to have too much log than not enough 99 | logger.addHandler(get_console_handler()) 100 | logger.addHandler(get_file_handler()) 101 | logger.propagate = False 102 | logger.handler_set = True 103 | return logger 104 | 105 | 106 | def get_logname(): 107 | """ 108 | 109 | :return: log name 110 | 111 | Method: to get the log name file name 112 | 113 | usage: 114 | logger.get_logname() 115 | """ 116 | return LOG_FILE 117 | 118 | 119 | def get_logpath(): 120 | """ 121 | 122 | :return: log path 123 | 124 | Method: to get the logpath 125 | usage: 126 | logger.get_logpath() 127 | """ 128 | return path 129 | -------------------------------------------------------------------------------- /testcase/volume/test_volume_sanity.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import random 3 | 4 | import logger 5 | logger = logger.get_logger(__name__) 6 | 7 | 8 | def random_string(length): 9 | rstring = "" 10 | rstr_seq = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" 11 | for i in range(0, length): 12 | if i % length == 0 and i != 0: 13 | rstring += "-" 14 | rstring += str(rstr_seq[random.randint(0, len(rstr_seq) - 1)]) 15 | return rstring 16 | 17 | 18 | @pytest.mark.sanity 19 | @pytest.mark.parametrize("numvol", [1,256]) 20 | @pytest.mark.parametrize( 21 | "volsize", ["1mb", "1gb"] 22 | ) # None means max size of the array/num of vols per array 23 | def test_SanityVolume(array_fixture, numvol, volsize): 24 | try: 25 | logger.info( 26 | f" ============== Test : volsize {volsize} numvol {numvol} =============" 27 | ) 28 | pos = array_fixture 29 | pos.data_dict["volume"]["pos_volumes"][0]["num_vol"] = numvol 30 | pos.data_dict["volume"]["pos_volumes"][1]["num_vol"] = numvol 31 | pos.data_dict["array"]["num_array"] = 2 32 | pos.data_dict["volume"]["pos_volumes"][0]["size"] = volsize 33 | pos.data_dict["volume"]["pos_volumes"][1]["size"] = volsize 34 | assert 
pos.target_utils.bringup_array(data_dict=pos.data_dict) == True 35 | assert pos.target_utils.bringup_volume(data_dict=pos.data_dict) == True 36 | # Negative test: multiple invalid commands 37 | for nums in range(numvol): 38 | volname = f"tempvolpos{str(nums)}" 39 | status = pos.cli.volume_create( 40 | volumename=volname, array_name="array33", size=volsize 41 | ) 42 | assert status[0] == False 43 | event_name = status[1]['output']['Response']['result']['status']['eventName'] 44 | logger.info(f"Expected failure for volume create due to {event_name}") 45 | # mount a volume that was never created on array1 46 | status = pos.cli.volume_mount(volumename=volname, array_name="array1") 47 | 48 | assert status[0] == False 49 | event_name = status[1]['output']['Response']['result']['status']['eventName'] 50 | logger.info(f"Expected failure for volume mount due to {event_name}") 51 | #volume re-mount 52 | 53 | assert pos.cli.volume_list(array_name="array1")[0] == True 54 | for vol in pos.cli.vols: 55 | rlist = [i for i in range(10, 255)] 56 | newname = random_string(random.choice(rlist)) 57 | assert pos.cli.volume_info(array_name="array1", vol_name=vol)[0] == True 58 | assert ( 59 | pos.cli.volume_rename( 60 | new_volname=newname, volname=vol, array_name="array1" 61 | )[0] 62 | == True 63 | ) 64 | assert ( 65 | pos.cli.volume_unmount(volumename=newname, array_name="array1")[0] 66 | == True 67 | ) 68 | assert pos.cli.volume_info(array_name="array1", vol_name=newname)[0] == True 69 | assert ( 70 | pos.cli.volume_delete(volumename=newname, array_name="array1")[0] 71 | == True 72 | ) 73 | 74 | except Exception as e: 75 | logger.error(f" ======= Test FAILED due to {e} ========") 76 | assert 0 77 | 78 | 79 | @pytest.mark.sanity() 80 | def test_volumesanity257vols(array_fixture): 81 | array_name = "array1" 82 | try: 83 | pos = array_fixture 84 | pos.data_dict["volume"]["pos_volumes"][0]["num_vol"] = 256 85 | pos.data_dict["array"]["num_array"] = 1 86 | 87 | assert pos.target_utils.bringup_array(data_dict=pos.data_dict) == True 88 | assert pos.target_utils.bringup_volume(data_dict=pos.data_dict) == True 89 | # negative test: the 257th volume exceeds the 256-volumes-per-array limit 90 | status = pos.cli.volume_create( 91 | volumename="invalidvol", array_name=array_name, size="1gb" 92 | ) 93 | assert status[0] == False 94 | event_name = status[1]['output']['Response']['result']['status']['eventName'] 95 | logger.info(f"Expected failure for volume create due to {event_name}") 96 | 97 | except Exception as e: 98 | logger.error(f" ======= Test FAILED due to {e} ========") 99 | assert 0 100 | -------------------------------------------------------------------------------- /lib/proc.py: -------------------------------------------------------------------------------- 1 | """ 2 | BSD LICENSE 3 | 4 | Copyright (c) 2021 Samsung Electronics Corporation 5 | All rights reserved. 6 | 7 | Redistribution and use in source and binary forms, with or without 8 | modification, are permitted provided that the following conditions 9 | are met: 10 | 11 | * Redistributions of source code must retain the above copyright 12 | notice, this list of conditions and the following disclaimer. 13 | * Redistributions in binary form must reproduce the above copyright 14 | notice, this list of conditions and the following disclaimer in 15 | the documentation and/or other materials provided with the 16 | distribution.
17 | * Neither the name of Samsung Electronics Corporation nor the names of 18 | its contributors may be used to endorse or promote products derived 19 | from this software without specific prior written permission. 20 | 21 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 24 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 25 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 26 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 27 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 31 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 32 | """ 33 | 34 | import re 35 | import time 36 | import logger 37 | 38 | 39 | log = logger.get_logger(__name__) 40 | 41 | 42 | class ExecutionError(RuntimeError): 43 | """Exception to signify an execution error""" 44 | 45 | pass 46 | 47 | 48 | class TimeOutError(ExecutionError): 49 | """Exception to signify a time-out on the CLI""" 50 | 51 | pass 52 | 53 | 54 | class Proc(object): 55 | """This class enables one to keep track of an async process, 56 | monitor its status and read the output later 57 | NOTE : An instance of this class is meant to run just one process, 58 | to run other processes, create another instance of Proc 59 | ** If the expected output is very large then the buf size needs 60 | to be adjusted appropriately. If memory is of concern, then it is 61 | necessary to redirect the output to a file 62 | """ 63 | 64 | def __init__(self, channel): 65 | """Initialization code. 66 | channel: session over which commands are executed 67 | """ 68 | 69 | self.channel = channel 70 | 71 | def is_complete(self): 72 | """Checks if the process has completed execution 73 | RETURNS : True / False 74 | usage: 75 | c1=Proc(channel) 76 | c1.is_complete() 77 | 78 | """ 79 | return self.channel.exit_status_ready() 80 | 81 | def terminate(self): 82 | """Terminates the process and closes the interface 83 | c1=Proc(channel) 84 | c1.terminate() 85 | 86 | """ 87 | self.channel.close() 88 | 89 | def wait_for_completion(self, sleep=3, tolerance=200): 90 | """Polls until the completion of the process. Raises 91 | a TimeOutError exception if it times out.
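Polls every `sleep` seconds for at most `tolerance` attempts; the defaults (3 s x 200 attempts) give roughly a 10-minute wait budget.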
92 | Returns : None 93 | *Note : This is a blocking call 94 | c1=Proc(channel) 95 | c1.wait_for_completion() 96 | """ 97 | attempts = 0 98 | while not self.is_complete() and attempts < tolerance: 99 | time.sleep(sleep) 100 | attempts += 1 101 | 102 | if attempts >= tolerance: 103 | raise TimeOutError("Timed out waiting for the process to complete") 104 | 105 | # TODO : Add support for large output 106 | def get_output(self): 107 | """Gets the output from the command executed 108 | RETURNS : String 109 | 110 | Usage: 111 | c1=Proc(channel) 112 | c1.get_output() 113 | """ 114 | buf = b"" 115 | while self.channel.recv_ready(): 116 | buf += self.channel.recv(6024)  # accumulate all pending output 117 | 118 | return buf 119 | -------------------------------------------------------------------------------- /testcase/array/test_hetero_multi_sanity.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import traceback 3 | 4 | from common_libs import * 5 | import logger 6 | 7 | logger = logger.get_logger(__name__) 8 | 9 | @pytest.mark.sanity 10 | def test_hetero_multi_array_max_size_volume(array_fixture): 11 | """ 12 | Test two RAID5 arrays using hetero devices. Create a max-size volume on each array. 13 | """ 14 | logger.info( 15 | " ==================== Test : test_hetero_multi_array_max_size_volume ================== " 16 | ) 17 | try: 18 | pos = array_fixture 19 | 20 | assert array_create_and_list(pos) == True 21 | assert volume_create_and_mount_multiple(pos, num_volumes=1) == True 22 | except Exception as e: 23 | logger.error(f"Test script failed due to {e}") 24 | traceback.print_exc() 25 | pos.exit_handler(expected=False) 26 | 27 | logger.info( 28 | " ============================= Test ENDs ======================================" 29 | ) 30 | 31 | @pytest.mark.sanity 32 | def test_hetero_degraded_array_create_delete_vols(array_fixture): 33 | """ 34 | Test two RAID5 arrays using hetero devices, make them degraded, and create and delete a volume on each array. 35 | """ 36 | logger.info( 37 | " ==================== Test : test_hetero_degraded_array_create_delete_vols ================== " 38 | ) 39 | try: 40 | pos = array_fixture 41 | 42 | assert array_create_and_list(pos) == True 43 | 44 | # Hot Remove Disk 45 | for array_name in pos.cli.array_dict.keys(): 46 | data_dev_list = pos.cli.array_data[array_name]["data_list"] 47 | remove_drives = data_dev_list[:1] 48 | assert pos.target_utils.device_hot_remove(device_list=remove_drives) 49 | 50 | # Create and delete a volume on each faulty array 51 | for array_name in pos.cli.array_dict.keys(): 52 | vol_size = "1G" 53 | vol_name = f"{array_name}_pos_vol" 54 | assert pos.cli.volume_create(vol_name, vol_size, 55 | array_name=array_name)[0] == True 56 | 57 | assert pos.cli.volume_delete(vol_name, array_name)[0] == True 58 | 59 | except Exception as e: 60 | logger.error(f"Test script failed due to {e}") 61 | traceback.print_exc() 62 | pos.exit_handler(expected=False) 63 | 64 | logger.info( 65 | " ============================= Test ENDs ======================================" 66 | ) 67 | 68 | @pytest.mark.sanity 69 | def test_hetero_degraded_array_unmount(array_fixture): 70 | """ 71 | Create two RAID5 arrays using hetero devices, make one array degraded and unmount it. 72 | It should not impact the other array.
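The info of the remaining array should still be retrievable (the test queries array_info for both arrays afterwards).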
73 | """ 74 | logger.info( 75 | " ==================== Test : test_hetero_degraded_array_unmount ================== " 76 | ) 77 | try: 78 | pos = array_fixture 79 | 80 | assert array_create_and_list(pos) == True 81 | 82 | # Hot Remove Disk 83 | for array_name in pos.cli.array_dict.keys(): 84 | data_dev_list = pos.cli.array_data[array_name]["data_list"] 85 | remove_drives = data_dev_list[:1] 86 | assert pos.target_utils.device_hot_remove(device_list=remove_drives) 87 | 88 | # Unmount the degraded array 89 | assert pos.cli.array_unmount(array_name=array_name)[0] == True 90 | 91 | # Get the array info for both array 92 | for array_name in pos.cli.array_dict.keys(): 93 | assert pos.cli.array_info(array_name=array_name)[0] == True 94 | 95 | except Exception as e: 96 | logger.error(f"Test script failed due to {e}") 97 | traceback.print_exc() 98 | pos.exit_handler(expected=False) 99 | 100 | logger.info( 101 | " ============================= Test ENDs ======================================" 102 | ) 103 | 104 | 105 | def array_create_and_list(pos): 106 | try: 107 | # Loop 2 times to create two RAID array of RAID5 using hetero device 108 | for array_index in range(2): 109 | data_disk_req = {'mix': 2, 'any': 1} 110 | assert create_hetero_array(pos, "RAID5", data_disk_req, 111 | array_index=array_index, array_mount="WT", 112 | array_info=True) == True 113 | 114 | assert pos.cli.array_list()[0] == True 115 | except Exception as e: 116 | logger.error(f"Array create and list failed due to {e}") 117 | traceback.print_exc() 118 | return False 119 | return True 120 | 121 | -------------------------------------------------------------------------------- /lib/profiler.py: -------------------------------------------------------------------------------- 1 | """ 2 | BSD LICENSE 3 | 4 | Copyright (c) 2021 Samsung Electronics Corporation 5 | All rights reserved. 6 | 7 | Redistribution and use in source and binary forms, with or without 8 | modification, are permitted provided that the following conditions 9 | are met: 10 | 11 | * Redistributions of source code must retain the above copyright 12 | notice, this list of conditions and the following disclaimer. 13 | * Redistributions in binary form must reproduce the above copyright 14 | notice, this list of conditions and the following disclaimer in 15 | the documentation and/or other materials provided with the 16 | distribution. 17 | * Neither the name of Samsung Electronics Corporation nor the names of 18 | its contributors may be used to endorse or promote products derived 19 | from this software without specific prior written permission. 20 | 21 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 24 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 25 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 26 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 27 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 31 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
32 | """ 33 | 34 | from node import SSHclient 35 | import logger 36 | import logger as logger_obj 37 | import time 38 | import datetime 39 | 40 | logger = logger.get_logger(__name__) 41 | 42 | 43 | import threading 44 | 45 | 46 | class ProfilerThread(threading.Thread): 47 | 48 | """ 49 | Purpose of class: 50 | ProfilerThread class having methods for running and killing background stats collection task 51 | """ 52 | 53 | def __init__(self, ssh_obj, sleep_interval=5): 54 | """ 55 | 56 | :param ssh_obj: ssh_object where remote execution needs to be done 57 | :param sleep_interval: Sleep interval in between stats collection 58 | """ 59 | super(ProfilerThread, self).__init__() 60 | self._kill = threading.Event() 61 | self._interval = sleep_interval 62 | self.ssh_obj = ssh_obj 63 | self.profiler_log_path = logger_obj.get_logpath() 64 | self.profiler_log = self.profiler_log_path + "/profiler.log" 65 | 66 | def run(self): 67 | """ 68 | 69 | :return: None 70 | Method: to start the stats collection work 71 | 72 | Usage: 73 | c1= ProfilerThread(ssh_obj) 74 | c1.start() 75 | Note: Call start method instead of run as start method internally will be calling run method 76 | 77 | """ 78 | while True: 79 | 80 | cpu_output = self.ssh_obj.execute("mpstat ") 81 | disk_output = self.ssh_obj.execute("iostat ") 82 | mem_output = self.ssh_obj.execute("free -m") 83 | with open(self.profiler_log, "a+") as log_handler: 84 | log_handler.write( 85 | "{}=============={}===============\n".format( 86 | str(datetime.datetime.now()), "Disk_usage" 87 | ) 88 | ) 89 | for lines in disk_output: 90 | log_handler.write(lines) 91 | 92 | log_handler.write( 93 | "{}=============={}===============\n".format( 94 | str(datetime.datetime.now()), "cpu_usage" 95 | ) 96 | ) 97 | for lines in cpu_output: 98 | log_handler.write(lines) 99 | 100 | log_handler.write( 101 | "{}=============={}===============\n".format( 102 | str(datetime.datetime.now()), "Mem_usage" 103 | ) 104 | ) 105 | for lines in mem_output: 106 | log_handler.write(lines) 107 | is_killed = self._kill.wait(self._interval) 108 | if is_killed: 109 | break 110 | 111 | def kill(self): 112 | 113 | """ 114 | 115 | :return:None 116 | method: To kill background stats collection task 117 | 118 | Usage: 119 | c1= ProfilerThread(ssh_obj) 120 | c1.kill() 121 | 122 | 123 | """ 124 | self._kill.set() 125 | 126 | 127 | """ 128 | if __name__=="__main__": 129 | logger.info("======================================Test profiler method===========================================") 130 | c1 = SSHclient("192.168.56.103", "test", "srib@123") 131 | z1 = ProfilerThread(c1) 132 | z1.start() 133 | time.sleep(10) 134 | z1.kill() 135 | 136 | """ 137 | -------------------------------------------------------------------------------- /testcase/array/test_array_states.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import time 3 | import random 4 | import os 5 | import json 6 | from common_libs import * 7 | 8 | # sys.path.insert(0, '../') 9 | # sys.path.insert(0, '/root/poseidon/ibot') 10 | import logger as logger 11 | 12 | from array_state import _Array as array 13 | 14 | logger = logger.get_logger(__name__) 15 | dir_path = os.path.dirname(os.path.realpath(__file__)) 16 | 17 | with open("{}/config.json".format(dir_path)) as f: 18 | config_dict = json.load(f) 19 | 20 | 21 | def pos_setup(pos, num_array, list_array_obj, data_dict): 22 | data_dict["array"]["num_array"] = 2 if num_array == 2 else 1 23 | assert pos.target_utils.bringup_array(data_dict = 
data_dict) == True 24 | assert pos.target_utils.bringup_volume(data_dict = data_dict) == True 25 | assert pos.cli.array_list()[0] == True 26 | assert pos.target_utils.get_subsystems_list() == True 27 | 28 | array_list = list(pos.cli.array_dict.keys()) 29 | for item in range(len(array_list)): 30 | list_array_obj.append( 31 | array(pos, 32 | array_name=array_list[item], 33 | data_dict=data_dict, 34 | cli_history=pos.cli.cli_history, 35 | ) 36 | ) 37 | list_array_obj[item].subsystem = pos.target_utils.ss_temp_list 38 | list_array_obj[item].func["param"]["pre_write"] = True 39 | 40 | 41 | @pytest.mark.parametrize("num_array", [1, 2]) 42 | def test_array_states(array_fixture, num_array): 43 | try: 44 | fio_command = "fio --ioengine=libaio --rw=write --bs=16384 --iodepth=256 --direct=0 --numjobs=1 --verify=pattern --verify_pattern=0x0c60df8108c141f6 --do_verify=1 --verify_dump=1 --verify_fatal=1 --group_reporting --log_offset=1 --size=5% --name=pos0 " 45 | pos = array_fixture 46 | list_array_obj = [] 47 | # step ::0 : variable initialization 48 | data_dict = pos.data_dict 49 | loop = 1 50 | # seed = 10 51 | seed = random.randint(1, 10) 52 | random.seed(seed) 53 | logger.info( 54 | "#################################################################################################" 55 | ) 56 | logger.info( 57 | "--------------------------------------- RANDOM SEED : {} ---------------------------------------".format( 58 | seed 59 | ) 60 | ) 61 | logger.info( 62 | "#################################################################################################" 63 | ) 64 | pos_setup(pos, num_array, list_array_obj, data_dict) 65 | # step ::1 : setup envirenment for POS 66 | 67 | # step ::2 : time setup 68 | start_time = time.time() 69 | run_time = int(config_dict["test_ArrayStates"]["runtime"]) 70 | end_time = start_time + (60 * run_time) 71 | logger.info("RunTime is {} minutes".format(run_time)) 72 | 73 | # step ::3 : run fio 74 | run_io(pos, fio_command=fio_command) 75 | 76 | while True: 77 | logger.info( 78 | "#################################################################################################" 79 | ) 80 | logger.info( 81 | "---------------------------------------- LOOP COUNT : {} ----------------------------------------".format( 82 | loop 83 | ) 84 | ) 85 | logger.info( 86 | "#################################################################################################" 87 | ) 88 | # step ::4 : select randomly the functions to be executed next and run 89 | for array_obj in list_array_obj: 90 | assert array_obj.select_next_state() == True 91 | assert array_obj.run_func(list_array_obj=list_array_obj) == True 92 | time.sleep(2) 93 | # step ::5 : verify that the array state has changed to the expected value 94 | for array_obj in list_array_obj: 95 | assert array_obj.check_next_state() == True 96 | # step ::6 : check the path executed and add the function to command history 97 | 98 | for array_obj in list_array_obj: 99 | assert array_obj.cmd_history(loop=loop) == True 100 | # step ::7 : check the runtime and system memory 101 | if loop % 10 == 1: 102 | # assert pos.client.check_system_memory() == True 103 | assert pos.target_utils.helper.check_system_memory() == True 104 | if time.time() > end_time: 105 | for array_obj in list_array_obj: 106 | assert array_obj.cmd_history(exit=True) == True 107 | 108 | break 109 | time_left = int((end_time - time.time()) / 60) 110 | logger.info( 111 | f"Remaining time for the test to be completed is {str(time_left)} minutes" 112 | ) 113 | loop += 1 114 
| time.sleep(2) 115 | 116 | pos.exit_handler(expected=True) 117 | except Exception as e: 118 | logger.error(f"Test script failed due to {e}") 119 | if len(list_array_obj) > 0: 120 | for array_obj in list_array_obj: 121 | assert array_obj.cmd_history(exit=False) == True 122 | 123 | pos.exit_handler(expected=False) 124 | assert 0 125 | -------------------------------------------------------------------------------- /testcase/multiverse/test_multi_initiator_ops.py: -------------------------------------------------------------------------------- 1 | import pytest, json, sys, os, time, random 2 | 3 | from concurrent.futures import ( 4 | ProcessPoolExecutor, 5 | ThreadPoolExecutor, 6 | ALL_COMPLETED, 7 | FIRST_COMPLETED, 8 | FIRST_EXCEPTION, 9 | wait, 10 | ) 11 | 12 | from pos import POS 13 | import composable.vol_management as volmgmt 14 | import composable.io_management as iomgmt 15 | import composable.system_management as sysmgmt 16 | import composable.composable_core as libcore 17 | from composable.composable_core import _Data as data_mgmt 18 | 19 | import logger as logger 20 | logger = logger.get_logger(__name__) 21 | dir_path = os.path.dirname(os.path.realpath(__file__)) 22 | 23 | with open("{}/../config_files/multi_initiator_ops.json".format(dir_path)) as p: 24 | tc_dict = json.load(p) 25 | 26 | 27 | def setup_module(): 28 | global pos 29 | global data_dict 30 | pos = POS() 31 | data_dict = pos.data_dict 32 | 33 | 34 | def teardown_module(): 35 | pos.exit_handler(expected=True) 36 | 37 | 38 | def test_vol_lc_stress_io_stress_io_sanity_system_sanity_6_initiator(): 39 | try: 40 | if pos.client_cnt < 1: 41 | logger.info( 42 | "Skipping Test as Minimum one Initiator requirement did not match" 43 | ) 44 | pytest.skip("Test config not met") 45 | 46 | logger.info( 47 | f"Max Initiators Supported: 6, Current Initiators: {pos.client_cnt}" 48 | ) 49 | data_dict["system"]["phase"] = "true" 50 | data_dict["device"]["phase"] = "true" 51 | data_dict["array"]["num_array"] = 1 52 | data_dict["subsystem"]["phase"] = "false" 53 | data_dict["volume"]["phase"] = "false" 54 | 55 | assert pos.target_utils.pos_bring_up(data_dict) == True 56 | assert pos.cli.array_list()[0] == True 57 | array_list = list(pos.cli.array_dict.keys()) 58 | pos.cli.array_name = array_list[0] 59 | 60 | test_dict = tc_dict[ 61 | "test_vol_lc_stress_io_stress_io_sanity_system_sanity_6_initiator" 62 | ] 63 | test_object = { 64 | "vol_management": volmgmt, 65 | "io_management": iomgmt, 66 | "system_management": sysmgmt, 67 | } 68 | 69 | futures = [] 70 | executor = ThreadPoolExecutor() 71 | 72 | total_phase = test_dict["validation"]["totalphase"] 73 | total_time = test_dict["validation"]["totaltime"] 74 | por_phase = [] 75 | por_plus_loop = 0 76 | phase = 0 77 | 78 | if test_dict["validation"]["por"]["ibof"]["npor"]["valid"]: 79 | por_phase = test_dict["validation"]["por"]["ibof"]["npor"]["phase"].split( 80 | "," 81 | ) 82 | por_plus_loop = len(por_phase) 83 | 84 | total_phase = total_phase + por_plus_loop 85 | time_per_phase = total_time / total_phase 86 | 87 | data_set = [] 88 | 89 | for cn in range(test_dict["config"]["initiator"]): 90 | client_seed = cn 91 | data_set.append(data_mgmt(client_seed)) 92 | 93 | for idx in range(1, total_phase + 1): 94 | if str(idx) in por_phase: 95 | start_time = time.time() 96 | npo_cnt = 1 97 | while True: 98 | assert pos.target_utils.npor() == True 99 | #for cn in range(test_dict["config"]["initiator"]): 100 | # assert libcore.npor_recover(target=pos, 101 | # data_set=data_set[cn]) == True 102 | 
current_time = time.time() 103 | running_time = current_time - start_time 104 | if running_time >= time_per_phase: 105 | break 106 | npo_cnt += 1 107 | 108 | else: 109 | 110 | for cn in range(test_dict["config"]["initiator"]): 111 | futures.append( 112 | executor.submit( 113 | getattr( 114 | test_object[ 115 | test_dict["validation"]["testcase"][cn]["lib"] 116 | ], 117 | test_dict["validation"]["testcase"][cn]["name"], 118 | ), 119 | target=pos, 120 | client=pos.client_handle[cn], 121 | phase=phase, 122 | data_set=data_set[cn], 123 | Time=time_per_phase, 124 | ) 125 | ) 126 | logger.info( 127 | "#Test case : {} / {}".format( 128 | test_dict["validation"]["testcase"][cn]["lib"], 129 | test_dict["validation"]["testcase"][cn]["name"], 130 | ) 131 | ) 132 | done, not_done = wait(futures, return_when=FIRST_EXCEPTION) 133 | if len(done) != 0: 134 | raise_proc = done.pop() 135 | if raise_proc.exception() is not None: 136 | raise raise_proc.exception() 137 | phase += 1 138 | 139 | assert pos.target_utils.helper.check_system_memory() == True 140 | 141 | for cn in range(test_dict["config"]["initiator"]): 142 | if pos.client_handle[cn].ctrlr_list()[1] is not None: 143 | assert ( 144 | pos.client_handle[cn].nvme_disconnect(pos.target_utils.ss_temp_list) 145 | == True 146 | ) 147 | 148 | assert pos.cli.array_list()[0] == True 149 | array_list = list(pos.cli.array_dict.keys()) 150 | if len(array_list) == 0: 151 | logger.info("No array found in the config") 152 | else: 153 | for array in array_list: 154 | assert pos.cli.array_info(array_name=array)[0] == True 155 | if pos.cli.array_dict[array].lower() == "mounted": 156 | assert pos.cli.array_unmount(array_name=array)[0] == True 157 | 158 | except Exception as e: 159 | pos.exit_handler(expected=False) 160 | assert 0 161 | -------------------------------------------------------------------------------- /testcase/array/test_array_disk_replace_sanity.py: -------------------------------------------------------------------------------- 1 | from time import sleep 2 | import pytest 3 | 4 | from common_libs import * 5 | 6 | import logger 7 | logger = logger.get_logger(__name__) 8 | 9 | @pytest.mark.sanity 10 | def test_noraid_array_disk_replace(array_fixture): 11 | """ 12 | The purpose of this test is to create a NO-RAID array with 1 data drive. 13 | Verification: POS CLI 14 | """ 15 | logger.info( 16 | f" ==================== Test : test_noraid_array_disk_replace ================== " 17 | ) 18 | pos = array_fixture 19 | try: 20 | raid_type, data_disk = "no-raid", 1 21 | 22 | assert single_array_data_setup(pos.data_dict, raid_type, 23 | data_disk, 0, "WT", False) == True 24 | 25 | assert pos.target_utils.bringup_array(data_dict=pos.data_dict) == True 26 | 27 | array_name = pos.data_dict['array']["pos_array"][0]["array_name"] 28 | assert pos.cli.array_info(array_name=array_name)[0] == True 29 | data_disk_list = pos.cli.array_data[array_name]["data_list"] 30 | 31 | # The command is expected to fail. 
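# (NO-RAID provides no redundancy, so POS is expected to reject disk replacement here; compare the RAID0 case in the next test, which pins the failure to the REPLACE_DEV_UNSUPPORTED_RAID_TYPE event.)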
32 | status = pos.cli.array_replace_disk(data_disk_list[0], array_name) 33 | assert status[0] == False 34 | event_name = status[1]['output']['Response']['result']['status']['eventName'] 35 | logger.info(f"Expected failure for array replace disk due to {event_name}") 36 | 37 | logger.info( 38 | " ============================= Test ENDs ======================================" 39 | ) 40 | except Exception as e: 41 | logger.error(f"Test script failed due to {e}") 42 | pos.exit_handler(expected=False) 43 | 44 | @pytest.mark.sanity 45 | @pytest.mark.parametrize("raid_type", ["RAID10", "RAID5", "RAID6", "RAID0"]) 46 | def test_no_spare_array_disk_replace(array_fixture, raid_type): 47 | """ 48 | The purpose of this test is to create an array of RAID5/6/10 with the minimum 49 | required data drives and 0 spare drives. Create Volume and Run IO. 50 | Do Disk Replace - the command should fail. 51 | Verification: POS CLI 52 | """ 53 | logger.info( 54 | f" ==================== Test : test_no_spare_array_disk_replace[{raid_type}] ================== " 55 | ) 56 | pos = array_fixture 57 | try: 58 | data_disk = RAID_MIN_DISK_REQ_DICT[raid_type] 59 | array_cap_volumes = [(4, 100), (8, 100), (32, 100)] 60 | 61 | assert single_array_data_setup(pos.data_dict, raid_type, 62 | data_disk, 0, "WT", False) == True 63 | 64 | assert pos.target_utils.bringup_array(data_dict=pos.data_dict) == True 65 | 66 | assert pos.target_utils.get_subsystems_list() == True 67 | subs_list = pos.target_utils.ss_temp_list 68 | 69 | assert pos.cli.array_list()[0] == True 70 | array_list = list(pos.cli.array_dict.keys()) 71 | 72 | assert volume_create_and_mount_random(pos, array_list=array_list, 73 | subsyste_list=subs_list, arr_cap_vol_list=array_cap_volumes) == True 74 | 75 | assert vol_connect_and_run_random_io(pos, subs_list, size='10g') == True 76 | 77 | for array_name in array_list: 78 | assert pos.cli.array_info(array_name=array_name)[0] == True 79 | data_disk_list = pos.cli.array_data[array_name]["data_list"] 80 | 81 | # The command is expected to fail. 82 | status = pos.cli.array_replace_disk(data_disk_list[0], array_name) 83 | assert status[0] == False 84 | event_name = status[1]['output']['Response']['result']['status']['eventName'] 85 | logger.info(f"Expected failure for array replace disk due to {event_name}") 86 | if raid_type == "RAID0": 87 | assert status[1]['output']['Response']['result']['status'][ 88 | 'eventName'] == "REPLACE_DEV_UNSUPPORTED_RAID_TYPE" 89 | 90 | logger.info( 91 | " ============================= Test ENDs ======================================" 92 | ) 93 | except Exception as e: 94 | logger.error(f"Test script failed due to {e}") 95 | pos.exit_handler(expected=False) 96 | 97 | test_quick_rebuild = { 98 | "t0" : ("RAID5", "WB", True, 256), # RAID Type, Mount Type, Auto Create, Num Vols 99 | "t1" : ("RAID10", "WB", False, 2), 100 | "t2" : ("RAID6", "WT", False, 2), 101 | } 102 | 103 | @pytest.mark.sanity 104 | @pytest.mark.parametrize("test_param", test_quick_rebuild) 105 | def test_array_data_disk_replace(array_fixture, test_param): 106 | """ 107 | The purpose of this test is to create an array of RAID5/RAID6/RAID10 with 108 | the minimum required data drives and 2 spare drives. Create volumes and Run IO. 109 | During IO, perform data Disk Replacement and verify the disk is replaced. 
110 | Verification: POS CLI 111 | """ 112 | logger.info( 113 | f" ==================== Test : test_array_data_disk_replace[{test_param}] ================== " 114 | ) 115 | try: 116 | pos = array_fixture 117 | raid_type, mount_type, auto_create, num_vols = test_quick_rebuild[test_param] 118 | 119 | data_disk = RAID_MIN_DISK_REQ_DICT[raid_type] 120 | 121 | assert single_array_data_setup(pos.data_dict, raid_type, 122 | data_disk, 2, mount_type, auto_create) == True 123 | 124 | assert pos.target_utils.bringup_array(data_dict=pos.data_dict) == True 125 | 126 | assert pos.target_utils.get_subsystems_list() == True 127 | subs_list = pos.target_utils.ss_temp_list 128 | 129 | assert pos.cli.array_list()[0] == True 130 | array_list = list(pos.cli.array_dict.keys()) 131 | 132 | assert volume_create_and_mount_multiple(pos, num_vols, 133 | array_list=array_list, mount_vols=True, subs_list=subs_list) == True 134 | 135 | ip_addr = pos.target_utils.helper.ip_addr[0] 136 | for nqn in subs_list: 137 | assert pos.client.nvme_connect(nqn, ip_addr, "1158") == True 138 | 139 | assert pos.client.nvme_list() == True 140 | nvme_devs = pos.client.nvme_list_out 141 | 142 | fio_cmd = "fio --name=seq_write --ioengine=libaio --rw=write --iodepth=64 --bs=128k "\ 143 | "--size=100gb --do_verify=1 --verify=pattern --verify_pattern=0x5678" 144 | out, async_block_io = pos.client.fio_generic_runner( 145 | nvme_devs, fio_user_data=fio_cmd, run_async=True) 146 | assert out == True 147 | 148 | logger.info("Async IO Started... Wait for 5 minutes") 149 | time.sleep(300) 150 | 151 | # Array disk replace 152 | assert array_disk_remove_replace(pos, array_list, replace=True, 153 | verify_rebuild=True, verify_disk=True) == True 154 | 155 | assert wait_sync_fio([], nvme_devs, None, async_block_io) == True 156 | 157 | logger.info( 158 | " ============================= Test ENDs ======================================" 159 | ) 160 | except Exception as e: 161 | logger.error(f"Test script failed due to {e}") 162 | pos.exit_handler(expected=False) 163 | 164 | 165 | -------------------------------------------------------------------------------- /testcase/cli/test_uuid_sanity.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | import logger 3 | 4 | logger = logger.get_logger(__name__) 5 | 6 | @pytest.mark.sanity 7 | def test_verify_new_array_uuid(array_fixture): 8 | ''' 9 | the purpose of the test is to verify the uuid of an array: 10 | delete the existing array and create a new array with the same name 11 | ''' 12 | try: 13 | logger.info( 14 | f" ============== Test : start of test_verify_new_array_uuid =============" 15 | ) 16 | pos = array_fixture 17 | #creating array 18 | assert pos.target_utils.bringup_array(data_dict=pos.data_dict) == True 19 | assert pos.cli.array_list()[0] == True 20 | array_name = list(pos.cli.array_dict.keys())[0] 21 | assert pos.cli.array_info(array_name=array_name)[0] == True 22 | assert pos.cli.array_info(array_name)[0] == True 23 | array_uuid1 = pos.cli.array_data[array_name] 24 | #verifying uuid of the array is not equal to zero 25 | assert pos.cli.array_data[array_name]["uniqueId"] != 0 26 | logger.info(f"Array unique id : {array_uuid1['uniqueId']}") 27 | 28 | #deleting both arrays created 29 | array_name = "array1" 30 | assert pos.cli.array_unmount(array_name=array_name)[0] == True 31 | assert pos.cli.array_delete(array_name=array_name)[0] == True 32 | array_name = "array2" 33 | assert pos.cli.array_unmount(array_name=array_name)[0] 
== True 34 | assert pos.cli.array_delete(array_name=array_name)[0] == True 35 | #creating array 36 | assert pos.target_utils.bringup_array(data_dict=pos.data_dict) == True 37 | assert pos.cli.array_list()[0] == True 38 | array_name = list(pos.cli.array_dict.keys())[0] 39 | assert pos.cli.array_info(array_name=array_name)[0] == True 40 | assert pos.cli.array_info(array_name)[0] == True 41 | array_uuid2 = pos.cli.array_data[array_name] 42 | #verifying uuid of the array is not equal to zero 43 | assert pos.cli.array_data[array_name]["uniqueId"] != 0 44 | logger.info(f"Array unique id : {array_uuid2['uniqueId']}") 45 | 46 | #verifying the old and new array uuids do not match 47 | assert array_uuid1 != array_uuid2 48 | logger.info( 49 | f" ============== Test : end of test_verify_new_array_uuid =============" 50 | ) 51 | 52 | except Exception as e: 53 | logger.error(f"Test script failed due to {e}") 54 | pos.exit_handler(expected=False) 55 | 56 | @pytest.mark.sanity 57 | def test_array_uuid(array_fixture): 58 | ''' 59 | the purpose of the test is to list the array uuid 60 | ''' 61 | try: 62 | logger.info( 63 | f" ============== Test : start of test_array_uuid =============" 64 | ) 65 | pos = array_fixture 66 | #creating array 67 | assert pos.target_utils.bringup_array(data_dict=pos.data_dict) == True 68 | assert pos.cli.array_list()[0] == True 69 | array_name = list(pos.cli.array_dict.keys())[0] 70 | assert pos.cli.array_info(array_name=array_name)[0] == True 71 | assert pos.cli.array_info(array_name)[0] == True 72 | array_uuid = pos.cli.array_data[array_name] 73 | logger.info(f"Array unique id : {array_uuid['uniqueId']}") 74 | 75 | #verifying uuid of the array is not equal to zero 76 | assert pos.cli.array_data[array_name]["uniqueId"] != 0 77 | logger.info( 78 | f" ============== Test : end of test_array_uuid =============" 79 | ) 80 | 81 | except Exception as e: 82 | logger.error(f"Test script failed due to {e}") 83 | pos.exit_handler(expected=False) 84 | 85 | 86 | @pytest.mark.sanity 87 | def test_volume_uuid(volume_fixture): 88 | ''' 89 | the purpose of the test is to list the volume uuid 90 | ''' 91 | try: 92 | logger.info( 93 | f" ============== Test : start of test_volume_uuid =============" 94 | ) 95 | pos = volume_fixture 96 | assert pos.cli.array_list()[0] == True 97 | array_name = list(pos.cli.array_dict.keys())[0] 98 | #creating volume 99 | assert pos.cli.volume_create(array_name=array_name,volumename="vol1",size='1gb')[0] == True 100 | assert pos.cli.volume_mount(array_name=array_name,volumename="vol1")[0] == True 101 | assert pos.cli.volume_info(array_name=array_name, vol_name="vol1")[0] == True 102 | #verifying uuid of a volume is not equal to zero 103 | assert pos.cli.volume_data[array_name]["vol1"]["uuid"] != 0 104 | volume_uuid = pos.cli.volume_data[array_name]["vol1"]["uuid"] 105 | logger.info(f"Volume unique id : {volume_uuid}") 106 | 107 | logger.info( 108 | f" ============== Test : end of test_volume_uuid =============" 109 | ) 110 | 111 | except Exception as e: 112 | logger.error(f"Test script failed due to {e}") 113 | pos.exit_handler(expected=False) 114 | 115 | @pytest.mark.sanity 116 | def test_volume_uuid_of_two_array(volume_fixture): 117 | ''' 118 | the purpose of the test is to verify the uuid of a volume 119 | by creating volumes with the same name on two arrays 120 | ''' 121 | try: 122 | logger.info( 123 | f" ============== Test : start of test_volume_uuid_of_two_array ============="
124 | ) 125 | pos = volume_fixture 126 | assert pos.cli.array_list()[0] == True 127 | array_name1 = list(pos.cli.array_dict.keys())[0] 128 | #create vol1 on array1 129 | assert pos.cli.volume_create(array_name=array_name1,volumename="vol1",size='1gb')[0] == True 130 | assert pos.cli.volume_mount(array_name=array_name1,volumename="vol1")[0] == True 131 | assert pos.cli.volume_info(array_name=array_name1, vol_name="vol1")[0] == True 132 | #verifying uuid of a volume is not equal to zero 133 | assert pos.cli.volume_data[array_name1]["vol1"]["uuid"] != 0 134 | volume_uuid1 = pos.cli.volume_data[array_name1]["vol1"]["uuid"] 135 | logger.info(f"Volume unique id : {volume_uuid1}") 136 | 137 | assert pos.cli.array_list()[0] == True 138 | array_name2 = list(pos.cli.array_dict.keys())[1] 139 | #create vol1 on array2 140 | assert pos.cli.volume_create(array_name=array_name2,volumename="vol1",size='1gb')[0] == True 141 | assert pos.cli.volume_mount(array_name=array_name2,volumename="vol1")[0] == True 142 | assert pos.cli.volume_info(array_name=array_name2, vol_name="vol1")[0] == True 143 | #verifying uuid of a volume is not equal to zero 144 | assert pos.cli.volume_data[array_name2]["vol1"]["uuid"] != 0 145 | volume_uuid2 = pos.cli.volume_data[array_name2]["vol1"]["uuid"] 146 | logger.info(f"Volume unique id : {volume_uuid2}") 147 | 148 | #verifying the uuids of array1 vol1 and array2 vol1 do not match 149 | assert volume_uuid1 != volume_uuid2 150 | logger.info( 151 | f" ============== Test : end of test_volume_uuid_of_two_array =============" 152 | ) 153 | 154 | except Exception as e: 155 | logger.error(f"Test script failed due to {e}") 156 | pos.exit_handler(expected=False) -------------------------------------------------------------------------------- /testcase/array/test_array_sanity.py: -------------------------------------------------------------------------------- 1 | from array import array 2 | import pytest 3 | import random 4 | from pos import POS 5 | from common_libs import * 6 | import json 7 | import os 8 | import time 9 | 10 | dir_path = os.path.dirname(os.path.realpath(__file__)) 11 | 12 | with open("{}/config.json".format(dir_path)) as f: 13 | config_dict = json.load(f) 14 | 15 | 16 | import logger 17 | logger = logger.get_logger(__name__) 18 | 19 | raid = { 20 | "RAID0": {"spare": 0, "data": 2}, 21 | "RAID10": {"spare": 2, "data": 2}, 22 | "no-raid": {"spare": 0, "data": 1}, 23 | "RAID5": {"spare": 1, "data": 3}, 24 | "RAID6": {"spare": 2, "data": 4}, 25 | } 26 | 27 | 28 | def array_ops(pos): 29 | arrayname = pos.data_dict["array"]["pos_array"][0]["array_name"] 30 | assert pos.cli.array_info(array_name=arrayname)[0] == True 31 | array_data = pos.cli.array_data[arrayname] 32 | if array_data["data_raid"].lower() not in ["raid0", "no-raid"]: 33 | disklist = [random.choice(array_data["data_list"])] 34 | assert pos.target_utils.device_hot_remove(disklist) == True 35 | 36 | if len(array_data["spare_list"]) > 0: 37 | assert pos.cli.array_unmount(array_name=arrayname)[0] == False 38 | assert pos.cli.array_delete(array_name=arrayname)[0] == False 39 | assert pos.target_utils.array_rebuild_wait(array_name=arrayname) == True 40 | 41 | assert pos.cli.device_scan()[0] == True 42 | assert pos.cli.array_list()[0] == True 43 | for array_name in list(pos.cli.array_dict.keys()): 44 | assert pos.cli.array_info(array_name=array_name)[0] == True 45 | if pos.cli.array_dict[array_name].lower() == "mounted": 46 | assert 
pos.cli.array_unmount(array_name=array_name)[0] == True 47 | 48 | assert pos.cli.array_delete(array_name=array_name)[0] == True 49 | assert pos.cli.array_list()[0] == True 50 | return True 51 | 52 | 53 | def negative_tests(pos): 54 | assert pos.cli.device_list()[0] == True 55 | array_raid = pos.data_dict["array"]["pos_array"][0]["raid_type"] 56 | status = pos.cli.array_autocreate(array_name="array2", 57 | num_data=raid[array_raid]["data"], 58 | num_spare=raid[array_raid]["spare"], 59 | buffer_name=pos.cli.dev_type["NVRAM"][1], 60 | raid_type=random.choice(list(raid.keys()))) 61 | assert status[0] == False 62 | #event_name = status[1]['output']['Response']['result']['status']['eventName'] 63 | logger.info(f"Expected failure for autocreate array") 64 | 65 | 66 | for array in ["array1", "array2"]: 67 | writechoice = random.choice([True, False]) 68 | status = pos.cli.array_mount(array_name=array, write_back=writechoice) 69 | assert status[0] == False 70 | event_name = status[1]['output']['Response']['result']['status']['eventName'] 71 | logger.info(f"Expected failure for array mount due to {event_name}") 72 | status = pos.cli.array_delete(array_name=array) 73 | assert status[0] == False 74 | event_name = status[1]['output']['Response']['result']['status']['eventName'] 75 | logger.info(f"Expected failure for array delete due to {event_name}") 76 | return True 77 | 78 | 79 | @pytest.mark.sanity 80 | def test_SanityArray(array_fixture): 81 | try: 82 | start_time = time.time() 83 | run_time = int(config_dict["test_ArraySanity"]["runtime"]) 84 | end_time = start_time + (60 * run_time) 85 | logger.info("RunTime is {} minutes".format(run_time)) 86 | counter = 0 87 | while True: 88 | counter += 1 89 | logger.info(f"Iteration {counter} Started") 90 | pos = array_fixture 91 | pos_array = pos.data_dict["array"]["pos_array"] 92 | 93 | array1_raid = random.choice(list(raid.keys())) 94 | array2_raid = random.choice(list(raid.keys())) 95 | 96 | pos_array[0]["raid_type"] = array1_raid 97 | pos_array[1]["raid_type"] = array2_raid 98 | 99 | pos_array[0]["write_back"] = random.choice([True, False]) 100 | pos_array[1]["write_back"] = random.choice([True, False]) 101 | 102 | pos_array[0]["data_device"] = raid[array1_raid]["data"] 103 | pos_array[1]["data_device"] = raid[array2_raid]["data"] 104 | 105 | pos_array[0]["spare_device"] = raid[array1_raid]["spare"] 106 | pos_array[1]["spare_device"] = raid[array2_raid]["spare"] 107 | 108 | pos_volume = pos.data_dict["volume"]["pos_volumes"] 109 | pos_volume[0]["num_vol"] = random.randint(1, 256) 110 | pos_volume[1]["num_vol"] = random.randint(1, 256) 111 | 112 | por = random.choice([True]) 113 | logger.info(pos.data_dict) 114 | assert pos.target_utils.bringup_array(data_dict=pos.data_dict) == True 115 | assert pos.target_utils.bringup_volume(data_dict=pos.data_dict) == True 116 | run_io(pos) 117 | assert pos.cli.array_list()[0] == True 118 | array_name = list(pos.cli.array_dict.keys())[0] 119 | assert pos.cli.array_info(array_name=array_name)[0] == True 120 | if pos_array[0]["raid_type"] not in ["RAID0", "no-raid"]: 121 | spare_disk_name = pos.cli.array_data["array1"]["data_list"][0] 122 | status = pos.cli.array_addspare(array_name=array_name, 123 | device_name=spare_disk_name) 124 | assert status[0] == False 125 | event_name = status[1]['output']['Response']['result']['status']['eventName'] 126 | logger.info(f"Expected failure for add spare due to {event_name}") 127 | assert pos.cli.device_list()[0] == True 128 | assert pos.cli.array_addspare(array_name=array_name, 129 
| device_name=pos.cli.system_disks[0])[0] == True 130 | 131 | ## Create 3rd array / duplicate array 132 | negative_tests(pos) 133 | if por == True: 134 | logger.info("Performing SPOR") 135 | pos.target_utils.spor() 136 | else: 137 | logger.info("Performing NPOR") 138 | pos.target_utils.npor() 139 | 140 | array_ops(pos) 141 | if time.time() > end_time: 142 | logger.info("Test completed") 143 | break 144 | time_left = int((end_time - time.time()) / 60) 145 | logger.info( 146 | f"Remaining time for the test to be completed is {str(time_left)} minutes" 147 | ) 148 | time.sleep(2) 149 | logger.info(f"Iteration {counter} Completed ") 150 | except Exception as e: 151 | logger.error(f"Test failed due to {e}") 152 | pos.exit_handler(expected=False) 153 | 154 | 155 | @pytest.mark.sanity 156 | def test_Create_Array_alldrives(array_fixture): 157 | try: 158 | pos = array_fixture 159 | 160 | assert pos.cli.device_list()[0] == True 161 | 162 | # Minimum Required Uram = Num Of Disks * 128MB + 512MB 163 | # Uram size is calculated in MB 164 | uram_size = (int(pos.data_dict["device"]["uram"][0]["bufer_size"]) 165 | * int(pos.data_dict["device"]["uram"][0]["strip_size"])) 166 | if ((len(pos.cli.dev_type["SSD"]) * 128 + 512) < uram_size): 167 | pytest.skip("Minimum uram size requirement is not met") 168 | 169 | assert pos.cli.array_create( 170 | array_name=pos.data_dict["array"]["pos_array"][0]["array_name"], 171 | data=pos.cli.dev_type["SSD"], 172 | write_buffer=pos.data_dict["device"]["uram"][0]["uram_name"], 173 | raid_type="RAID5", spare=[])[0] == True 174 | except Exception as e: 175 | logger.error(f"Test case failed due to {e}") 176 | assert 0 177 | -------------------------------------------------------------------------------- /testcase/array/test_raid6_array_sanity.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | from common_libs import * 4 | 5 | import logger 6 | logger = logger.get_logger(__name__) 7 | 8 | @pytest.mark.sanity 9 | @pytest.mark.parametrize("array_mount", ["WT", "WB"]) 10 | def test_create_raid6_array(array_fixture, array_mount): 11 | """ 12 | The purpose of this test is to create a RAID 6 array with different data disk and spare disk counts. 13 | It includes positive and negative tests. 14 | Verification: POS CLI - Create Array, Mount Array, and List Array commands. 
15 | """ 16 | logger.info( 17 | f" ==================== Test : test_create_raid6_array[{array_mount}] ================== " 18 | ) 19 | pos = array_fixture 20 | try: 21 | assert pos.cli.device_list()[0] == True 22 | system_disks = pos.cli.system_disks 23 | 24 | array_disks = [(4, 0), (4, 1), (4, 2), (8, 2), (16, 2), (3, 0), (2, 2)] 25 | for data_disk, spare_disk in array_disks: 26 | if (data_disk + spare_disk) > len(system_disks): 27 | logger.warning("Insufficient system disks to test array create") 28 | continue 29 | 30 | exp_res = False if data_disk < RAID6_MIN_DISKS else True 31 | 32 | auto_create = False 33 | assert single_array_data_setup(pos.data_dict, "RAID6", data_disk, 34 | spare_disk, array_mount, auto_create) == True 35 | 36 | assert pos.target_utils.bringup_array(data_dict=pos.data_dict) == exp_res 37 | 38 | if exp_res: 39 | assert array_unmount_and_delete(pos) == True 40 | logger.info( 41 | " ============================= Test ENDs ======================================" 42 | ) 43 | except Exception as e: 44 | logger.error(f"Test script failed due to {e}") 45 | pos.exit_handler(expected=False) 46 | 47 | @pytest.mark.sanity 48 | @pytest.mark.parametrize("array_mount", ["WT", "WB"]) 49 | def test_auto_create_raid6_array(array_fixture, array_mount): 50 | """ 51 | The purpose of this test is to create RAID 6 array with different data disk and spare disk. 52 | It includes the positive and negative test. 53 | Verification: POS CLI - Create Array Mount Array and List Array command. 54 | """ 55 | logger.info( 56 | f" ==================== Test : test_auto_create_raid6_array[{array_mount}] ================== " 57 | ) 58 | pos = array_fixture 59 | try: 60 | assert pos.cli.device_list()[0] == True 61 | system_disks = pos.cli.system_disks 62 | 63 | array_disks = [(4, 0), (4, 1), (4, 2), (3, 2), (2, 2)] 64 | for data_disk, spare_disk in array_disks: 65 | if (data_disk + spare_disk) > len(system_disks): 66 | logger.warning("Insufficient system disks to test array create") 67 | continue 68 | 69 | exp_res = False if data_disk < RAID6_MIN_DISKS else True 70 | 71 | auto_create = True 72 | assert single_array_data_setup(pos.data_dict, "RAID6", data_disk, 73 | spare_disk, array_mount, auto_create) == True 74 | 75 | assert pos.target_utils.bringup_array(data_dict=pos.data_dict) == exp_res 76 | 77 | if exp_res: 78 | array_name = pos.data_dict["array"]["pos_array"][0]["array_name"] 79 | assert pos.cli.array_info(array_name=array_name)[0] == True 80 | 81 | assert array_unmount_and_delete(pos) == True 82 | logger.info( 83 | " ============================= Test ENDs ======================================" 84 | ) 85 | except Exception as e: 86 | logger.error(f"Test script failed due to {e}") 87 | pos.exit_handler(expected=False) 88 | 89 | 90 | @pytest.mark.sanity 91 | @pytest.mark.parametrize("array_mount", ["WT", "WB"]) 92 | def test_array_cap_with_volumes(array_fixture, array_mount): 93 | """ 94 | The purpose of this test is to create RAID 6 array with different volumes and utilize its capacity. 
95 | Verification: POS CLI - Array - Create, Mount, and List; Volume - Create, Mount, List 96 | """ 97 | logger.info( 98 | f" ==================== Test : test_raid6_array_cap_with_volumes[{array_mount}] ================== " 99 | ) 100 | pos = array_fixture 101 | try: 102 | assert pos.cli.device_list()[0] == True 103 | if len(pos.cli.system_disks) < RAID6_MIN_DISKS: 104 | pytest.skip("Insufficient number of data disks") 105 | 106 | auto_create = False 107 | assert single_array_data_setup(pos.data_dict, "RAID6", RAID6_MIN_DISKS, 108 | 0, array_mount, auto_create) == True 109 | 110 | assert pos.target_utils.bringup_array(data_dict=pos.data_dict) == True 111 | 112 | assert pos.cli.array_list()[0] == True 113 | array_list = list(pos.cli.array_dict.keys()) 114 | 115 | assert pos.target_utils.get_subsystems_list() == True 116 | subsyste_list = pos.target_utils.ss_temp_list 117 | 118 | array_cap_volumes = [(1, 50), (1, 100), (1, 105), (50, 105), (256, 100), (257, 100)] 119 | 120 | for num_volumes, cap_utilize in array_cap_volumes: 121 | assert volume_create_and_mount_multiple(pos, num_volumes, cap_utilize, 122 | array_list=array_list, subs_list=subsyste_list) == True 123 | 124 | assert volume_unmount_and_delete_multiple(pos, array_list) == True 125 | logger.info( 126 | " ============================= Test ENDs ======================================" 127 | ) 128 | except Exception as e: 129 | logger.error(f"Test script failed due to {e}") 130 | pos.exit_handler(expected=False) 131 | 132 | 133 | 134 | @pytest.mark.sanity 135 | @pytest.mark.parametrize("array_mount", ["WT", "WB"]) 136 | @pytest.mark.parametrize("num_vols", [8]) 137 | def test_raid6_array_vols_data_integrity(array_fixture, array_mount, num_vols): 138 | """ 139 | The purpose of this test is to create one RAID6 array mounted in WT or WB. 140 | Create and mount 8 volumes and utilize the full array capacity. Run multiple FIO 141 | File and Block IO jobs on each volume, and verify the data integrity. 
142 | 143 | Verification: Data Integrity on Multiple Volumes 144 | """ 145 | logger.info( 146 | f" ==================== Test : test_raid6_array_vols_data_integrity[{array_mount}-{num_vols}] ================== " 147 | ) 148 | pos = array_fixture 149 | try: 150 | num_data_disk, num_spare_disk = RAID6_MIN_DISKS, 2 151 | assert pos.cli.device_list()[0] == True 152 | if len(pos.cli.system_disks) < (num_data_disk + num_spare_disk): 153 | pytest.skip("Less number of system disk") 154 | 155 | assert single_array_data_setup(pos.data_dict, "RAID6", num_data_disk, 156 | num_spare_disk, array_mount, False) == True 157 | assert pos.target_utils.bringup_array(data_dict=pos.data_dict) == True 158 | 159 | assert volume_create_and_mount_multiple(pos, num_vols) == True 160 | 161 | assert pos.target_utils.get_subsystems_list() == True 162 | subs_list = pos.target_utils.ss_temp_list 163 | ip_addr = pos.target_utils.helper.ip_addr[0] 164 | for nqn in subs_list: 165 | assert pos.client.nvme_connect(nqn, ip_addr, "1158") == True 166 | 167 | fio_cmd = "fio --name=wt_verify --ioengine=libaio --rw=write --iodepth=64 --bs=128k"\ 168 | " --size=2gb --do_verify=1 --verify=pattern --verify_pattern=0x5678" 169 | assert run_fio_all_volumes(pos, fio_cmd=fio_cmd, fio_type="mix") == True 170 | 171 | logger.info( 172 | " ============================= Test ENDs ======================================" 173 | ) 174 | except Exception as e: 175 | logger.error(f"Test script failed due to {e}") 176 | pos.exit_handler(expected=False) 177 | -------------------------------------------------------------------------------- /lib/pos_config.py: -------------------------------------------------------------------------------- 1 | """ 2 | BSD LICENSE 3 | 4 | Copyright (c) 2021 Samsung Electronics Corporation 5 | All rights reserved. 6 | 7 | Redistribution and use in source and binary forms, with or without 8 | modification, are permitted provided that the following conditions 9 | are met: 10 | 11 | * Redistributions of source code must retain the above copyright 12 | notice, this list of conditions and the following disclaimer. 13 | * Redistributions in binary form must reproduce the above copyright 14 | notice, this list of conditions and the following disclaimer in 15 | the documentation and/or other materials provided with the 16 | distribution. 17 | * Neither the name of Samsung Electronics Corporation nor the names of 18 | its contributors may be used to endorse or promote products derived 19 | from this software without specific prior written permission. 20 | 21 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 24 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 25 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 26 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 27 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 31 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
32 | """ 33 | 34 | import logger 35 | import json 36 | import os 37 | from traceback import print_exc 38 | from datetime import datetime 39 | 40 | logger = logger.get_logger(__name__) 41 | 42 | 43 | class POS_Config: 44 | def __init__(self, ssh_obj, file_name="pos.conf", file_path="/etc/pos/") -> None: 45 | self.ssh_obj = ssh_obj 46 | self.file_name = file_name 47 | self.file_path = file_path 48 | self.file_data = None # Config File Data in JSON format 49 | self.file_data_org = None # Store Files Data original copy 50 | self.file_modified = False # Mark ture if config file is update. 51 | 52 | def load_config(self) -> bool: 53 | try: 54 | cmd = f"cat {self.file_path}{self.file_name}" 55 | out = self.ssh_obj.execute(command=cmd, expected_exit_code=0) 56 | 57 | config_data = "".join(out).strip() 58 | 59 | self.file_data = json.loads(config_data) 60 | self.file_data_org = self.file_data 61 | 62 | # logger.debug("Config file data {}.".format(type(self.file_data))) 63 | return True 64 | except Exception as e: 65 | logger.error(f"Load config failed. Error: '{e}'") 66 | print_exc() 67 | return False 68 | 69 | def _dump_config_data(self, data: str) -> bool: 70 | try: 71 | cmd = f"echo '' > {self.file_path}{self.file_name}" 72 | self.ssh_obj.execute(command=cmd, expected_exit_code=0) 73 | 74 | for line in data.split("\n"): 75 | cmd = f"echo '{line}' >> {self.file_path}{self.file_name}" 76 | self.ssh_obj.execute(command=cmd, expected_exit_code=0) 77 | 78 | return True 79 | except Exception as e: 80 | logger.error(f"Load config failed. Error: '{e}'") 81 | print_exc() 82 | return False 83 | 84 | def _copy_config_data(self, data: str) -> bool: 85 | try: 86 | src_file_name = ( 87 | f'temp_{datetime.now().strftime("%Y_%m_%H_%M")}_pos_conf.json' 88 | ) 89 | with open(src_file_name, "w") as fp: 90 | fp.write(f"{data}\n") 91 | 92 | dst_file_name = f"{self.file_path}{self.file_name}" 93 | self.ssh_obj.file_transfer( 94 | src_file_name, dst_file_name, move_to_local=False 95 | ) 96 | 97 | os.remove(src_file_name) 98 | return True 99 | except Exception as e: 100 | logger.error(f"Copy config failed. Error: '{e}'") 101 | print_exc() 102 | os.remove(src_file_name) 103 | return False 104 | 105 | def update_config(self, data: dict = None) -> bool: 106 | try: 107 | config_data_json = data or self.file_data 108 | config_data_str = json.dumps(config_data_json, indent=4) 109 | 110 | logger.debug("Config file data {}.".format(config_data_str)) 111 | 112 | # return self._dump_config_data(config_data_str) 113 | return self._copy_config_data(config_data_str) 114 | except Exception as e: 115 | logger.error(f"Load config failed. Error: '{e}'") 116 | print_exc() 117 | return False 118 | 119 | def restore_config(self, force: bool = False) -> bool: 120 | try: 121 | if not self.file_modified: 122 | logger.error("POS Config file is already in Old state") 123 | 124 | config_data_str = json.dumps(self.file_data_org, indent=4) 125 | 126 | logger.debug("Config file data {}.".format(config_data_str)) 127 | 128 | # return self._dump_config_data(config_data_str) 129 | return self._copy_config_data(config_data_str) 130 | except Exception as e: 131 | logger.error(f"Load config failed. 
Error: '{e}'") 132 | print_exc() 133 | return False 134 | 135 | def journal_state(self, enable: bool = True, update_now: bool = False) -> bool: 136 | journal_enable = self.file_data["journal"]["enable"] 137 | if enable: 138 | if journal_enable == True: 139 | logger.info("POS Journal is already enabled.") 140 | else: 141 | logger.info("Enable POS Journal") 142 | else: 143 | if journal_enable == False: 144 | logger.info("POS Journal is already disabled.") 145 | else: 146 | logger.info("Disable POS Journal") 147 | 148 | self.file_data["journal"]["enable"] = enable 149 | if update_now: 150 | self.file_modified = True 151 | return self.update_config() 152 | 153 | return True 154 | 155 | def rebuild_auto_start(self, auto_start: bool = True, update_now: bool = False) -> bool: 156 | rebuild_autostart = self.file_data["rebuild"]["auto_start"] 157 | if auto_start: 158 | if rebuild_autostart == True: 159 | logger.info("POS Rebuild Auto Start is already enabled.") 160 | else: 161 | logger.info("Enable POS Rebuild Auto Start") 162 | else: 163 | if rebuild_autostart == False: 164 | logger.info("POS Rebuild Auto Start is already disabled.") 165 | else: 166 | logger.info("Disable POS Rebuild Auto Start") 167 | 168 | self.file_data["rebuild"]["auto_start"] = auto_start 169 | if update_now: 170 | self.file_modified = True 171 | return self.update_config() 172 | 173 | return True 174 | 175 | def save_restore(self, enable: bool = True, update_now: bool = False) -> bool: 176 | save_restore = self.file_data["save_restore"]["enable"] 177 | if enable: 178 | if save_restore == True: 179 | logger.info("POS Save Restore is already enabled.") 180 | else: 181 | logger.info("Enable POS Save Restore") 182 | else: 183 | if save_restore == False: 184 | logger.info("POS Save Restore is already disabled.") 185 | else: 186 | logger.info("Disable POS Save Restore") 187 | 188 | self.file_data["save_restore"]["enable"] = enable 189 | if update_now: 190 | self.file_modified = True 191 | return self.update_config() 192 | 193 | return True 194 | 195 | 196 | if __name__ == "__main__": 197 | pass 198 | from pos import POS 199 | 200 | pos = POS() 201 | pos_config = POS_Config(pos.target_ssh_obj) 202 | assert pos_config.load_config() == True 203 | assert pos_config.journal_state() == True 204 | assert pos_config.rebuild_auto_start() == True 205 | assert pos_config.update_config() == True 206 | assert pos_config.restore_config() == True 207 | -------------------------------------------------------------------------------- /lib/tags.py: -------------------------------------------------------------------------------- 1 | """ 2 | BSD LICENSE 3 | 4 | Copyright (c) 2021 Samsung Electronics Corporation 5 | All rights reserved. 6 | 7 | Redistribution and use in source and binary forms, with or without 8 | modification, are permitted provided that the following conditions 9 | are met: 10 | 11 | * Redistributions of source code must retain the above copyright 12 | notice, this list of conditions and the following disclaimer. 13 | * Redistributions in binary form must reproduce the above copyright 14 | notice, this list of conditions and the following disclaimer in 15 | the documentation and/or other materials provided with the 16 | distribution. 17 | * Neither the name of Samsung Electronics Corporation nor the names of 18 | its contributors may be used to endorse or promote products derived 19 | from this software without specific prior written permission. 
20 | 21 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 24 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 25 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 26 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 27 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 31 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 32 | """ 33 | 34 | #!/usr/bin/python 35 | # Script for parsing output of lshw and hostnamectl and generate environmental tags in dictionary format 36 | import sys 37 | import logger 38 | import pytest 39 | from collections import OrderedDict 40 | from lxml import etree 41 | import threading 42 | 43 | # from hurry.filesize import size 44 | 45 | sys.path.insert(0, "../") 46 | from node import SSHclient 47 | from threadable_node import * 48 | 49 | logger = logger.get_logger(__name__) 50 | 51 | 52 | class EnvTags(SSHclient): 53 | def __init__(self, item, ip, username, password): 54 | self.item = item 55 | self.ip = ip 56 | self.username = username 57 | self.password = password 58 | self.inv = OrderedDict() 59 | try: 60 | self.conn = SSHclient(ip, username, password) 61 | except Exception as e: 62 | logger.error("Unable to connect to {} due to {}".format(self.item, e)) 63 | assert 0 64 | @threaded 65 | def get_tags(self): 66 | try: 67 | 68 | inventory = self.conn.execute("lshw -xml -numeric") 69 | inventory = "\n".join(inventory) 70 | inventory = etree.XML(inventory) 71 | 72 | find_system = etree.XPath(".//node[@class='system']") 73 | for sys in find_system(inventory): 74 | self.inv["System Model"] = sys.find("product").text 75 | self.inv["System Vendor"] = sys.find("vendor").text 76 | try: 77 | self.inv["System Serial Number"] = sys.find("serial").text 78 | except Exception as e: 79 | logger.info( 80 | "No serial number found for the node : {}".format(self.item) 81 | ) 82 | self.inv["System Serial Number"] = "Nil" 83 | find_bus = etree.XPath(".//node[@class='bus']") 84 | for bus in find_bus(inventory): 85 | if ( 86 | bus.find("description") is not None 87 | and bus.find("description").text == "Motherboard" 88 | ): 89 | try: 90 | self.inv["Motherboard Model"] = bus.find("product").text 91 | except Exception as e: 92 | logger.info( 93 | "No Motherboard Model found for the node : {}".format( 94 | self.item 95 | ) 96 | ) 97 | self.inv["Motherboard Model"] = "Nil" 98 | try: 99 | self.inv["Motherboard vendor"] = bus.find("vendor").text 100 | except Exception as e: 101 | logger.info( 102 | "No Motherboard Vendor found for the node : {}".format( 103 | self.item 104 | ) 105 | ) 106 | self.inv["Motherboard vendor"] = "Nil" 107 | try: 108 | self.inv["Motherboard Serial Number"] = bus.find("serial").text 109 | except Exception as e: 110 | logger.info( 111 | "No Motherboard Serial Number found for the node : {}".format( 112 | self.item 113 | ) 114 | ) 115 | self.inv["Motherboard Serial Number"] = "Nil" 116 | 117 | find_memory = etree.XPath(".//node[@class='memory']") 118 | for mem in find_memory(inventory): 119 | if ( 120 | mem.find("description") is not None 121 | and mem.find("description").text == "BIOS" 
122 | ): 123 | self.inv["System BIOS"] = mem.find("vendor").text 124 | self.inv["System BIOS Version"] = mem.find("version").text 125 | self.inv["System BIOS Date"] = mem.find("date").text 126 | 127 | find_cpus = etree.XPath(".//node[@class='processor']") 128 | self.inv["Processor Model"] = find_cpus(inventory)[0].find("product").text 129 | self.inv["Processor Vendor"] = find_cpus(inventory)[0].find("vendor").text 130 | self.inv["Processor Sockets"] = len(find_cpus(inventory)) 131 | self.inv["Processor Cores Per Socket"] = ( 132 | find_cpus(inventory)[0] 133 | .find('configuration/setting/[@id="cores"]') 134 | .get("value") 135 | ) 136 | 137 | total_mem = 0 138 | for mem in find_memory(inventory): 139 | if mem.find("size") is not None: 140 | total_mem = total_mem + int(mem.find("size").text) 141 | self.inv["Total Memory"] = total_mem 142 | 143 | find_disks = etree.XPath(".//node[@class='disk']") 144 | numdisks = 0 145 | diskspace = 0 146 | for disk in find_disks(inventory): 147 | if disk.find("size") is not None: 148 | numdisks = numdisks + 1 149 | diskspace = diskspace + int(disk.find("size").text) 150 | self.inv["Device" + str(numdisks)] = ( 151 | disk.find("description").text 152 | + "_" 153 | + disk.find("product").text 154 | + "_" 155 | + disk.find("logicalname").text 156 | ) 157 | find_networks = etree.XPath(".//node[@class='network']") 158 | num_net = 0 159 | for net in find_networks(inventory): 160 | if net.find("product") is not None and net.find("vendor") is not None: 161 | num_net += 1 162 | self.inv["Network Interface" + str(num_net)] = ( 163 | net.find("description").text 164 | + "_" 165 | + net.find("product").text 166 | + "_" 167 | + net.find("vendor").text 168 | ) 169 | 170 | 171 | inventory = self.conn.execute("hostname") 172 | self.inv["Host Name"] = inventory[0] 173 | 174 | inventory = self.conn.execute("lsb_release -d") 175 | self.inv["Operating System"] = ( 176 | inventory[0].split("Description:", 1)[1].strip() 177 | ) 178 | 179 | inventory = self.conn.execute("uname -r") 180 | self.inv["Kernel"] = inventory[0] 181 | return True 182 | except Exception as e: 183 | logger.error( 184 | "lshw command execution on node {} failed due to: {}".format( 185 | self.item[0], e 186 | ) 187 | ) 188 | return False 189 | -------------------------------------------------------------------------------- /docs/UserGuide.md: -------------------------------------------------------------------------------- 1 | # Introduction 2 | ## Background 3 | Poseidon OS (POS) is a light-weight storage OS that offers the best performance and valuable features over NVMeOF. 4 | It is optimized for low-latency and high-throughput NVMe devices. 5 | It supports the NVMe-over-Fabrics interface over RDMA and TCP. As part of Samsung's continued efforts to contribute back to the open source community, 6 | POS is open sourced. It can be downloaded from https://www.github.com/poseidonos. 7 | POS can be explored by using the in-built CLI command sets. This user guide introduces a test suite named Trident to the open source community. 8 | Users can refer to this guide to set up and use Trident. 9 | 10 | ## Design 11 | Trident is a collection of libraries and sample test cases developed using the pytest framework. 12 | The libraries are written in pure Python in order to leave the choice of framework to the user; 13 | the test cases themselves are built on pytest. The software architecture for Trident is depicted in the diagram below. 
14 | 15 | | ![Architecture](img/Trident_SW_Architecture.JPG) | 16 | | :--: | 17 | | *Figure 1 : Software Architecture* | 18 | 19 | Since it is developed on top of free and open technologies, Trident is both open and extensible. 20 | 21 | ### Open. 22 | The source code is open for anyone to explore at https://github.com/poseidonos/trident . 23 | Users can develop their own test cases utilising the framework-independent Python libraries. 24 | As the framework for the test cases is based on pytest, users can exploit all the advantages of pytest to write efficient test cases. 25 | 26 | ### Extensible. 27 | The test suite provides core and intermediate APIs and the following set of test cases: 28 | * Array, Volume, SubSystem, QOS management (create, delete, rename single and multi-arrays) 29 | * Array rebuild (SSD hot plug) 30 | * GC and flush 31 | * Other POS functionalities 32 | 33 | ## Preparing the system for the Trident open source tool 34 | ### Software requirements 35 | #### Poseidon OS 36 | * OS: Ubuntu 18.04 (kernel: 5.3.0-19-generic) 37 | * GCC: 7.5.0 or later 38 | * SPDK/DPDK: 20.10 / 20.08 39 | 40 | #### Trident 41 | * Python 3.8 42 | * Pytest (Ubuntu 18.04 repo) 43 | * Paramiko (Python3 module) 44 | 45 | ### Pre-requisites for Project Poseidon 46 | Poseidon OS can be cloned from the following link 47 | 48 | `$git clone https://github.com/poseidonos/poseidonos.git` 49 | 50 | Navigating to the script folder and executing the pkgdep.sh script downloads and installs all dependencies of the project. 51 | 52 | Please refer to https://github.com/poseidonos/poseidonos/blob/main/README.md for further details. 53 | 54 | ### Pre-requisites for Trident Open source 55 | The test suite is primarily based on Python3 and pytest. It is important to check whether Python3 is already installed. Pip3 (python3-pip) also needs to be installed, as it is used to install all requirements for the test framework. After installation, update pip3 by running the command below 56 | 57 | `$pip3 install --upgrade pip` 58 | 59 | To install the pre-requisites, run the following command: 60 | 61 | `$pip3 install -r requirements.txt` 62 | 63 | ## Directory structure of Trident 64 | Trident can be cloned from the git link below 65 | 66 | `$git clone https://github.com/poseidonos/trident.git` 67 | 68 | ### Trident is organised into the following directories: 69 | #### Lib: 70 | Library functions are distributed into different files depending on the functionality 71 | 72 | File|Description 73 | ----|----------- 74 | cli.py|This file contains the class implementing all CLI commands that POS supports. 75 | pos.py|This file is a wrapper for cli and other modules. 76 | logger.py|Customization over the Python logger modules 77 | node.py|APIs needed for implementing the paramiko module 78 | proc.py|Process related APIs 79 | utils.py|APIs related to various operations such as creating/mounting file-systems, NVMe commands, threading and many more. 80 | target_utils.py|Target side API wrappers 81 | helper.py|API wrappers that can be used on both the target and the host 82 | hetero_setup.py|APIs to support and configure NVMe disks to create a hetero setup 83 | pos_config.py|APIs to update and reset the pos.conf during test execution 84 | prometheus.py|APIs to access the prometheus DB 85 | 86 | ### Docs: 87 | To generate API documentation for the libraries, the Doxygen tool is used.
It can be installed with: 88 | 89 | `$sudo apt-get install doxygen` 90 | 91 | Documents are generated by entering the directory trident/docs/doxygen/ and running the command 92 | 93 | `$doxygen` 94 | 95 | ### Testcases: 96 | Contains test case scripts classified into different directories. Each directory contains at least one test driver file. 97 | Each test driver file implements methods which work as one or more test cases. 98 | 99 | File or Folder|Description 100 | --------------|----------- 101 | conftest.py|Defines common fixtures to set up the test infra (pytest infra). 102 | array| All array management TCs 103 | volume| All volume management TCs 104 | userio| All GC/flush test cases 105 | subsystem | All Subsystem management TCs 106 | config_files| This directory has topology.json, which holds setup parameters such as target/initiator IP addresses 107 | telemetry| This directory tests the telemetry feature 108 | 109 | ### Utils: 110 | This directory contains setup_tool.py, a tool to check that the user's setup has SSDs supported by POS, that all IPs are on the same network, and that basic POS functionalities are working. 111 | 112 | # Pytest framework 113 | Pytest provides features which enable testers to write test code in a well organised way. The following features of pytest are utilised here: fixtures and parametrization. 114 | Hence Trident uses pytest as its base framework. The libraries are developed in pure Python and do not depend on pytest, but the test cases do depend on the pytest framework. 115 | If a user decides to use any other test runner, such as Avocado, they can still use the lib folder as is. 116 | 117 | # Execution 118 | Poseidon OS requires two systems: a target and a minimum of one initiator. A target is any commodity server or PC running Ubuntu 18.04 server with kernel version 5.3.0-19-generic. 119 | The initiator currently supported is Ubuntu 18.04 with the same kernel version. Trident can be installed on a third executor machine, which can be a PC or VM running any Linux variant. 120 | 121 | | ![setup](img/Test_Setup.JPG) | 122 | | :--: | 123 | | *Figure 2 : Test Setup* | 124 | 125 | Tests can also be run on a single system by updating the loopback IP for the target, the initiator machine and the data network interfaces in the configuration file testcase/config_files/topology.json. 126 | The setup tool in the utils directory can be used to make sure the multi-system setup the user has selected is good enough to run the tests. 127 | 128 | ## Executing test cases using pytest runner 129 | Navigate to the Trident directory after cloning the test tool 130 | ### Executing all available test cases 131 | `python3 -m pytest -v -s testcase/` 132 | ### Executing only sanity suite 133 | `python3 -m pytest -v -s testcase/ -m 'sanity' ` 134 | 135 | # Test case examples 136 | This section walks through a few example test scripts and the scope they provide; many more can be found in the testcase folder of the package. A minimal skeleton shared by most Trident tests is sketched first, followed by two fuller examples.
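Most Trident tests follow the same shape: obtain the POS handle from a fixture defined in testcase/conftest.py, drive the target through the `pos.cli` and `pos.target_utils` wrappers (each of which returns a tuple whose first element is the pass/fail status), and hand any failure to `pos.exit_handler`. The sketch below is a minimal, illustrative instance of that pattern; the test name and body are made up for this guide, but every call in it appears in the shipped test cases.
```
import pytest
import logger

logger = logger.get_logger(__name__)


@pytest.mark.sanity
def test_example_skeleton(array_fixture):
    try:
        pos = array_fixture
        # Bring up the arrays and volumes described by the config data
        assert pos.target_utils.bringup_array(data_dict=pos.data_dict) == True
        assert pos.target_utils.bringup_volume(data_dict=pos.data_dict) == True
        # CLI wrappers return a tuple; index 0 is the pass/fail status
        assert pos.cli.array_list()[0] == True
    except Exception as e:
        logger.error(f"Test script failed due to {e}")
        pos.exit_handler(expected=False)
```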
134 | 
135 | # Test case examples
136 | This section shows some examples of test scripts and the scope they cover; more can be found in the testcase folder of the package.
137 | 
138 | Example 1. Array Management
139 | 
140 | In the example TC below, the parametrize markers generate all possible combinations of tests to validate array management:
141 | ```
142 | @pytest.mark.sanity
143 | @pytest.mark.parametrize("writeback", [True, False])
144 | @pytest.mark.parametrize("raid_type", list(raid.keys()))
145 | @pytest.mark.parametrize("numvol", [1, 256])
146 | @pytest.mark.parametrize("fioruntime", [10])
147 | @pytest.mark.parametrize("spor", [False])  # To enable SPOR, add True to the list
148 | def test_SanityArray(raid_type, writeback, numvol, fioruntime, spor):
149 | ```
150 | 
151 | Example 2. Volume Management
152 | 
153 | The test below provides a sample scenario which requests the POS CLI to create a 257th volume; this is intended to fail, as POS only supports 256 volumes per array.
154 | ```
155 | @pytest.mark.sanity()
156 | def test_volumesanity257vols(array_fixture):
157 |     pos = array_fixture
158 |     array_name = "array1"
159 |     try:
160 |         if pos.target_utils.helper.check_pos_exit() == True:
161 |             assert pos.target_utils.pos_bring_up(data_dict=pos.data_dict) == True
162 |         assert pos.cli.devel_resetmbr()[0] == True
163 |         assert pos.target_utils.pci_rescan() == True
164 |         assert pos.cli.device_list()[0] == True
165 |         assert pos.cli.array_create(array_name=array_name, data=pos.cli.dev_type['SSD'][0:5], write_buffer=pos.cli.dev_type['NVRAM'][0], raid_type="RAID5", spare=[])[0] == True
166 |         assert pos.cli.array_unmount(array_name=array_name)[0] == True
167 |         for i in range(256):
168 |             vname = f'array1_vol{str(i)}'
169 |             assert pos.cli.volume_create(volumename=vname, array_name=array_name, size="1gb")[0] == True
170 |         assert pos.cli.volume_create(volumename="invalidvol", array_name=array_name, size="1gb")[0] == False  # the 257th volume is rejected
171 | 
172 |     except Exception as e:
173 |         logger.error(f" ======= Test FAILED due to {e} ========")
174 |         assert 0
175 | 
176 | ```
177 | 
178 | # Contributing
179 | This test suite is released as open source with the intention of providing an open framework that people can utilise and extend.
180 | Individuals can contribute to this project in various ways: by filing bugs, contributing patches, and providing more documentation, for example by improving this guide.
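For contributors adding a new test case, the skeleton below follows the same conventions as the examples above. The array_fixture comes from testcase/conftest.py; the test name and body are illustrative only.

```
import pytest
import logger

logger = logger.get_logger(__name__)

@pytest.mark.sanity
def test_my_new_feature(array_fixture):
    try:
        pos = array_fixture
        # POS is already brought up by the fixture; discover devices first
        assert pos.cli.device_list()[0] == True
        assert pos.target_utils.bringup_array(data_dict=pos.data_dict) == True
        # ... feature-specific steps and assertions go here ...
    except Exception as e:
        logger.error(f"Test script failed due to {e}")
        pos.exit_handler(expected=False)
```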
181 | 182 | 183 | -------------------------------------------------------------------------------- /testcase/config_files/system_management.json: -------------------------------------------------------------------------------- 1 | { 2 | "por": 3 | { 4 | "ibof": 5 | { 6 | "npor": { "valid": true, "phase":"5,7" }, 7 | "spor": { "valid": false } 8 | }, 9 | "device": 10 | { 11 | "npor": { "valid": false }, 12 | "spor": { "valid": false } 13 | } 14 | }, 15 | "test_system_sanity_detach_attach_device_iteration_io_verify": 16 | { 17 | "total_phase":8, 18 | "total_time":20, 19 | "phase": 20 | [ 21 | { 22 | "volume": 23 | { 24 | "create" : { "valid":true, "basename":"system_sanity", "number":"10", "maxiops":"0", "maxbw":"0", "size":"10G"}, 25 | "delete" : { "valid":false}, 26 | "mount" : { "valid":true, "basename":"system_sanity", "number":"10", "nqnbasename":"nqn.2019-04.ibof:subsystem_system_sanity"}, 27 | "unmount": { "valid":false} 28 | }, 29 | "nvmf_subsystem": 30 | { 31 | "create" : { "valid":true, "basename":"nqn.2019-04.ibof:subsystem_system_sanity"}, 32 | "delete" : { "valid":false}, 33 | "connect" : { "valid":true, "basename":"nqn.2019-04.ibof:subsystem_system_sanity"}, 34 | "disconnect" : { "valid":false, "basename":"nqn.2019-04.ibof:subsystem_system_sanity"} 35 | }, 36 | "io": 37 | { 38 | "fio": { "rw":"write", "size":"100%", "iodepth":"64", "bs":"4kb", "ioverify":true} 39 | }, 40 | "por": 41 | { 42 | "ibof": { "npor":false, "spor":false }, 43 | "device": {} 44 | } 45 | }, 46 | { 47 | "volume": 48 | { 49 | "create" : { "valid":false}, 50 | "delete" : { "valid":false}, 51 | "mount" : { "valid":false, "basename":"system_sanity", "number":"10", "nqnbasename":"nqn.2019-04.ibof:subsystem_system_sanity"}, 52 | "unmount": { "valid":false} 53 | }, 54 | "nvmf_subsystem": 55 | { 56 | "create" : { "valid":false}, 57 | "delete" : { "valid":false}, 58 | "connect" : { "valid":false, "basename":"nqn.2019-04.ibof:subsystem_system_sanity"}, 59 | "disconnect" : { "valid":false, "basename":"nqn.2019-04.ibof:subsystem_system_sanity"} 60 | }, 61 | "io": 62 | { 63 | "fio": { "rw":"write", "size":"100%", "iodepth":"64", "bs":"4kb", "ioverify":true} 64 | }, 65 | "por": 66 | { 67 | "ibof": { "npor":false, "spor":false }, 68 | "device": {} 69 | } 70 | }, 71 | { 72 | "volume": 73 | { 74 | "create" : { "valid":false}, 75 | "delete" : { "valid":false}, 76 | "mount" : { "valid":false, "basename":"system_sanity", "number":"10", "nqnbasename":"nqn.2019-04.ibof:subsystem_system_sanity"}, 77 | "unmount": { "valid":false} 78 | }, 79 | "nvmf_subsystem": 80 | { 81 | "create" : { "valid":false}, 82 | "delete" : { "valid":false}, 83 | "connect" : { "valid":false, "basename":"nqn.2019-04.ibof:subsystem_system_sanity"}, 84 | "disconnect" : { "valid":false, "basename":"nqn.2019-04.ibof:subsystem_system_sanity"} 85 | }, 86 | "io": 87 | { 88 | "fio": { "rw":"write", "size":"100%", "iodepth":"64", "bs":"4kb", "ioverify":true} 89 | }, 90 | "por": 91 | { 92 | "ibof": { "npor":false, "spor":false }, 93 | "device": {} 94 | } 95 | }, 96 | { 97 | "volume": 98 | { 99 | "create" : { "valid":false}, 100 | "delete" : { "valid":false}, 101 | "mount" : { "valid":false, "basename":"system_sanity", "number":"10", "nqnbasename":"nqn.2019-04.ibof:subsystem_system_sanity"}, 102 | "unmount": { "valid":false, "basename":"system_sanity", "number":"10"} 103 | }, 104 | "nvmf_subsystem": 105 | { 106 | "create" : { "valid":false}, 107 | "delete" : { "valid":false}, 108 | "connect" : { "valid":false, "basename":"nqn.2019-04.ibof:subsystem_system_sanity"}, 
109 | "disconnect" : { "valid":false, "basename":"nqn.2019-04.ibof:subsystem_system_sanity"} 110 | }, 111 | "io": 112 | { 113 | "fio": { "rw":"write", "size":"100%", "iodepth":"64", "bs":"4kb", "ioverify":true} 114 | }, 115 | "por": 116 | { 117 | "ibof": { "npor":false, "spor":false }, 118 | "device": {} 119 | } 120 | }, 121 | { 122 | "volume": 123 | { 124 | "create" : { "valid":false}, 125 | "delete" : { "valid":false}, 126 | "mount" : { "valid":false, "basename":"system_sanity", "number":"10", "nqnbasename":"nqn.2019-04.ibof:subsystem_system_sanity"}, 127 | "unmount": { "valid":false, "basename":"system_sanity", "number":"10"} 128 | }, 129 | "nvmf_subsystem": 130 | { 131 | "create" : { "valid":false, "basename":"nqn.2019-04.ibof:subsystem_system_sanity"}, 132 | "delete" : { "valid":false}, 133 | "connect" : { "valid":false, "basename":"nqn.2019-04.ibof:subsystem_system_sanity"}, 134 | "disconnect" : { "valid":false, "basename":"nqn.2019-04.ibof:subsystem_system_sanity"} 135 | }, 136 | "io": 137 | { 138 | "fio": { "rw":"write", "size":"100%", "iodepth":"64", "bs":"4kb", "ioverify":true} 139 | }, 140 | "por": 141 | { 142 | "ibof": { "npor":false, "spor":false }, 143 | "device": {} 144 | } 145 | }, 146 | { 147 | "volume": 148 | { 149 | "create" : { "valid":false}, 150 | "delete" : { "valid":false}, 151 | "mount" : { "valid":false, "basename":"system_sanity", "number":"10", "nqnbasename":"nqn.2019-04.ibof:subsystem_system_sanity"}, 152 | "unmount": { "valid":false, "basename":"system_sanity", "number":"10"} 153 | }, 154 | "nvmf_subsystem": 155 | { 156 | "create" : { "valid":false, "basename":"nqn.2019-04.ibof:subsystem_system_sanity"}, 157 | "delete" : { "valid":false}, 158 | "connect" : { "valid":false, "basename":"nqn.2019-04.ibof:subsystem_system_sanity"}, 159 | "disconnect" : { "valid":false, "basename":"nqn.2019-04.ibof:subsystem_system_sanity"} 160 | }, 161 | "io": 162 | { 163 | "fio": { "rw":"write", "size":"100%", "iodepth":"64", "bs":"4kb", "ioverify":true} 164 | }, 165 | "por": 166 | { 167 | "ibof": { "npor":false, "spor":false }, 168 | "device": {} 169 | } 170 | }, 171 | { 172 | "volume": 173 | { 174 | "create" : { "valid":false}, 175 | "delete" : { "valid":false}, 176 | "mount" : { "valid":false, "basename":"system_sanity", "number":"10", "nqnbasename":"nqn.2019-04.ibof:subsystem_system_sanity"}, 177 | "unmount": { "valid":false} 178 | }, 179 | "nvmf_subsystem": 180 | { 181 | "create" : { "valid":false}, 182 | "delete" : { "valid":false}, 183 | "connect" : { "valid":false, "basename":"nqn.2019-04.ibof:subsystem_system_sanity"}, 184 | "disconnect" : { "valid":false, "basename":"nqn.2019-04.ibof:subsystem_system_sanity"} 185 | }, 186 | "io": 187 | { 188 | "fio": { "rw":"write", "size":"100%", "iodepth":"64", "bs":"4kb", "ioverify":true} 189 | }, 190 | "por": 191 | { 192 | "ibof": { "npor":false, "spor":false }, 193 | "device": {} 194 | } 195 | }, 196 | { 197 | "volume": 198 | { 199 | "create" : { "valid":false}, 200 | "delete" : { "valid":false}, 201 | "mount" : { "valid":false, "basename":"system_sanity", "number":"10", "nqnbasename":"nqn.2019-04.ibof:subsystem_system_sanity"}, 202 | "unmount": { "valid":false} 203 | }, 204 | "nvmf_subsystem": 205 | { 206 | "create" : { "valid":false}, 207 | "delete" : { "valid":false, "basename":"nqn.2019-04.ibof:subsystem_system_sanity"}, 208 | "connect" : { "valid":false, "basename":"nqn.2019-04.ibof:subsystem_system_sanity"}, 209 | "disconnect" : { "valid":false, "basename":"nqn.2019-04.ibof:subsystem_system_sanity"} 210 | }, 211 | "io": 212 | 
{ 213 | "fio": { "rw":"write", "size":"100%", "iodepth":"64", "bs":"4kb", "ioverify":true} 214 | }, 215 | "por": 216 | { 217 | "ibof": { "npor":false, "spor":false }, 218 | "device": {} 219 | } 220 | } 221 | ] 222 | } 223 | } 224 | -------------------------------------------------------------------------------- /lib/pos.py: -------------------------------------------------------------------------------- 1 | """ 2 | BSD LICENSE 3 | 4 | Copyright (c) 2021 Samsung Electronics Corporation 5 | All rights reserved. 6 | 7 | Redistribution and use in source and binary forms, with or without 8 | modification, are permitted provided that the following conditions 9 | are met: 10 | 11 | * Redistributions of source code must retain the above copyright 12 | notice, this list of conditions and the following disclaimer. 13 | * Redistributions in binary form must reproduce the above copyright 14 | notice, this list of conditions and the following disclaimer in 15 | the documentation and/or other materials provided with the 16 | distribution. 17 | * Neither the name of Samsung Electronics Corporation nor the names of 18 | its contributors may be used to endorse or promote products derived 19 | from this software without specific prior written permission. 20 | 21 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 22 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 23 | LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 24 | A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 25 | OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 26 | SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 27 | LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 28 | DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 29 | THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 30 | (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 31 | OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
32 | """ 33 | 34 | import time 35 | from node import SSHclient 36 | from cli import Cli 37 | from target_utils import TargetUtils 38 | from pos_config import POS_Config 39 | from utils import Client 40 | from prometheus import Prometheus 41 | from json import load 42 | from os import path 43 | from sys import exit 44 | import logger 45 | import pathlib 46 | import inspect 47 | from copy import deepcopy 48 | from threadable_node import threaded 49 | 50 | logger = logger.get_logger(__name__) 51 | 52 | # TODO add support for multi initiaor client object 53 | 54 | Max_Client_Cnt = 256 # Maximum number of client that can connect 55 | 56 | 57 | class POS: 58 | """Class object contains object for 59 | 1, cli.py 60 | 2, target_utils.py 61 | 3, utils.py 62 | Args: 63 | data_path : path of pos_config data json | default = None if None read from testcase/config_files/pos_config.json 64 | config_path : path of toplogy file | default = None 65 | """ 66 | 67 | def __init__(self, data_path=None, config_path=None): 68 | 69 | if data_path is None: 70 | data_path = "pos_config.json" 71 | if config_path is None: 72 | config_path = "topology.json" 73 | trident_config = "trident_config.json" 74 | self.client_cnt = 0 75 | self.client_handle = [] 76 | self.obj_list = [] 77 | 78 | caller_file = inspect.stack()[1].filename 79 | caller_dir = pathlib.Path(caller_file).parent.resolve() 80 | is_file_exist = path.exists("{}/config_files/{}".format(caller_dir, data_path)) 81 | 82 | if is_file_exist: 83 | data_path = "{}/config_files/{}".format(caller_dir, data_path) 84 | self.data_dict = self._json_reader(data_path, abs_path=True)[1] 85 | else: 86 | self.data_dict = self._json_reader(data_path)[1] 87 | 88 | self.data_dict_bkp = deepcopy(self.data_dict) 89 | 90 | self.config_dict = self._json_reader(config_path)[1] 91 | self.trident_config = self._json_reader(trident_config)[1] 92 | self.pos_as_service = self.trident_config["pos_as_a_service"]["enable"] 93 | 94 | self.client_fio_conf = self.trident_config["forced_fio_config"] 95 | 96 | logger.info(f"Installed POS as Service : {self.pos_as_service}") 97 | 98 | self.target_ssh_obj = SSHclient( 99 | self.config_dict["login"]["target"]["server"][0]["ip"], 100 | self.config_dict["login"]["target"]["server"][0]["username"], 101 | self.config_dict["login"]["target"]["server"][0]["password"], 102 | ) 103 | self.obj_list.append(self.target_ssh_obj) 104 | pos_path = None 105 | if not self.pos_as_service: 106 | pos_path = self.config_dict["paths"]["pos_path"] 107 | 108 | self.cli = Cli(self.target_ssh_obj, data_dict=self.data_dict, 109 | pos_as_service=self.pos_as_service, 110 | pos_source_path=pos_path) 111 | 112 | self.target_utils = TargetUtils(self.target_ssh_obj, self.cli, 113 | self.data_dict, 114 | pos_as_service=self.pos_as_service) 115 | 116 | self.pos_conf = POS_Config(self.target_ssh_obj) 117 | self.pos_conf.load_config() 118 | if self.pos_as_service: 119 | self.prometheus = Prometheus(self.target_ssh_obj, self.data_dict) 120 | 121 | self.client_cnt = self.config_dict["login"]["initiator"]["number"] 122 | if self.client_cnt >= 1 and self.client_cnt < Max_Client_Cnt: 123 | for client_cnt in range(self.client_cnt): 124 | self.create_client_objects(client_cnt) 125 | else: 126 | assert 0 127 | 128 | self.collect_pos_core = False # Don't collect core after test fail 129 | 130 | def create_client_objects(self, client_cnt): 131 | client_list = self.config_dict["login"]["initiator"]["client"] 132 | ip = client_list[client_cnt]["ip"] 133 | username = 
client_list[client_cnt]["username"] 134 | password = client_list[client_cnt]["password"] 135 | client_ssh_obj = SSHclient(ip, username, password) 136 | self.obj_list.append(client_ssh_obj) 137 | client_obj = Client(client_ssh_obj) 138 | client_obj.set_fio_runtime(self.client_fio_conf) 139 | 140 | self.client_handle.append(client_obj) 141 | 142 | if self.client_cnt >= 1: 143 | self.client = self.client_handle[0] 144 | 145 | def _clearall_objects(self): 146 | if len(self.obj_list) > 0: 147 | for obj in self.obj_list: 148 | obj.close() 149 | return True 150 | 151 | def _clearall_objects(self): 152 | if len(self.obj_list) > 0: 153 | for obj in self.obj_list: 154 | obj.close() 155 | return True 156 | def _json_reader(self, json_file: str, abs_path=False) -> dict: 157 | """reads json file from /testcase/config_files 158 | 159 | Read the config file from following location: 160 | Args: 161 | json_file (str) json name [No path required] 162 | """ 163 | try: 164 | if abs_path: 165 | json_path = json_file 166 | else: 167 | dir_path = path.dirname(path.realpath(__file__)) 168 | json_path = f"{dir_path}/../testcase/config_files/{json_file}" 169 | 170 | logger.info(f"reading json file {json_path}") 171 | with open(f"{json_path}") as f: 172 | json_out = load(f) 173 | f.close() 174 | return True, json_out 175 | except OSError as e: 176 | logger.error(f" failed to read {json_file} due to {e}") 177 | exit() 178 | 179 | def set_core_collection(self, collect_pos_core: bool = False): 180 | """ Method is to eable core collection on test failure """ 181 | self.collect_pos_core = collect_pos_core 182 | 183 | def collect_core(self, is_pos_running: bool): 184 | """ Method to collect pos log and core dump """ 185 | try: 186 | if is_pos_running: 187 | assert self.target_utils.dump_core() == True 188 | return True 189 | except Exception as e: 190 | logger.error("Failed to collect core data due to {e}") 191 | return False 192 | 193 | def exit_handler(self, expected=False, hetero_setup=False, dump_cli=True): 194 | """ Method to exit out of a test script as per the the result """ 195 | try: 196 | assert self.target_utils.helper.check_system_memory() == True 197 | 198 | if dump_cli: 199 | self.cli.dump_cli_history(clean=True) 200 | 201 | is_pos_running = False 202 | if self.target_utils.helper.check_pos_exit() == False: 203 | is_pos_running = True 204 | 205 | # POS Client Cleanup 206 | for client in self.client_handle: 207 | assert client.reset(pos_run_status=is_pos_running) == True 208 | 209 | # If system stat is not expected and core collection in enable 210 | if expected == False and is_pos_running == True: 211 | logger.error("Test case failed!") 212 | if self.collect_pos_core: 213 | logger.error("Creating core dump") 214 | assert self.target_utils.dump_core() == True 215 | else: 216 | logger.error("System clean up") 217 | self.cli.pos_stop(grace_shutdown=False) 218 | if expected == True and is_pos_running == True: 219 | logger.error("System clean up") 220 | self.cli.pos_stop(grace_shutdown=False) 221 | 222 | # Reset the target to previous state 223 | self.pos_conf.restore_config() 224 | 225 | if hetero_setup and not is_pos_running: 226 | pass 227 | 228 | if expected == False: 229 | assert 0 230 | except Exception as e: 231 | logger.error(e) 232 | logger.info( 233 | "------------------------------------------ CLI HISTORY ------------------------------------------" 234 | ) 235 | for cli_cmd in self.cli.cli_history: 236 | logger.info(cli_cmd) 237 | 238 | logger.info( 239 | 
"-------------------------------------------------------------------------------------------------------" 240 | ) 241 | # time.sleep(10000) 242 | # self.cli.core_dump() 243 | #self.cli.stop_system(grace_shutdown=False) 244 | 245 | assert 0 246 | -------------------------------------------------------------------------------- /lib/composable/system_management.py: -------------------------------------------------------------------------------- 1 | import pytest, json, sys, os, time, random, codecs, re, datetime 2 | from random import randint 3 | 4 | import logger as logger 5 | import composable.composable_core as libcore 6 | 7 | logger = logger.get_logger(__name__) 8 | dir_path = os.path.dirname(os.path.realpath(__file__)) 9 | 10 | with open( 11 | "{}/../../testcase/config_files/system_management.json".format(dir_path) 12 | ) as p: 13 | tc_dict = json.load(p) 14 | 15 | 16 | def test_system_sanity_detach_attach_device_iteration_io_verify( 17 | target=None, client=None, phase=None, data_set=None, Time=None 18 | ): 19 | try: 20 | lock_status = None 21 | if ( 22 | target == None 23 | or client == None 24 | or phase == None 25 | or data_set == None 26 | or Time == None 27 | ): 28 | raise AssertionError 29 | 30 | test_dict = tc_dict[ 31 | "test_system_sanity_detach_attach_device_iteration_io_verify" 32 | ] 33 | 34 | phase_time = Time 35 | start_time = time.time() 36 | lock_status = target.cli.lock.acquire() 37 | logger.info(f"Lock status : acquire {lock_status}") 38 | 39 | assert ( 40 | libcore.subsystem_module( 41 | target=target, 42 | client=client, 43 | data_set=data_set, 44 | config_dict=test_dict, 45 | action="create", 46 | phase=phase, 47 | ) 48 | == True 49 | ) 50 | 51 | assert ( 52 | libcore.volume_module( 53 | target=target, 54 | data_set=data_set, 55 | config_dict=test_dict, 56 | action="create", 57 | phase=phase, 58 | ) 59 | == True 60 | ) 61 | assert ( 62 | libcore.volume_module( 63 | target=target, 64 | data_set=data_set, 65 | config_dict=test_dict, 66 | action="mount", 67 | phase=phase, 68 | ) 69 | == True 70 | ) 71 | 72 | assert target.cli.volume_list(target.cli.array_name)[0] == True 73 | assert ( 74 | libcore.subsystem_module( 75 | target=target, 76 | client=client, 77 | data_set=data_set, 78 | config_dict=test_dict, 79 | action="connect", 80 | phase=phase, 81 | ) 82 | == True 83 | ) 84 | lock_status = target.cli.lock.release() 85 | logger.info(f"Lock status : release {lock_status}") 86 | 87 | time.sleep(5) 88 | 89 | model_name = test_dict["phase"][0]["volume"]["create"]["basename"] 90 | assert client.nvme_list(model_name) == True 91 | fio_device = client.nvme_list_out 92 | 93 | current_time = time.time() 94 | 95 | while True: 96 | lock_status = target.cli.lock.acquire() 97 | logger.info(f"Lock status : acquire {lock_status}") 98 | 99 | assert target.cli.array_info(target.cli.array_name)[0] == True 100 | num_data_disks = len( 101 | target.cli.array_data[target.cli.array_name]["data_list"] 102 | ) 103 | stripe_size_for_writing = num_data_disks * 256 * 1024 104 | lock_status = target.cli.lock.release() 105 | logger.info(f"Lock status : release {lock_status}") 106 | 107 | fio_size = stripe_size_for_writing 108 | pattern_data = target.cli.helper.generate_pattern(8) 109 | pattern_data = "0x{}".format(pattern_data) 110 | bs = test_dict["phase"][phase]["io"]["fio"]["bs"] 111 | iod = test_dict["phase"][phase]["io"]["fio"]["iodepth"] 112 | assert ( 113 | client.fio_generic_runner( 114 | devices=fio_device, 115 | fio_user_data=( 116 | "fio --name=fio_{} --ioengine=libaio --rw={} --offset=0" 
117 | " --bs=4kb --size={} --iodepth={} --direct=1 --numjobs=1" 118 | " --verify=pattern --verify_pattern={} --do_verify=1" 119 | " --verify_dump=1 --verify_fatal=1 --continue_on_error=none" 120 | " --group_reporting".format( 121 | "write", "write", fio_size, iod, pattern_data 122 | ) 123 | ), 124 | json_out="test_system_sanity_detach_attach_device_iteration_io_verify", 125 | )[0] 126 | == True 127 | ) 128 | 129 | assert ( 130 | client.fio_generic_runner( 131 | devices=fio_device, 132 | fio_user_data=( 133 | "fio --name=fio_{} --ioengine=libaio --rw={} --offset=0" 134 | " --bs=4kb --size={} --iodepth={} --direct=1 --numjobs=1" 135 | " --verify=pattern --verify_pattern={} --do_verify=1" 136 | " --continue_on_error=none" 137 | " --group_reporting".format( 138 | "read", "read", fio_size, iod, pattern_data 139 | ) 140 | ), 141 | json_out="test_system_sanity_detach_attach_device_iteration_io_verify", 142 | )[0] 143 | == True 144 | ) 145 | lock_status = target.cli.lock.acquire() 146 | logger.info(f"Lock status : acquire {lock_status}") 147 | assert target.cli.device_list()[0] == True 148 | logger.info("System Disks {}".format(target.cli.system_disks)) 149 | 150 | assert target.cli.array_info(array_name=target.cli.array_name)[0] == True 151 | data_disks = target.cli.array_data[target.cli.array_name]["data_list"] 152 | spare_disks = target.cli.array_data[target.cli.array_name]["spare_list"] 153 | 154 | logger.info( 155 | "Data Disks {}".format( 156 | target.cli.array_data[target.cli.array_name]["data_list"] 157 | ) 158 | ) 159 | logger.info( 160 | "Spare Disks {}".format( 161 | target.cli.array_data[target.cli.array_name]["spare_list"] 162 | ) 163 | ) 164 | 165 | dev_name = random.choice(data_disks + spare_disks) 166 | logger.info( 167 | "BDF of {} is {}".format( 168 | dev_name, target.cli.NVMe_BDF[dev_name]["addr"] 169 | ) 170 | ) 171 | lock_status = target.cli.lock.release() 172 | logger.info(f"Lock status : release {lock_status}") 173 | 174 | dev_name_list = [] 175 | dev_name_list.append(dev_name) 176 | assert target.target_utils.device_hot_remove(dev_name_list) == True 177 | assert ( 178 | client.fio_generic_runner( 179 | devices=fio_device, 180 | fio_user_data=( 181 | "fio --name=fio_{} --ioengine=libaio --rw={} --offset=0" 182 | " --bs=4kb --size={} --iodepth={} --direct=1 --numjobs=1" 183 | " --verify=pattern --verify_pattern={} --do_verify=1" 184 | " --continue_on_error=none" 185 | " --group_reporting".format( 186 | "read", "read", fio_size, iod, pattern_data 187 | ) 188 | ), 189 | json_out="test_system_sanity_detach_attach_device_iteration_io_verify", 190 | )[0] 191 | == True 192 | ) 193 | 194 | assert target.target_utils.pci_rescan() == True 195 | 196 | lock_status = target.cli.lock.acquire() 197 | logger.info(f"Lock status : acquire {lock_status}") 198 | for index in range(2): 199 | assert target.cli.device_list()[0] == True 200 | 201 | assert ( 202 | target.cli.array_info(array_name=target.cli.array_name)[0] == True 203 | ) 204 | system_disks = target.cli.system_disks 205 | normal_disks = target.cli.normal_data_disks 206 | data_disks = target.cli.array_data[target.cli.array_name]["data_list"] 207 | spare_disks = target.cli.array_data[target.cli.array_name]["spare_list"] 208 | 209 | if len(spare_disks) == 0: 210 | if len(normal_disks) < len(data_disks): 211 | assert ( 212 | target.cli.array_addspare( 213 | system_disks[0], target.cli.array_name 214 | )[0] 215 | == True 216 | ) 217 | elif len(normal_disks) == len(data_disks): 218 | if random.randint(0, 1): 219 | assert ( 220 | 
target.cli.array_addspare( 221 | system_disks[0], target.cli.array_name 222 | )[0] 223 | == True 224 | ) 225 | else: 226 | logger.info("Skip add spare device") 227 | else: 228 | logger.info("The spare device already exists") 229 | 230 | while True: 231 | assert ( 232 | target.cli.array_info(array_name=target.cli.array_name)[0] == True 233 | ) 234 | if ( 235 | "normal" 236 | in target.cli.array_data[target.cli.array_name]["situation"].lower() 237 | ): 238 | break 239 | time.sleep(2) 240 | lock_status = target.cli.lock.release() 241 | logger.info(f"Lock status : release {lock_status}") 242 | 243 | assert ( 244 | client.fio_generic_runner( 245 | devices=fio_device, 246 | fio_user_data=( 247 | "fio --name=fio_{} --ioengine=libaio --rw={} --offset=0" 248 | " --bs=4kb --size={} --iodepth={} --direct=1 --numjobs=1" 249 | " --verify=pattern --verify_pattern={} --do_verify=1" 250 | " --continue_on_error=none" 251 | " --group_reporting".format( 252 | "read", "read", fio_size, iod, pattern_data 253 | ) 254 | ), 255 | json_out="test_system_sanity_detach_attach_device_iteration_io_verify", 256 | )[0] 257 | == True 258 | ) 259 | # logger.info("The lastest offset {} length {} eache devices write size {}".format(client._fio_offset, client._fio_length, fio_size)) 260 | current_time = time.time() 261 | running_time = current_time - start_time 262 | if running_time >= phase_time: 263 | break 264 | 265 | except Exception as e: 266 | if lock_status: 267 | target.cli.lock.release() 268 | logger.info(f"Lock status : release {lock_status}") 269 | logger.error("Failed due to {}".format(e)) 270 | logger.error( 271 | "Failed test case name : {}".format(sys._getframe().f_code.co_name) 272 | ) 273 | logger.error("Failed test stage : {}".format(phase + 1)) 274 | raise 275 | -------------------------------------------------------------------------------- /testcase/cli/test_crud_sanity.py: -------------------------------------------------------------------------------- 1 | import pytest 2 | 3 | import re 4 | import time 5 | from common_libs import * 6 | 7 | import logger 8 | logger = logger.get_logger(__name__) 9 | 10 | @pytest.mark.sanity 11 | def test_crud_array_ops_all_raids(array_fixture): 12 | """ 13 | The purpose of this test is to do array crud operation with following matrix. 14 | 15 | RAID Types - (no-raid, raid0, raid5, raid6, raid10) 16 | Operations - 17 | C: create / autocreate 18 | R: list 19 | U: addspare / mount / rebuild / replace / rmspare / unmount 20 | D: delete 21 | 22 | Verification: POS CLI - Array CRUD Operation. 
23 | """ 24 | logger.info( 25 | f" ==================== Test : test_crud_array_ops_all_raids ================== " 26 | ) 27 | pos = array_fixture 28 | try: 29 | assert pos.cli.device_list()[0] == True 30 | system_disks = pos.cli.system_disks 31 | 32 | for arr1_raid in ARRAY_ALL_RAID_LIST: 33 | arr2_raid = random.choice(ARRAY_ALL_RAID_LIST) 34 | arr1_disk = RAID_MIN_DISK_REQ_DICT[arr1_raid] 35 | arr2_disk = RAID_MIN_DISK_REQ_DICT[arr2_raid] 36 | 37 | if (arr1_disk + arr2_disk + 2) > len(system_disks): 38 | logger.warning("Array creation requied more disk") 39 | continue 40 | 41 | assert multi_array_data_setup(pos.data_dict, 2, (arr1_raid, arr2_raid), 42 | (arr1_disk, arr2_disk), (0, 0), 43 | ("WT", "WT"), (False, True)) == True 44 | 45 | # Create, Read and Update Ops 46 | assert pos.target_utils.bringup_array(data_dict=pos.data_dict) == True 47 | 48 | # Read and Update Ops 49 | assert pos.cli.array_list()[0] == True 50 | array_list = list(pos.cli.array_dict.keys()) 51 | 52 | assert pos.cli.device_list()[0] == True 53 | system_disks = pos.cli.system_disks 54 | for array_name in array_list: 55 | assert pos.cli.array_info(array_name=array_name)[0] == True 56 | array_raid = pos.cli.array_data[array_name]["data_raid"] 57 | data_disk = pos.cli.array_data[array_name]["data_list"] 58 | 59 | # spare disk is not supported, continue 60 | logger.info(f"Array Raid {array_raid}") 61 | if array_raid == "RAID0" or array_raid == 'NONE': 62 | continue 63 | 64 | spare_disk = system_disks.pop(0) 65 | assert pos.cli.array_addspare(device_name=spare_disk, 66 | array_name=array_name)[0] == True 67 | 68 | assert pos.cli.array_rmspare(device_name=spare_disk, 69 | array_name=array_name)[0] == True 70 | 71 | assert pos.cli.array_addspare(device_name=spare_disk, 72 | array_name=array_name)[0] == True 73 | 74 | assert pos.cli.array_replace_disk(device_name=data_disk[0], 75 | array_name=array_name)[0] == True 76 | assert pos.target_utils.array_rebuild_wait(array_name=array_name) == True 77 | 78 | # 30 sec sleep after rebuild 79 | time.sleep(30) 80 | 81 | # Update and Delete Operation 82 | assert array_unmount_and_delete(pos) == True 83 | logger.info( 84 | " ============================= Test ENDs ======================================" 85 | ) 86 | except Exception as e: 87 | logger.error(f"Test script failed due to {e}") 88 | pos.exit_handler(expected=False) 89 | 90 | @pytest.mark.sanity 91 | def test_crud_listner_ops(system_fixture): 92 | """ 93 | The purpose of this test is to do listner crud operation with following matrix. 94 | 95 | Operations - 96 | C: create / create-transport 97 | R: list-listner 98 | U: add-listener 99 | D: remove-listener 100 | 101 | Verification: POS CLI - Subsystem Listner CRUD Operation. 
102 | """ 103 | logger.info("================ test_crud_listner_ops ================") 104 | try: 105 | pos = system_fixture 106 | data_dict = pos.data_dict 107 | 108 | assert pos.cli.pos_start()[0] == True 109 | 110 | # Create - Create Transport 111 | assert pos.cli.subsystem_create_transport(buf_cache_size=64, 112 | num_shared_buf=4096, transport_type="TCP")[0] == True 113 | 114 | # Create - Create 1 susbsystem 115 | nqn = f"nqn.2022-10.pos-array:subsystem1" 116 | assert pos.cli.subsystem_create(nqn)[0] == True 117 | 118 | # Read - Subsystem List 119 | assert pos.target_utils.get_subsystems_list() == True 120 | assert nqn == pos.target_utils.ss_temp_list[0] 121 | 122 | ip_addr = pos.target_utils.helper.ip_addr[0] 123 | for subsystem in pos.target_utils.ss_temp_list: 124 | # Read - List Listner (No Listener) 125 | assert pos.cli.subsystem_list_listener(subsystem)[0] == True 126 | assert len(pos.cli.subsystem_listeners[subsystem]) == 0 127 | 128 | # Update - Add Listner 129 | assert pos.cli.subsystem_add_listner(subsystem, 130 | ip_addr, "1158")[0] == True 131 | 132 | # Read - List Listner 133 | assert pos.cli.subsystem_list_listener(subsystem)[0] == True 134 | assert len(pos.cli.subsystem_listeners[subsystem]) == 1 135 | 136 | # Update - Remove Listner 137 | assert pos.cli.subsystem_remove_listener(subsystem, 138 | ip_addr, "1158")[0] == True 139 | 140 | # Read - List Listner 141 | assert pos.cli.subsystem_list_listener(subsystem)[0] == True 142 | assert len(pos.cli.subsystem_listeners[subsystem]) == 0 143 | 144 | # Delete Subsystem 145 | for subsystem in pos.target_utils.ss_temp_list: 146 | assert pos.cli.subsystem_delete(subsystem)[0] == True 147 | 148 | # Read - Subsystem List 149 | assert pos.target_utils.get_subsystems_list() == True 150 | assert len(pos.target_utils.ss_temp_list) == 0 151 | 152 | except Exception as e: 153 | logger.error(f"Test script failed due to {e}") 154 | pos.exit_handler(expected=False) 155 | 156 | @pytest.mark.sanity 157 | def test_crud_subsystem_ops(system_fixture): 158 | """ 159 | The purpose of this test is to do subsystem crud operation with following matrix. 160 | 161 | Operations - 162 | C: create / create-transport 163 | R: list 164 | U: add-listener 165 | D: delete 166 | 167 | Verification: POS CLI - Array CRUD Operation. 
168 | """ 169 | logger.info("================ test_npor_with_half_uram ================") 170 | try: 171 | pos = system_fixture 172 | data_dict = pos.data_dict 173 | 174 | assert pos.cli.pos_start()[0] == True 175 | 176 | # Create - Create Transport 177 | assert pos.cli.subsystem_create_transport(buf_cache_size=64, 178 | num_shared_buf=4096, transport_type="TCP")[0] == True 179 | 180 | # Create - Create 1024 susbsystem 181 | for ss_nr in range(1, 1024): 182 | nqn = f"nqn.2022-10.pos-array:subsystem{ss_nr}" 183 | ns_count = 512 184 | serial_number = "POS000000%04d"%ss_nr 185 | model_number = "POS_VOLUME_array" 186 | 187 | assert pos.cli.subsystem_create(nqn, ns_count, serial_number, 188 | model_number)[0] == True 189 | logger.info(f"Subsystem {ss_nr} created successfully.") 190 | 191 | # Read - Subsystem List 1023 + 1 discovery 192 | assert pos.target_utils.get_subsystems_list() == True 193 | assert len(pos.target_utils.ss_temp_list) == 1023 194 | 195 | # Update - Add Listner 196 | ip_addr = pos.target_utils.helper.ip_addr[0] 197 | for subsystem in pos.target_utils.ss_temp_list: 198 | assert pos.cli.subsystem_add_listner(subsystem, 199 | ip_addr, "1158")[0] == True 200 | 201 | # Connect to all subsystems from initiator 202 | for nqn in pos.target_utils.ss_temp_list[:256]: 203 | assert pos.client.nvme_connect(nqn, ip_addr, "1158") == True 204 | 205 | # Disconnect half subsystems from initiator 206 | for nqn in pos.target_utils.ss_temp_list[:128]: 207 | assert pos.client.nvme_disconnect(nqn, ip_addr, "1158") == True 208 | 209 | # Delete Subsystem 210 | for subsystem in pos.target_utils.ss_temp_list: 211 | assert pos.cli.subsystem_delete(subsystem)[0] == True 212 | 213 | # Disconnect half subsystems from initiator 214 | for nqn in pos.target_utils.ss_temp_list[128:256]: 215 | assert pos.client.nvme_disconnect(nqn, ip_addr, "1158") == True 216 | 217 | # Read - Subsystem List 218 | assert pos.target_utils.get_subsystems_list() == True 219 | assert len(pos.target_utils.ss_temp_list) == 0 220 | 221 | except Exception as e: 222 | logger.error(f"Test script failed due to {e}") 223 | pos.exit_handler(expected=False) 224 | 225 | @pytest.mark.sanity 226 | def test_crud_transport_ops(system_fixture): 227 | """ 228 | The purpose of this test is to do transport crud operation with following matrix. 229 | 230 | Operations - 231 | C: create 232 | R: list 233 | 234 | Verification: POS CLI - Transport CRUD Operation. 235 | """ 236 | logger.info("================ test_crud_transport_ops ================") 237 | try: 238 | pos = system_fixture 239 | data_dict = pos.data_dict 240 | 241 | assert pos.cli.pos_start()[0] == True 242 | 243 | # Create - Create Transport 244 | assert pos.cli.transport_create(buf_cache_size=64, 245 | num_shared_buf=4096, transport_type="TCP")[0] == True 246 | 247 | # Read - List Transport 248 | assert pos.cli.transport_list()[0] == True 249 | 250 | logger.info(f"Num of Transport : {pos.cli.num_transport}") 251 | assert pos.cli.num_transport == 1 252 | 253 | for transport in pos.cli.transports: 254 | logger.info(f"tr_type : {transport['tr_type']}, " 255 | f"q_depth: {transport['q_depth']}") 256 | except Exception as e: 257 | logger.error(f"Test script failed due to {e}") 258 | pos.exit_handler(expected=False) 259 | 260 | @pytest.mark.sanity 261 | def test_crud_volume_ops(array_fixture): 262 | """ 263 | The purpose of this test is to do volume crud operation on RAID5 and RAID10 264 | arrays for following matrix. 
265 | 266 | Array RAID Types - (raid5, raid10) 267 | Operations - 268 | C: create 269 | R: list 270 | U: mount / mount-with-subsystem / rename / set-property / unmount 271 | D: delete 272 | 273 | Verification: POS CLI - Volume CRUD Operation. 274 | """ 275 | logger.info( 276 | f" ==================== Test : test_crud_volume_ops ================== " 277 | ) 278 | pos = array_fixture 279 | try: 280 | assert pos.cli.device_list()[0] == True 281 | system_disks = pos.cli.system_disks 282 | 283 | arr1_raid, arr2_raid = "RAID5", "RAID10" 284 | 285 | arr1_disk = RAID_MIN_DISK_REQ_DICT[arr1_raid] 286 | arr2_disk = RAID_MIN_DISK_REQ_DICT[arr2_raid] 287 | 288 | if (arr1_disk + arr2_disk + 2) > len(system_disks): 289 | pytest.skip("Array creation requied more disk") 290 | 291 | assert multi_array_data_setup(pos.data_dict, 2, (arr1_raid, arr2_raid), 292 | (arr1_disk, arr2_disk), (0, 0), 293 | ("WT", "WT"), (False, True)) == True 294 | assert pos.target_utils.bringup_array(data_dict=pos.data_dict) == True 295 | 296 | array_list = list(pos.cli.array_dict.keys()) 297 | 298 | assert pos.target_utils.get_subsystems_list() == True 299 | subs_list = pos.target_utils.ss_temp_list 300 | 301 | nr_vol_list = [1, 16, 256] 302 | for num_vols in nr_vol_list: 303 | logger.info(f"Create, Mount, List, Unmount, Delete {num_vols} Vols") 304 | # Create, Read and Update Ops 305 | assert volume_create_and_mount_multiple(pos, num_vols, 306 | array_list=array_list, subs_list=subs_list) == True 307 | 308 | # Update and Delete Operation 309 | assert volume_unmount_and_delete_multiple(pos, array_list) == True 310 | 311 | # Create 2 volumes from each array 312 | assert volume_create_and_mount_multiple(pos, 2, array_list=array_list, 313 | mount_vols=False, subs_list=subs_list) == True 314 | 315 | for array_name in array_list: 316 | assert pos.cli.volume_list(array_name=array_name)[0] == True 317 | for vol_name in pos.cli.vol_dict.keys(): 318 | assert pos.cli.volume_rename("new" + vol_name, vol_name, 319 | array_name=array_name)[0] == True 320 | #assert pos.cli.volume_mount_with_subsystem() 321 | 322 | # Update and Delete Operation 323 | assert volume_unmount_and_delete_multiple(pos, array_list) == True 324 | 325 | logger.info( 326 | " ============================= Test ENDs ======================================" 327 | ) 328 | except Exception as e: 329 | logger.error(f"Test script failed due to {e}") 330 | pos.exit_handler(expected=False) 331 | -------------------------------------------------------------------------------- /lib/composable/io_management.py: -------------------------------------------------------------------------------- 1 | import pytest, json, sys, os, time, random, codecs, re, datetime 2 | from random import randint 3 | 4 | import logger as logger 5 | import composable.composable_core as libcore 6 | 7 | # sys.path.insert(0, "/root/poseidon/commit2505/trident") 8 | logger = logger.get_logger(__name__) 9 | dir_path = os.path.dirname(os.path.realpath(__file__)) 10 | 11 | with open("{}/../../testcase/config_files/io_management.json".format(dir_path)) as p: 12 | tc_dict = json.load(p) 13 | 14 | 15 | def test_io_sanity_iteration_io_verify_random_pattern( 16 | target=None, client=None, phase=None, data_set=None, Time=None 17 | ): 18 | try: 19 | lock_status = None 20 | if ( 21 | target == None 22 | or client == None 23 | or phase == None 24 | or data_set == None 25 | or Time == None 26 | ): 27 | raise AssertionError 28 | 29 | test_dict = tc_dict["test_io_sanity_iteration_io_verify_random_pattern"] 30 | 31 | phase_time 
= Time 32 | start_time = time.time() 33 | 34 | lock_status = target.cli.lock.acquire() 35 | logger.info(f"Lock status : acquire {lock_status}") 36 | assert ( 37 | libcore.subsystem_module( 38 | target=target, 39 | client=client, 40 | data_set=data_set, 41 | config_dict=test_dict, 42 | action="create", 43 | phase=phase, 44 | ) 45 | == True 46 | ) 47 | 48 | assert ( 49 | libcore.volume_module( 50 | target=target, 51 | data_set=data_set, 52 | config_dict=test_dict, 53 | action="create", 54 | phase=phase, 55 | ) 56 | == True 57 | ) 58 | assert ( 59 | libcore.volume_module( 60 | target=target, 61 | data_set=data_set, 62 | config_dict=test_dict, 63 | action="mount", 64 | phase=phase, 65 | ) 66 | == True 67 | ) 68 | 69 | assert target.cli.volume_list(target.cli.array_name)[0] == True 70 | assert ( 71 | libcore.subsystem_module( 72 | target=target, 73 | client=client, 74 | data_set=data_set, 75 | config_dict=test_dict, 76 | action="connect", 77 | phase=phase, 78 | ) 79 | == True 80 | ) 81 | lock_status = target.cli.lock.release() 82 | logger.info(f"Lock status : release {lock_status}") 83 | 84 | time.sleep(5) 85 | model_name = test_dict["phase"][0]["volume"]["create"]["basename"] 86 | assert client.nvme_list(model_name) == True 87 | fio_device = client.nvme_list_out 88 | 89 | current_time = time.time() 90 | fio_time = phase_time - (current_time - start_time) 91 | if fio_time < 5: 92 | fio_time = 10 93 | 94 | while True: 95 | fio_size = random.choice(["5%", "10%"]) 96 | pattern_data = target.cli.helper.generate_pattern(8) 97 | pattern_data = "0x{}".format(pattern_data) 98 | bs = test_dict["phase"][phase]["io"]["fio"]["bs"] 99 | iod = test_dict["phase"][phase]["io"]["fio"]["iodepth"] 100 | assert ( 101 | client.fio_generic_runner( 102 | devices=fio_device, 103 | fio_user_data=( 104 | "fio --name=fio_write --rw=write --size={} --ioengine=libaio" 105 | " --direct=1 --iodepth={} --bs={} --numjobs=1 --offset=0" 106 | " --verify=pattern --verify_pattern={} --do_verify=1" 107 | " --verify_dump=1 --verify_fatal=1 --continue_on_error=none" 108 | " --group_reporting".format(fio_size, iod, bs, pattern_data) 109 | ), 110 | json_out="test_io_sanity_iteration_io_verify_random_pattern", 111 | )[0] 112 | == True 113 | ) 114 | 115 | # logger.info("The lastest offset {} length {} eache devices write size {}".format(client._fio_offset, client._fio_length, fio_size)) 116 | current_time = time.time() 117 | running_time = current_time - start_time 118 | if running_time >= phase_time: 119 | break 120 | 121 | bs = test_dict["phase"][phase]["io"]["fio"]["bs"] 122 | iod = test_dict["phase"][phase]["io"]["fio"]["iodepth"] 123 | 124 | assert ( 125 | client.fio_generic_runner( 126 | devices=fio_device, 127 | fio_user_data=( 128 | "fio --name=fio_read --rw=read --size={} --ioengine=libaio" 129 | " --direct=1 --iodepth={} --bs={} --numjobs=1 --offset=0" 130 | " --verify=pattern --verify_pattern={} --do_verify=1" 131 | " --continue_on_error=none" 132 | " --group_reporting".format(fio_size, iod, bs, pattern_data) 133 | ), 134 | json_out="test_io_sanity_iteration_io_verify_random_pattern", 135 | )[0] 136 | == True 137 | ) 138 | 139 | current_time = time.time() 140 | running_time = current_time - start_time 141 | if running_time >= phase_time: 142 | break 143 | 144 | except Exception as e: 145 | if lock_status: 146 | target.cli.lock.release() 147 | logger.info(f"Lock status : release {lock_status}") 148 | logger.error("Failed due to {}".format(e)) 149 | logger.error( 150 | "Failed test case name : 
{}".format(sys._getframe().f_code.co_name) 151 | ) 152 | logger.error("Failed test stage : {}".format(phase + 1)) 153 | raise 154 | 155 | 156 | def test_io_sanity_set_get_threashold_io_gc( 157 | target=None, client=None, phase=None, data_set=None, Time=None 158 | ): 159 | try: 160 | lock_status = None 161 | if ( 162 | target == None 163 | or client == None 164 | or phase == None 165 | or data_set == None 166 | or Time == None 167 | ): 168 | raise AssertionError 169 | 170 | test_dict = tc_dict["test_io_sanity_set_get_threashold_io_gc"] 171 | 172 | phase_time = Time 173 | start_time = time.time() 174 | 175 | lock_status = target.cli.lock.acquire() 176 | logger.info(f"Lock status : acquire {lock_status}") 177 | assert ( 178 | libcore.subsystem_module( 179 | target=target, 180 | client=client, 181 | data_set=data_set, 182 | config_dict=test_dict, 183 | action="create", 184 | phase=phase, 185 | ) 186 | == True 187 | ) 188 | assert ( 189 | libcore.volume_module( 190 | target=target, 191 | data_set=data_set, 192 | config_dict=test_dict, 193 | action="create", 194 | phase=phase, 195 | ) 196 | == True 197 | ) 198 | assert ( 199 | libcore.volume_module( 200 | target=target, 201 | data_set=data_set, 202 | config_dict=test_dict, 203 | action="mount", 204 | phase=phase, 205 | ) 206 | == True 207 | ) 208 | assert target.cli.volume_list(target.cli.array_name)[0] == True 209 | assert ( 210 | libcore.subsystem_module( 211 | target=target, 212 | client=client, 213 | data_set=data_set, 214 | config_dict=test_dict, 215 | action="connect", 216 | phase=phase, 217 | ) 218 | == True 219 | ) 220 | lock_status = target.cli.lock.release() 221 | logger.info(f"Lock status : release {lock_status}") 222 | 223 | time.sleep(5) 224 | 225 | model_name = test_dict["phase"][0]["volume"]["create"]["basename"] 226 | assert client.nvme_list(model_name) == True 227 | fio_device = client.nvme_list_out 228 | 229 | current_time = time.time() 230 | 231 | while True: 232 | lock_status = target.cli.lock.acquire() 233 | logger.info(f"Lock status : acquire {lock_status}") 234 | assert ( 235 | target.cli.wbt_get_gc_status(array_name=target.cli.array_name)[0] 236 | == True 237 | ) 238 | assert ( 239 | target.cli.wbt_get_gc_threshold(array_name=target.cli.array_name)[0] 240 | == True 241 | ) 242 | 243 | if target.cli.free_segments > 5: 244 | normal_threshold = random.randint(4, int(target.cli.free_segments)) 245 | urgent_threshold = random.randint(2, int(target.cli.gc_normal) - 1) 246 | if normal_threshold < urgent_threshold: 247 | normal_threshold, urgent_threshold = ( 248 | urgent_threshold, 249 | normal_threshold, 250 | ) 251 | logger.info( 252 | "Set normal threshold {}, urgent threshold {}".format( 253 | normal_threshold, urgent_threshold 254 | ) 255 | ) 256 | assert ( 257 | target.cli.wbt_set_gc_threshold( 258 | array_name=target.cli.array_name, 259 | normal=normal_threshold, 260 | urgent=urgent_threshold, 261 | )[0] 262 | == True 263 | ) 264 | assert ( 265 | target.cli.wbt_get_gc_threshold(array_name=target.cli.array_name)[0] 266 | == True 267 | ) 268 | 269 | assert target.cli.array_info(target.cli.array_name)[0] == True 270 | num_data_disks = len( 271 | target.cli.array_data[target.cli.array_name]["data_list"] 272 | ) 273 | stripe_size_for_writing = num_data_disks * 256 * 1024 274 | lock_status = target.cli.lock.release() 275 | logger.info(f"Lock status : release {lock_status}") 276 | 277 | 278 | fio_size = stripe_size_for_writing 279 | 280 | lock_status = target.cli.lock.acquire() 281 | logger.info(f"Lock status : acquire 
{lock_status}") 282 | 283 | pattern_data = target.cli.helper.generate_pattern(8) 284 | 285 | lock_status = target.cli.lock.release() 286 | logger.info(f"Lock status : release {lock_status}") 287 | 288 | pattern_data = "0x{}".format(pattern_data) 289 | bs = test_dict["phase"][phase]["io"]["fio"]["bs"] 290 | iod = test_dict["phase"][phase]["io"]["fio"]["iodepth"] 291 | assert ( 292 | client.fio_generic_runner( 293 | devices=fio_device, 294 | fio_user_data=( 295 | "fio --name=fio_write --rw=write --size={} --ioengine=libaio" 296 | " --direct=1 --iodepth={} --bs={} --numjobs=1 --offset=0" 297 | " --verify=pattern --verify_pattern={} --do_verify=1" 298 | " --verify_dump=1 --verify_fatal=1 --continue_on_error=none" 299 | " --group_reporting".format(fio_size, iod, bs, pattern_data) 300 | ), 301 | json_out="test_io_sanity_set_get_threashold_io_gc", 302 | )[0] 303 | == True 304 | ) 305 | 306 | # logger.info("The lastest offset {} length {} eache devices write size {}".format(client._fio_offset, client._fio_length, fio_size)) 307 | current_time = time.time() 308 | running_time = current_time - start_time 309 | if running_time >= phase_time: 310 | break 311 | 312 | bs = test_dict["phase"][phase]["io"]["fio"]["bs"] 313 | iod = test_dict["phase"][phase]["io"]["fio"]["iodepth"] 314 | 315 | assert ( 316 | client.fio_generic_runner( 317 | devices=fio_device, 318 | fio_user_data=( 319 | "fio --name=fio_read --rw=read --size={} --ioengine=libaio" 320 | " --direct=1 --iodepth={} --bs={} --numjobs=1 --offset=0" 321 | " --verify=pattern --verify_pattern={} --do_verify=1" 322 | " --continue_on_error=none" 323 | " --group_reporting".format(fio_size, iod, bs, pattern_data) 324 | ), 325 | json_out="test_io_sanity_set_get_threashold_io_gc", 326 | )[0] 327 | == True 328 | ) 329 | 330 | current_time = time.time() 331 | running_time = current_time - start_time 332 | if running_time >= phase_time: 333 | break 334 | 335 | lock_status = target.cli.lock.acquire() 336 | logger.info(f"Lock status : acquire {lock_status}") 337 | assert ( 338 | libcore.volume_module( 339 | target=target, 340 | data_set=data_set, 341 | config_dict=test_dict, 342 | action="unmount", 343 | phase=phase, 344 | ) 345 | == True 346 | ) 347 | lock_status = target.cli.lock.release() 348 | logger.info(f"Lock status : release {lock_status}") 349 | except Exception as e: 350 | if lock_status: 351 | target.cli.lock.release() 352 | logger.info(f"Lock status : release {lock_status}") 353 | logger.error("Failed due to {}".format(e)) 354 | logger.error( 355 | "Failed test case name : {}".format(sys._getframe().f_code.co_name) 356 | ) 357 | logger.error("Failed test stage : {}".format(phase + 1)) 358 | raise 359 | -------------------------------------------------------------------------------- /testcase/conftest.py: -------------------------------------------------------------------------------- 1 | import pytest, sys, json, os, shutil 2 | import uuid 3 | import traceback 4 | from time import sleep 5 | from copy import deepcopy 6 | 7 | from datetime import datetime 8 | 9 | from requests import session 10 | 11 | sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "../lib"))) 12 | 13 | from tags import EnvTags 14 | from pos import POS 15 | from utils import Client 16 | from _pytest.runner import runtestprotocol 17 | 18 | import logger as logging 19 | logger = logging.get_logger(__name__) 20 | 21 | global pos, method_name #,i trident_config_data, static_dict, config_dict, mapping_dict 22 | 23 | 24 | # Global Files 25 | dir_path = 
os.path.dirname(os.path.realpath(__file__)) 26 | trident_config_file = f"{dir_path}/config_files/trident_config.json" 27 | static_config_file = f"{dir_path}/config_files/static.json" 28 | topology_file = f"{dir_path}/config_files/topology.json" 29 | trident_mapping_file = f"{dir_path}/config_files/trident_mapping.json" 30 | 31 | def trident_test_init(): 32 | """ 33 | Tridnet Init Sequnce - To be called during each session start. 34 | """ 35 | try: 36 | global trident_config_data, static_dict, config_dict, mapping_dict 37 | global login, pos 38 | logger.info("Trident Init Sequence Started...") 39 | 40 | logger.debug("Load Trident Config") 41 | with open(trident_config_file) as f: 42 | trident_config_data = json.load(f) 43 | 44 | logger.debug("Load Trident Static Config") 45 | with open(static_config_file) as f: 46 | static_dict = json.load(f) 47 | 48 | logger.debug("Load Topology Information") 49 | with open(topology_file) as f: 50 | config_dict = json.load(f) 51 | 52 | logger.debug("Load Tridnet Mapping") 53 | with open(trident_mapping_file) as f: 54 | mapping_dict = json.load(f) 55 | 56 | login = [] 57 | login = config_dict["login"]["initiator"]["client"] 58 | login.append(config_dict["login"]["target"]["server"][0]) 59 | 60 | pos = POS("pos_config.json") 61 | if trident_config_data["dump_pos_core"]["enable"] == True: 62 | pos.set_collect_core(get_core_dump=True) 63 | logger.info("POS core dump collection enabled") 64 | 65 | logger.info("Trident Init Sequence Completed !!!") 66 | except Exception as e: 67 | logger.error(f"Trident Init Sequence Failed due to {e}") 68 | return False 69 | return True 70 | 71 | def make_dir(path): 72 | if not os.path.exists(path): 73 | os.makedirs(path) 74 | 75 | def copy_dir(source_item): 76 | path_list = source_item.split("/") 77 | if os.path.isdir(source_item): 78 | destination_item = "/root/cdc/{}".format(path_list[-1]) 79 | make_dir(destination_item) 80 | sub_items = os.listdir(source_item) 81 | for sub_item in sub_items: 82 | full_file_name = os.path.join(source_item, sub_item) 83 | Full_destination_item = os.path.join(destination_item, sub_item) 84 | if os.path.isfile(full_file_name): 85 | shutil.copy(full_file_name, Full_destination_item) 86 | 87 | ###################################### Pytest Functions ##################### 88 | 89 | def pytest_sessionstart(session): 90 | """ Pytest Session Start """ 91 | global session_start_time 92 | session_start_time = datetime.now() 93 | start_time = session_start_time.strftime("%m/%d/%Y, %H:%M:%S") 94 | logger.info(f"Test Session Start Time : {start_time}") 95 | 96 | assert trident_test_init() == True 97 | 98 | @pytest.hookimpl(tryfirst=False, hookwrapper=True) 99 | def pytest_runtest_protocol(item, nextitem): 100 | global method_name 101 | driver = item.nodeid.split("::")[0] 102 | method = item.nodeid.split("::")[1] 103 | method_name = method 104 | try: 105 | issuekey = mapping_dict[method] 106 | except: 107 | issuekey = "No mapping found" 108 | 109 | seperator = "=" * 20 110 | logger.info(f"{seperator} START OF {method} {seperator}") 111 | 112 | start_time = datetime.now() 113 | logger.info("Start Time : {}".format(start_time.strftime("%m/%d/%Y, %H:%M:%S"))) 114 | 115 | target_ip = login[-1]["ip"] 116 | 117 | if trident_config_data["elk_log_stage"]["enable"] == True: 118 | tags_info(target_ip, method, start_time, driver, issuekey) 119 | 120 | # Update the copy of data dict aka pos config. 
121 | pos.data_dict = deepcopy(pos.data_dict_bkp) 122 | 123 | yield 124 | 125 | logger.info(f"{seperator} END OF {method} {seperator}") 126 | 127 | end_time = datetime.now() 128 | logger.info("End Time : {}".format(end_time.strftime("%m/%d/%Y, %H:%M:%S"))) 129 | execution_time = end_time - start_time 130 | execution_minutes = divmod(execution_time.seconds, 60) 131 | logger.info( 132 | "Execution Time : {} minutes {} seconds".format( 133 | execution_minutes[0], execution_minutes[1] 134 | ) 135 | ) 136 | 137 | pos.cli.clean_cli_history() 138 | 139 | def pytest_runtest_logreport(report): 140 | log_status = "======================== Test Status : {} ========================" 141 | if report.when == "setup": 142 | setup_status = report.outcome 143 | if setup_status == "failed": 144 | logger.info(log_status.format("FAIL")) 145 | elif setup_status == "skipped": 146 | logger.info(log_status.format("SKIP")) 147 | if report.when == "call": 148 | test_status = report.outcome 149 | if test_status == "passed": 150 | logger.info(log_status.format("PASS")) 151 | elif test_status == "failed": 152 | logger.info(log_status.format("FAIL")) 153 | 154 | global method_name 155 | if not pos_logs_core_dump(report, method_name): 156 | logger.error("Failed to generate and save the core dump") 157 | 158 | @pytest.hookimpl(tryfirst=True) 159 | def pytest_configure(config): 160 | log_path = logging.get_logpath() 161 | config.option.htmlpath = log_path + "/report.html" 162 | config.option.self_contained_html = True 163 | 164 | def pytest_sessionfinish(session): 165 | session_end_time = datetime.now() 166 | log_path = logging.get_logpath() 167 | logger.info("Test Session End Time : {}".format( 168 | session_end_time.strftime("%m/%d/%Y, %H:%M:%S"))) 169 | 170 | session_time = session_end_time - session_start_time 171 | session_minutes = divmod(session_time.seconds, 60) 172 | logger.info("Total Session Time : {} minutes {} seconds".format( 173 | session_minutes[0], session_minutes[1])) 174 | 175 | logger.info(f"Logs and Html report for executed TCs are present in {log_path}") 176 | copy_dir(log_path) 177 | try: 178 | if pos: 179 | if pos.target_utils.helper.check_pos_exit() == False: 180 | pos.cli.pos_stop(grace_shutdown = False) 181 | pos._clearall_objects() 182 | except NameError: 183 | return "Exiting" 184 | 185 | logger.info("\n") 186 | 187 | def teardown_session(): 188 | logger.info("============= CLEANUP SESSION AFER TEST") 189 | pos.exit_handler(expected=False) 190 | 191 | ###################################### Pytest Fixtures ###################### 192 | 193 | @pytest.fixture(scope="function") 194 | def system_fixture(): 195 | start_time = test_setup_msg() 196 | 197 | # Stop POS if running before test 198 | if pos.target_utils.helper.check_pos_exit() == False: 199 | assert pos.cli.pos_stop(grace_shutdown = False)[0] == True 200 | 201 | yield pos 202 | 203 | test_cleanup_msg(start_time) 204 | # Stop POS if running after test 205 | if pos.target_utils.helper.check_pos_exit() == False: 206 | assert pos.cli.pos_stop(grace_shutdown = False)[0] == True 207 | 208 | 209 | @pytest.fixture(scope="function") 210 | def array_fixture(): 211 | start_time = test_setup_msg() 212 | assert check_pos_and_bringup() == True 213 | 214 | yield pos 215 | 216 | test_cleanup_msg(start_time) 217 | is_pos_running = False 218 | if pos.target_utils.helper.check_pos_exit() == False: 219 | is_pos_running = True 220 | 221 | assert client_teardown(is_pos_running) == True 222 | assert target_teardown(is_pos_running) == True 223 | 224 | 225 | 
225 | @pytest.fixture(scope="function") 226 | def volume_fixture(): 227 | start_time = test_setup_msg() 228 | assert check_pos_and_bringup() == True 229 | assert pos.target_utils.bringup_array(data_dict=pos.data_dict) == True 230 | 231 | yield pos 232 | 233 | test_cleanup_msg(start_time) 234 | is_pos_running = False 235 | if pos.target_utils.helper.check_pos_exit() == False: 236 | is_pos_running = True 237 | 238 | assert client_teardown(is_pos_running) == True 239 | assert target_teardown(is_pos_running) == True 240 | 241 | ###################################### Functions ############################ 242 | def test_setup_msg(): 243 | logger.info("========== SETUP BEFORE TEST =========") 244 | start_time = datetime.now() 245 | logger.info("Test Session Start Time : {}".format( 246 | start_time.strftime("%m/%d/%Y, %H:%M:%S"))) 247 | 248 | return start_time 249 | 250 | def test_cleanup_msg(start_time): 251 | logger.info("========== CLEANUP AFTER TEST =========") 252 | end_time = datetime.now() 253 | logger.info("Test Session End Time : {}".format( 254 | end_time.strftime("%m/%d/%Y, %H:%M:%S"))) 255 | 256 | session_time = end_time - start_time 257 | session_minutes = divmod(session_time.seconds, 60) 258 | logger.info("Total Test Session Time : {} minutes {} seconds".format( 259 | session_minutes[0], session_minutes[1])) 260 | 261 | 262 | def check_pos_and_bringup(): 263 | try: 264 | pos.data_dict["system"]["phase"] = "true" 265 | pos.data_dict["subsystem"]["phase"] = "true" 266 | pos.data_dict["device"]["phase"] = "true" 267 | if pos.target_utils.helper.check_pos_exit() == True: 268 | assert pos.target_utils.bringup_system(data_dict=pos.data_dict) == True 269 | assert pos.target_utils.bringup_device(data_dict=pos.data_dict) == True 270 | assert pos.target_utils.bringup_subsystem(data_dict=pos.data_dict) == True 271 | assert pos.target_utils.get_subsystems_list() == True 272 | array_cleanup() 273 | else: 274 | logger.info("POS is already running") 275 | # Please ignore the status of the following command. 276 | pos.cli.subsystem_create_transport() 277 | logger.info("Please ignore the status of the above command") 278 | assert pos.cli.device_scan()[0] == True 279 | assert pos.cli.device_list()[0] == True 280 | if (len(pos.cli.system_buffer_devs) == 0 and 281 | len(pos.cli.array_buffer_devs) == 0): 282 | assert pos.target_utils.bringup_device(data_dict=pos.data_dict) == True 283 | 284 | assert pos.target_utils.get_subsystems_list() == True 285 | if len(pos.target_utils.ss_temp_list) == 0: 286 | assert pos.target_utils.bringup_subsystem(data_dict=pos.data_dict) == True 287 | assert pos.target_utils.get_subsystems_list() == True 288 | array_cleanup() 289 | 290 | return True 291 | except Exception as e: 292 | logger.error(e) 293 | traceback.print_exc() 294 | assert pos.cli.pos_stop(grace_shutdown=False)[0] == True 295 | return False 296 | 297 | def client_teardown(is_pos_running: bool) -> bool: 298 | """ Teardown function to reset the client """ 299 | pos.client.reset(pos_run_status=is_pos_running) 300 | return True 301 | 302 | def target_teardown(is_pos_running: bool) -> bool: 303 | """ Teardown function to reset the target """ 304 | assert pos.target_utils.helper.check_system_memory() == True 305 | if is_pos_running: 306 | try: 307 | array_cleanup() 308 | except Exception as e: 309 | logger.error(f"Array cleanup failed due to {e}") 310 | # Stop POS as array cleanup failed 311 | assert pos.cli.pos_stop(grace_shutdown=False)[0] == True 312 | assert pos.target_utils.re_scan() == True 313 | return True 314 | 315 | def array_cleanup():
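    """ Unmount every mounted array, delete all arrays, then reset the MBR. """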
316 | assert pos.cli.array_list()[0] == True 317 | for array in list(pos.cli.array_dict.keys()): 318 | assert pos.cli.array_info(array_name=array)[0] == True 319 | if pos.cli.array_dict[array].lower() == "mounted": 320 | assert pos.cli.array_unmount(array_name=array)[0] == True 321 | assert pos.cli.array_delete(array_name=array)[0] == True 322 | 323 | assert pos.cli.devel_resetmbr()[0] == True 324 | 325 | def pos_logs_core_dump(report, issuekey): 326 | time_stamp = datetime.now().strftime('%Y%m%d_%H%M%S') 327 | unique_key = f"{issuekey}_{time_stamp}" 328 | if (report.when == 'call' and report.outcome == 'failed'): 329 | #TODO update pos path 330 | 331 | if trident_config_data["dump_pos_core"]["enable"] == True: 332 | assert pos.target_utils.copy_core(unique_key) == True 333 | 334 | if trident_config_data["copy_pos_log"]["test_fail"]: 335 | assert pos.target_utils.copy_pos_log(unique_key) == True 336 | 337 | elif (report.when == 'call' and report.outcome == 'passed' and 338 | trident_config_data["copy_pos_log"]["test_pass"] == True): 339 | assert pos.target_utils.copy_pos_log(unique_key) == True 340 | return True 341 | 342 | ###################################### ELK Tags ############################# 343 | 344 | def tags_info(target_ip, method, start_time, driver, issuekey): 345 | logger.info("################### Start Tag - Test Info ###################") 346 | logger.info( 347 | "TC Unique ID : {}_{}_{}_{}".format( 348 | str(uuid.uuid4()), 349 | target_ip, 350 | method, 351 | (start_time.strftime("%m_%d_%Y_%H_%M_%S")), 352 | ) 353 | ) 354 | 355 | for key in static_dict["Project Info"]: 356 | logger.info(key + " : " + str(static_dict["Project Info"][key])) 357 | for key in static_dict["Test Cycle Info"]: 358 | logger.info(key + " : " + str(static_dict["Test Cycle Info"][key])) 359 | logger.info("Test Case Driver File Name : " + driver) 360 | logger.info("Test Case Name : " + method) 361 | logger.info("JIRA_TC_ID : " + issuekey) 362 | logger.info("################### End Tag - Test Info #####################") 363 | invent = {} 364 | for item in login: 365 | node = [str(item["ip"]), str(item["username"]), str(item["password"])] 366 | tag = EnvTags(node, item["ip"], item["username"], item["password"]) 367 | out = tag.get_tags() 368 | if out: 369 | logger.info("Tags received for the node : {}".format(node[0])) 370 | invent[item["ip"]] = tag.inv 371 | else: 372 | logger.error("No tags received from the node : {}".format(node[0])) 373 | assert 0 374 | sleep(3) 375 | logger.info("################### Start Tag - System Info #####################") 376 | for key, value in invent.items(): 377 | value.update({"IP": str(key)}) 378 | value.move_to_end("IP", last=False) 379 | logger.info("Test Config :" + str(dict(value))) 380 | logger.info("################### End Tag - System Info #####################") 381 | 382 | -------------------------------------------------------------------------------- /lib/composable/composable_core.py: -------------------------------------------------------------------------------- 1 | from collections import defaultdict 2 | import time, random, sys 3 | from random import randint 4 | import logger as logger 5 | 6 | # sys.path.insert(0, "/root/poseidon/trident_dev/trident") 7 | logger = logger.get_logger(__name__) 8 | 9 | 10 | class _Data: 11 | def __init__(self, seed): 12 | self.vols = defaultdict(list) 13 | self.subsystem = defaultdict(list) 14 | self.seed = seed 15 | 16 | class _Vol: 17 | def __init__(self, name, size, maxiops, maxbw): 18 | self.name = name 19 | 
self.size = size 20 | self.maxiops = maxiops 21 | self.maxbw = maxbw 22 | self.attachNqn = None 23 | self.state = "unmount" 24 | 25 | class _Subsystem: 26 | def __init__(self, name, serial, model): 27 | self.name = name 28 | self.serial = serial 29 | self.model = model 30 | self.address = None 31 | self.port = None 32 | self.transport = None 33 | self.state = "disconnect" 34 | 35 | def add_volume(self, basename, maxiops=0, maxbw=0, size=21474836480): 36 | if not basename: 37 | raise AssertionError 38 | if not self.vols.get(basename): 39 | start = 0 40 | else: 41 | last_vol = self.vols[basename][len(self.vols[basename]) - 1].name 42 | start = int(last_vol.split("_")[-1]) + 1 43 | name = basename + "_" + str(self.seed) + "_" + str(start) 44 | tVol = self._Vol(name=name, size=size, maxiops=maxiops, maxbw=maxbw) 45 | self.vols[basename].append(tVol) 46 | return name 47 | 48 | def remove_volume(self, basename): 49 | if not basename: 50 | raise AssertionError 51 | if not self.vols.get(basename): 52 | raise AssertionError 53 | tVol = self.vols[basename][0] 54 | if tVol.state == "mount": 55 | raise AssertionError 56 | del self.vols[basename][0] 57 | return tVol.name 58 | 59 | def get_volume(self, basename, number): 60 | if not basename or not number: 61 | raise AssertionError 62 | if not self.vols.get(basename): 63 | raise AssertionError 64 | vol_list = [] 65 | for idx in range(int(number)): 66 | tVol = self.vols[basename][idx] 67 | vol_list.append(tVol.name) 68 | return vol_list 69 | 70 | def get_all_volumes(self): 71 | vol_list = [] 72 | for vol_key in self.vols: 73 | for vol_entry in self.vols[vol_key]: 74 | vol_list.append(vol_entry) 75 | return vol_list 76 | 77 | def set_volume_state(self, basename, number, nqnname=None, state="unmount"): 78 | if not basename or not number: 79 | raise AssertionError 80 | if not self.vols.get(basename): 81 | raise AssertionError 82 | for idx in range(int(number)): 83 | self.vols[basename][idx].state = state 84 | self.vols[basename][idx].attachNqn = nqnname 85 | 86 | def add_subsystem(self, basename, serial="IBOF00000000000001", model="IBOF_VOLUME"): 87 | if not basename: 88 | raise AssertionError 89 | if not self.subsystem.get(basename): 90 | start = 0 91 | else: 92 | last_subsystem = self.subsystem[basename][ 93 | len(self.subsystem[basename]) - 1 94 | ].name 95 | start = int(last_subsystem.split("_")[-1]) + 1 96 | name = basename + "_" + str(self.seed) + "_" + str(start) 97 | tSubsystem = self._Subsystem(name=name, serial=serial, model=model) 98 | self.subsystem[basename].append(tSubsystem) 99 | return name 100 | 101 | def remove_subsystem(self, basename): 102 | if not basename: 103 | raise AssertionError 104 | if not self.subsystem.get(basename): 105 | raise AssertionError 106 | tSubsystem = self.subsystem[basename][0] 107 | del self.subsystem[basename][0] 108 | return tSubsystem.name 109 | 110 | def get_subsystem(self, basename): 111 | if not basename: 112 | raise AssertionError 113 | if not self.subsystem.get(basename): 114 | raise AssertionError 115 | return self.subsystem[basename][0].name 116 | 117 | def get_all_subsystem(self): 118 | tSubsystem = [] 119 | logger.info(" Inside get_all_subsystem ") 120 | for subsystem_dict in self.subsystem: 121 | logger.info("subsystem_dict {}".format(subsystem_dict)) 122 | for subsystem in self.subsystem[subsystem_dict]: 123 | logger.info("subsystem {}".format(subsystem)) 124 | tSubsystem.append(subsystem) 125 | return tSubsystem 126 | 127 | def set_subsystem_state(self, basename, state="disconnect"): 128 | if not 
basename: 129 | raise AssertionError 130 | if not self.subsystem.get(basename): 131 | raise AssertionError 132 | self.subsystem[basename][-1].state = state 133 | 134 | def set_all_subsystem_state(self, state): 135 | for subsystem_dict in self.subsystem: 136 | for subsystem in self.subsystem[subsystem_dict]: 137 | subsystem.state = state 138 | 139 | 140 | def subsystem_module(target, client, data_set, config_dict, action, phase=None): 141 | try: 142 | if action not in ("create", "delete", "connect", "disconnect", "get_name"): 143 | raise AssertionError 144 | if phase is None: 145 | raise AssertionError 146 | 147 | def create(basename): 148 | model_number = config_dict["phase"][0]["volume"]["create"]["basename"] 149 | nqn_name = data_set.add_subsystem(basename, model=model_number) 150 | # nqn_name = data_set.add_subsystem(basename, model=data_set.seed) 151 | # model_number = "{}{}".format("POS_VOLUME", data_set.seed) 152 | # ip = target.get_transport_protocol_ip() 153 | # ip = target.ibof_obj.ssh_obj.hostname 154 | ip = target.target_utils.helper.get_mellanox_interface_ip()[1][0] 155 | # transport_protocol = target.params["transport_protocol"] 156 | port = "1158" 157 | data_set.subsystem[basename][-1].address = ip 158 | data_set.subsystem[basename][-1].port = port 159 | # data_set.subsystem[basename][-1].transport = transport_protocol 160 | assert ( 161 | target.cli.subsystem_create( 162 | nqn_name=nqn_name, ns_count="256", model_name=model_number, serial_number="POS000000000001" 163 | )[0] 164 | == True 165 | ) 166 | assert target.target_utils.get_subsystems_list() == True 167 | assert ( 168 | target.cli.subsystem_add_listner( 169 | nqn_name=nqn_name, mellanox_interface=ip, port=port 170 | )[0] 171 | == True 172 | ) 173 | 174 | def delete(basename): 175 | nqn_name = data_set.remove_subsystem(basename) 176 | assert target.cli.subsystem_delete(nqn_name=nqn_name)[0] == True 177 | 178 | def connect(basename): 179 | nqn_name = data_set.get_subsystem(basename) 180 | ip = data_set.subsystem[basename][-1].address 181 | # transport_protocol = data_set.subsystem[basename][-1].transport 182 | port = data_set.subsystem[basename][-1].port 183 | assert ( 184 | client.nvme_connect(nqn_name=nqn_name, mellanox_switch_ip=ip, port=port) 185 | == True 186 | ) 187 | data_set.set_subsystem_state(basename=basename, state="connect") 188 | 189 | def disconnect(basename): 190 | nqn_name = data_set.get_subsystem(basename) 191 | logger.info("Disconnecting nqn {}".format(nqn_name)) 192 | assert client.nvme_disconnect(nqn=[nqn_name]) == True 193 | data_set.set_subsystem_state(basename=basename, state="disconnect") 194 | 195 | def get_name(basename): 196 | nqn_name = data_set.get_subsystem(basename) 197 | return nqn_name 198 | 199 | if action == "get_name": 200 | if not config_dict["phase"][phase]["nvmf_subsystem"]["connect"]["valid"]: 201 | return False 202 | subsystem_name = config_dict["phase"][phase]["nvmf_subsystem"]["connect"][ 203 | "basename" 204 | ].split(",") 205 | subnqn_list = [] 206 | for basename in subsystem_name: 207 | subnqn_list.append(get_name(basename)) 208 | return subnqn_list 209 | else: 210 | if not config_dict["phase"][phase]["nvmf_subsystem"][action]["valid"]: 211 | return True 212 | subsystem_name = config_dict["phase"][phase]["nvmf_subsystem"][action][ 213 | "basename" 214 | ].split(",") 215 | for basename in subsystem_name: 216 | if action == "create": 217 | create(basename) 218 | elif action == "delete": 219 | delete(basename) 220 | elif action == "connect": 221 | connect(basename) 222 | elif action == 
"disconnect": 223 | disconnect(basename) 224 | else: 225 | raise AssertionError 226 | return True 227 | except Exception as e: 228 | logger.error("{} Failed {}".format(__name__, e)) 229 | return False 230 | 231 | 232 | def volume_module(target, data_set, config_dict, action, phase=None): 233 | try: 234 | logger.info( 235 | "Action={}, phase={}, valid_action? {}".format( 236 | action, phase, config_dict["phase"][phase]["volume"][action]["valid"] 237 | ) 238 | ) 239 | if not action in ("unmount", "delete", "create", "mount", "update"): 240 | raise AssertionError 241 | if phase == None: 242 | raise AssertionError 243 | if not config_dict["phase"][phase]["volume"][action]["valid"]: 244 | return True 245 | 246 | assert target.cli.array_list()[0] == True 247 | array_list = list(target.cli.array_dict.keys()) 248 | array_name = array_list[0] 249 | 250 | def create(basename, number): 251 | maxiops = config_dict["phase"][phase]["volume"]["create"]["maxiops"] 252 | maxbw = config_dict["phase"][phase]["volume"]["create"]["maxbw"] 253 | size = config_dict["phase"][phase]["volume"]["create"]["size"] 254 | for idx in range(int(number)): 255 | vol = data_set.add_volume(basename) 256 | assert ( 257 | target.cli.volume_create( 258 | volumename=vol, 259 | size=size, 260 | array_name=array_name, 261 | iops=maxiops, 262 | bw=maxbw, 263 | )[0] 264 | == True 265 | ) 266 | 267 | def delete(basename, number): 268 | for idx in range(int(number)): 269 | vol = data_set.remove_volume(basename) 270 | assert ( 271 | target.cli.volume_delete(volumename=vol, array_name=array_name)[0] 272 | == True 273 | ) 274 | 275 | def mount(basename, number, subsystem): 276 | nqnname = data_set.get_subsystem(subsystem) 277 | vol_list = data_set.get_volume(basename, number) 278 | for vol in vol_list: 279 | assert ( 280 | target.cli.volume_mount( 281 | volumename=vol, array_name=array_name, nqn=nqnname 282 | )[0] 283 | == True 284 | ) 285 | data_set.set_volume_state( 286 | basename=basename, number=number, nqnname=nqnname, state="mount" 287 | ) 288 | 289 | def unmount(basename, number): 290 | vol_list = data_set.get_volume(basename, number) 291 | for vol in vol_list: 292 | assert ( 293 | target.cli.volume_unmount(volumename=vol, array_name=array_name)[0] 294 | == True 295 | ) 296 | data_set.set_volume_state(basename=basename, number=number, state="unmount") 297 | 298 | def update(basename, number): 299 | vol_list = data_set.get_volume(basename, number) 300 | for vol in vol_list: 301 | # both min and max are decided by SRS(v0.10.1) 302 | iops = random.randint(10, 18446744073709551) 303 | bw = random.randint(10, 17592186044415) 304 | assert ( 305 | target.cli.qos_create_volume_policy( 306 | volumename=vol, 307 | arrayname=array_name, 308 | maxiops=iops, 309 | maxbw=bw, 310 | miniops=0, 311 | minbw=0, 312 | )[0] 313 | == True 314 | ) 315 | 316 | vol_basename = config_dict["phase"][phase]["volume"][action]["basename"].split( 317 | "," 318 | ) 319 | vol_number = config_dict["phase"][phase]["volume"][action]["number"].split(",") 320 | for idx, (basename, number) in enumerate(zip(vol_basename, vol_number)): 321 | if action == "mount": 322 | subsystem = config_dict["phase"][phase]["volume"]["mount"][ 323 | "nqnbasename" 324 | ].split(",")[idx] 325 | mount(basename, number, subsystem) 326 | elif action == "unmount": 327 | unmount(basename, number) 328 | elif action == "create": 329 | create(basename, number) 330 | elif action == "delete": 331 | delete(basename, number) 332 | elif action == "update": 333 | update(basename, number) 334 | else: 335 
336 | 337 | except Exception as e: 338 | logger.error("{} Failed {}".format(__name__, e)) 339 | return False 340 | return True 341 | 342 | 343 | def npor_recover(target, data_set): 344 | try: 345 | assert target.target_utils.npor_and_save_state() == True 346 | assert target.cli.subsystem_create_transport()[0] == True 347 | 348 | logger.info("Post NPOR get_all_subsystem") 349 | 350 | total_subsystem = data_set.get_all_subsystem() 351 | 352 | logger.info("total_subsystem {}".format(total_subsystem)) 353 | 354 | for subsystem in total_subsystem: 355 | name = subsystem.name 356 | serial = subsystem.serial 357 | model = subsystem.model 358 | ip = subsystem.address 359 | port = subsystem.port 360 | assert ( 361 | target.cli.subsystem_create( 362 | nqn_name=name, ns_count="256", model_name=model, serial_number="POS000000000001" 363 | )[0] 364 | == True 365 | ) 366 | assert ( 367 | target.cli.subsystem_add_listner( 368 | nqn_name=name, mellanox_interface=ip, port=port 369 | )[0] 370 | == True 371 | ) 372 | 373 | assert target.cli.array_list()[0] == True 374 | array_list = list(target.cli.array_dict.keys()) 375 | array_name = array_list[0] 376 | 377 | vol_list = data_set.get_all_volumes() 378 | for vol in vol_list: 379 | nqn_name = vol.attachNqn 380 | if nqn_name is None: 381 | assert 0 382 | assert ( 383 | target.cli.volume_mount( 384 | volumename=vol.name, array_name=array_name, nqn=nqn_name 385 | )[0] 386 | == True 387 | ) 388 | return True 389 | 390 | except Exception as e: 391 | logger.error("{} Failed {}".format(__name__, e)) 392 | return False 393 | 394 | 395 | --------------------------------------------------------------------------------
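A minimal usage sketch of the composable helpers above, assuming `target`, `client`, and a phase-based `config_dict` (shaped like the `config_dict["phase"][n]` lookups in the code) come from the Trident fixtures; the import path and all names below are illustrative, not part of the repository:

    # Hypothetical driver for composable_core (illustrative only).
    from random import randint
    from composable.composable_core import (  # assumed import path
        _Data, subsystem_module, volume_module, npor_recover)

    data_set = _Data(seed=randint(0, 9999))  # seed namespaces volume/subsystem names
    # Phase 0: create subsystems and volumes, mount, then connect from the client.
    assert subsystem_module(target, client, data_set, config_dict, action="create", phase=0) == True
    assert volume_module(target, data_set, config_dict, action="create", phase=0) == True
    assert volume_module(target, data_set, config_dict, action="mount", phase=0) == True
    assert subsystem_module(target, client, data_set, config_dict, action="connect", phase=0) == True
    # After an NPOR, re-create subsystems and re-mount volumes from the bookkeeping in data_set.
    assert npor_recover(target, data_set) == True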