├── .dockerignore ├── .gitignore ├── AUTHORS ├── Dockerfile ├── Docs ├── index.md └── v2_plans.md ├── INSTALL.md ├── LICENSE ├── Makefile ├── README.md ├── configs-examples ├── default.yaml ├── local_block_device.yml ├── local_hdd.yaml ├── local_vm_ceph.yml ├── logging.yaml ├── openstack_ceph.yaml └── perf_lab.yml ├── fio_binaries ├── fio_artful_x86_64.bz2 ├── fio_bionic_x86_64.bz2 ├── fio_trusty_x86_64.bz2 └── fio_xenial_x86_64.bz2 ├── pylint.rc ├── report_templates ├── base.html ├── index.html ├── main.css ├── report_all.html ├── report_ceph.html ├── report_cinder_iscsi.html ├── report_hdd.html ├── report_iops_vs_lat.html └── report_linearity.html ├── requirements.txt ├── requirements_dev.txt ├── scripts ├── __init__.py ├── assumptions_check.py ├── build_fio_ubuntu.sh ├── config.sh ├── data.py ├── data2.py ├── data_extractor.py ├── data_generator.py ├── disk_io_pp.py ├── fio_tests_configs │ ├── 1.cfg │ ├── 2.cfg │ ├── io_task.cfg │ └── io_task_test.cfg ├── gen_load.sh ├── generate_load.py ├── grafana.py ├── grafana_template.js ├── hdd.fio ├── influx_exporter.py ├── install.sh ├── koder.js ├── perf.py ├── postprocessing │ ├── __init__.py │ ├── bottleneck.py │ ├── io_py_result_processor.py │ └── stat.py ├── prepare.sh ├── qcow2_vdb.txt ├── qcow2_vm.txt ├── qcow_compute.txt ├── raw_vdb.txt ├── raw_vm.txt ├── receiver.py ├── run.py ├── run.sh ├── run_all_tests.sh ├── run_test.sh ├── run_vm.sh ├── sensors_webui.html ├── show_disk_delta.py ├── single_node_test_complete.sh ├── single_node_test_short.sh ├── tests.yaml ├── wally └── webui.py ├── stubs ├── paramiko.pyi ├── psutil.pyi └── yaml.pyi ├── tests ├── __init__.py ├── test_executors.py ├── test_fio_parser.py ├── test_hlstorage.py ├── test_math.py ├── test_rpc.py └── test_ssh.py ├── wally ├── __init__.py ├── __main__.py ├── ceph.py ├── config.py ├── console_report.py ├── data_selectors.py ├── legacy_report.py ├── logger.py ├── main.py ├── openstack.py ├── openstack_api.py ├── plot.py ├── pretty_yaml.py ├── report.py ├── report_profiles.py ├── resources.py ├── result_classes.py ├── result_storage.py ├── run_test.py ├── sensors.py ├── stage.py ├── storage_structure.yaml ├── suits │ ├── __init__.py │ ├── all_suits.py │ ├── io │ │ ├── __init__.py │ │ ├── ceph.cfg │ │ ├── check_distribution.cfg │ │ ├── check_linearity.cfg │ │ ├── cinder_iscsi.cfg │ │ ├── defaults.cfg │ │ ├── defaults_qd.cfg │ │ ├── fio.py │ │ ├── fio_hist.py │ │ ├── fio_job.py │ │ ├── fio_task_parser.py │ │ ├── hdd.cfg │ │ ├── lat_vs_iops.cfg │ │ ├── mixed_hdd.cfg │ │ ├── mixed_ssd.cfg │ │ ├── one_step.cfg │ │ ├── rpc_plugin.py │ │ ├── rpc_plugin.pyi │ │ ├── rrd.cfg │ │ ├── rrd_qd_scan.cfg │ │ ├── rrd_raw.cfg │ │ └── verify.cfg │ ├── itest.py │ ├── job.py │ ├── mysql │ │ ├── __init__.py │ │ ├── prepare.sh │ │ └── run.sh │ ├── omgbench │ │ ├── __init__.py │ │ ├── prepare.sh │ │ └── run.sh │ └── postgres │ │ ├── __init__.py │ │ ├── prepare.sh │ │ └── run.sh ├── test_run_class.py └── utils.py └── web_app ├── __init__.py ├── app.py ├── rest_api.py ├── static ├── script.js └── style.css └── templates ├── base.html ├── index.html ├── lab_header.html ├── lab_main.html ├── table.html └── test.html /.dockerignore: -------------------------------------------------------------------------------- 1 | .git 2 | .cache 3 | .ipynb_checkpoints 4 | .mypy_cache 5 | Docs -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | 
__pycache__/ 3 | *.py[cod] 4 | 5 | # C extensions 6 | *.so 7 | 8 | # Distribution / packaging 9 | .Python 10 | env/ 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | lib/ 17 | lib64/ 18 | parts/ 19 | sdist/ 20 | var/ 21 | *.egg-info/ 22 | .installed.cfg 23 | *.egg 24 | 25 | # PyInstaller 26 | # Usually these files are written by a python script from a template 27 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 28 | *.manifest 29 | *.spec 30 | 31 | # Installer logs 32 | pip-log.txt 33 | pip-delete-this-directory.txt 34 | 35 | # Unit test / coverage reports 36 | htmlcov/ 37 | .tox/ 38 | .coverage 39 | .cache 40 | nosetests.xml 41 | coverage.xml 42 | 43 | # Translations 44 | *.mo 45 | *.pot 46 | 47 | # Django stuff: 48 | *.log 49 | 50 | # Sphinx documentation 51 | docs/_build/ 52 | 53 | # PyBuilder 54 | target/ 55 | .idea/ 56 | 57 | .env/ 58 | .ipynb_checkpoints/ 59 | .project 60 | .pydevproject 61 | *.ipynb 62 | *.o 63 | .mypy_cache/ 64 | -------------------------------------------------------------------------------- /AUTHORS: -------------------------------------------------------------------------------- 1 | Alyona Kiseleva 2 | Dmitry Yatsushkevich 3 | Kostiantyn Danylov aka koder 4 | Peter Lomakin 5 | Ved-vampir 6 | Yulia Portnova 7 | gstepanov 8 | koder aka kdanilov 9 | stgleb 10 | yportnova 11 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | # docker build -t ubuntu1604py36 2 | FROM ubuntu:18.04 3 | 4 | LABEL maintainer="Kostiantyn Danylov " version="2.0" 5 | 6 | RUN apt-get update && \ 7 | DEBIAN_FRONTEND=noninteractive apt upgrade -yq && \ 8 | DEBIAN_FRONTEND=noninteractive apt install -yq vim git tmux build-essential \ 9 | python3 python3-dev python3-pip python3-venv python3-tk wget 10 | 11 | COPY . 
/opt/wally 12 | 13 | # git clone https://github.com/Mirantis/disk_perf_test_tool.git /opt/disk_perf_tool 14 | 15 | RUN git clone https://github.com/koder-ua/cephlib.git /opt/cephlib && \ 16 | git clone https://github.com/koder-ua/xmlbuilder3.git /opt/xmlbuilder3 && \ 17 | git clone https://github.com/koder-ua/agent.git /opt/agent && \ 18 | python3.6 -m pip install pip --upgrade && \ 19 | cd /opt/wally && \ 20 | python3.6 -m pip install wheel && \ 21 | python3.6 -m pip install -r requirements.txt && \ 22 | python3.6 -m pip install -U cryptography && \ 23 | ln -s /opt/wally/scripts/wally /usr/bin && \ 24 | chmod a+x /opt/wally/scripts/wally 25 | 26 | RUN apt purge -y python3-dev build-essential && apt -y autoremove 27 | 28 | ENV PYTHONPATH /opt/cephlib:/opt/xmlbuilder3:/opt/agent:/opt/wally 29 | 30 | CMD ["/bin/bash"] 31 | -------------------------------------------------------------------------------- /INSTALL.md: -------------------------------------------------------------------------------- 1 | Installation 2 | ============ 3 | 4 | Requirements: 5 | * python 3.6+ (all packages: python3 python3-dev python3-pip python3-venv python3-tk) 6 | 7 | Manual: 8 | ------- 9 | 10 | git clone https://github.com/Mirantis/disk_perf_test_tool.git 11 | git clone https://github.com/koder-ua/cephlib.git 12 | git clone https://github.com/koder-ua/xmlbuilder3.git 13 | git clone https://github.com/koder-ua/agent.git 14 | cd disk_perf_test_tool 15 | python3.6 -m pip install wheel 16 | python3.6 -m pip install -r requirements.txt 17 | python3.6 -m wally --help 18 | 19 | 20 | Docker: 21 | ------- 22 | 23 | Build: 24 | 25 | git clone https://github.com/Mirantis/disk_perf_test_tool.git 26 | 27 | docker build -t /wally . 28 | OR (to get smaller image) 29 | docker build --squash -t /wally . 30 | 31 | Pull existing container: 32 | docker pull wally:v2 33 | 34 | To run container use: 35 | 36 | docker run -ti /wally /bin/bash 37 | wally --help 38 | 39 | -------------------------------------------------------------------------------- /Makefile: -------------------------------------------------------------------------------- 1 | .PHONY: mypy pylint pylint_e docker 2 | 3 | ALL_FILES=$(shell find wally/ -type f -name '*.py') 4 | STUBS="stubs:../venvs/wally/lib/python3.5/site-packages/" 5 | 6 | mypy: 7 | MYPYPATH=${STUBS} python -m mypy --ignore-missing-imports --follow-imports=skip ${ALL_FILES} 8 | 9 | PYLINT_FMT=--msg-template={path}:{line}: [{msg_id}({symbol}), {obj}] {msg} 10 | 11 | pylint: 12 | python -m pylint '${PYLINT_FMT}' --rcfile=pylint.rc ${ALL_FILES} 13 | 14 | pylint_e: 15 | python3 -m pylint -E '${PYLINT_FMT}' --rcfile=pylint.rc ${ALL_FILES} 16 | 17 | docker: 18 | docker build --squash -t wally:v2 . 19 | docker tag wally:v2 ${DOCKER_ID_USER}/wally:v2 20 | 21 | docker_push: 22 | docker push ${DOCKER_ID_USER}/wally:v2 23 | 24 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | Block storage devices tests tool. See wiki for details 2 | Look into INSTALL.md for installation steps. 3 | 4 | Look into config-example for examples of config file. 
5 | Copy example in same folder and replace ${VAR} with appropriate value 6 | 7 | 8 | 9 | | | Size, TiB | $/TiB | IOPS WR | IOPS RD | BW WR MB| BW RD MB| Lat ms | 10 | |:--------:|----------:|:-----:|:-------:|:-------:|:-------:|:-------:|:------:| 11 | | SATA HDD | 10 | 25-50 | 50-150 | 50-150 | 100-200 | 100-200 | 3-7 | 12 | | SSD | 2 | 200 | 100-5k | 1k-20k | 50-400 | 200-500 | 0.1-1 | 13 | | NVME | 2 | 400 | 400-20k | 2k-50k | 200-1.5k| 500-2k | 0.01-1 | -------------------------------------------------------------------------------- /configs-examples/default.yaml: -------------------------------------------------------------------------------- 1 | # ------------------------------------ CONFIGS ------------------------------------------------------------------- 2 | #openstack: 3 | # skip_preparation: false 4 | # openrc: /home/koder/workspace/scale_openrc 5 | # insecure: true 6 | # openrc: ENV 7 | # openrc: 8 | # OS_USERNAME: USER 9 | # OS_PASSWORD: PASSWD 10 | # OS_TENANT_NAME: KEY_FILE 11 | # OS_AUTH_URL: URL 12 | # OS_INSECURE: OPTIONAL 13 | # vms: 14 | # - "USERNAME@VM_NAME_PREFIX" 15 | # 16 | #ceph: 17 | # cluster: ceph << Optional 18 | # config: PATH << Optional 19 | # keyfile: PATH << Optional 20 | # key: KEY << not supported for now 21 | # root_node: NODE_NAME 22 | # 23 | # 24 | # nodes: - map of explicit nodes URLS to node roles 25 | # in format 26 | # USERNAME[:PASSWD]@VM_NAME_PREFIX[::KEY_FILE] or localhost: role1, role2, role3.... 27 | 28 | collect_info: true 29 | var_dir_root: /tmp/perf_tests 30 | settings_dir: ~/.wally 31 | connect_timeout: 30 32 | max_time_diff_ms: 5000 33 | rpc_log_level: DEBUG 34 | include: logging.yaml 35 | default_test_local_folder: "/tmp/wally_{name}_{uuid}" 36 | keep_raw_files: false # don't change this value, keep is not supported atm 37 | download_rpc_logs: true 38 | 39 | vm_configs: 40 | keypair_file_private: wally_vm_key_perf3.pem 41 | keypair_file_public: wally_vm_key_perf3.pub 42 | keypair_name: wally_vm_key 43 | 44 | wally_1024: 45 | image: 46 | name: wally_ubuntu 47 | user: ubuntu 48 | url: https://cloud-images.ubuntu.com/trusty/current/trusty-server-cloudimg-amd64-disk1.img 49 | 50 | flavor: 51 | name: wally_1024 52 | hdd_size: 100 53 | ram_size: 1024 54 | cpu_count: 2 55 | 56 | vol_sz: 100 57 | name_templ: wally-{group}-{id} 58 | aa_group_name: wally-aa-{0} 59 | security_group: wally_ssh_to_everyone 60 | 61 | 62 | ceph_opts: nodeep-scrub, noscrub 63 | #----------------------------------------- STEPS ------------------------------------------------------------------ 64 | # discover: a,b,c,... - comma separated list of clusters to discover. May be ommited 65 | # List may contains - ceph, openstack, fuel 66 | # Also - ignore_errors - mean to ignore errors during dicovery 67 | # metadata - mean to discrover cluster metadata only, but not nodes 68 | # spawn: ... 69 | # connect: ... 70 | # sensors: ... 71 | # test: ... 
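#
# Purely illustrative sketch of the "discover" and "nodes" formats described
# in the comments above (the host name, password and key path below are
# placeholders, not values shipped with wally -- substitute your own):
#
# discover: ceph,ignore_errors
# nodes:
#     root:secret@ceph-mon-1: testnode
#     ubuntu@10.0.0.5::/home/ubuntu/.ssh/test_key: testnode
#     localhost: testnode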
72 | 73 | sensors: 74 | online: true 75 | roles_mapping: 76 | testnode: system-cpu, block-io, net-io 77 | ceph-osd: 78 | system-cpu: "*" 79 | block-io: "*" 80 | net-io: "*" 81 | ceph: 82 | sources: [historic] 83 | osds: all 84 | compute: 85 | system-cpu: "*" 86 | block-io: "sd*" 87 | net-io: "*" 88 | cluster: ceph-pools-io, ceph-pgs-io 89 | 90 | #---------------------------------- TEST PROFILES -------------------------------------------------------------------- 91 | profiles: 92 | spawn: 93 | OS_1_to_1: 94 | openstack: 95 | count: "=1" 96 | cfg_name: wally_1024 97 | network_zone_name: net04 98 | flt_ip_pool: net04_ext 99 | skip_preparation: true 100 | 101 | test: 102 | ceph_vdb: 103 | - io: 104 | load: ceph 105 | params: 106 | FILENAME: /dev/vdb 107 | FILESIZE: AUTO 108 | 109 | cinder_iscsi_vdb: 110 | - io: 111 | load: cinder_iscsi 112 | params: 113 | FILENAME: /dev/vdb 114 | FILESIZE: AUTO 115 | 116 | nova_io: 117 | - io: 118 | load: hdd 119 | params: 120 | FILENAME: /opt/test.bin 121 | FILESIZE: AUTO 122 | 123 | openstack_ceph: OS_1_to_1 + ceph_vdb 124 | openstack_cinder: OS_1_to_1 + ceph_iscsi_vdb 125 | openstack_nova: OS_1_to_1 + nova_io 126 | 127 | 128 | default_dev_roles: 129 | - role=testnode: 130 | - type=cpu: client_cpu 131 | - type=block: client_disk 132 | - type=eth: client_net 133 | - type=weth: client_net 134 | 135 | - role=storage: 136 | - type=cpu: storage_cpu 137 | - type=block: storage_disk 138 | - type=eth: storage_net 139 | - type=weth: storage_net 140 | 141 | - role=compute: 142 | - type=cpu: compute_cpu 143 | - type=block: compute_disk 144 | - type=eth: compute_net 145 | - type=weth: compute_net 146 | -------------------------------------------------------------------------------- /configs-examples/local_block_device.yml: -------------------------------------------------------------------------------- 1 | include: default.yaml 2 | run_sensors: false 3 | results_storage: {STORAGE_FOLDER} 4 | 5 | nodes: 6 | localhost: testnode 7 | 8 | tests: 9 | - fio: 10 | load: hdd 11 | params: 12 | FILENAME: {STORAGE_DEV_OR_FILE_NAME} 13 | FILESIZE: {STORAGE_OR_FILE_SIZE} 14 | -------------------------------------------------------------------------------- /configs-examples/local_hdd.yaml: -------------------------------------------------------------------------------- 1 | include: default.yaml 2 | run_sensors: true 3 | results_storage: /var/wally_results 4 | 5 | nodes: 6 | localhost: testnode 7 | 8 | tests: 9 | - fio: 10 | load: verify 11 | params: 12 | FILENAME: /dev/rbd0 13 | FILESIZE: 4G 14 | -------------------------------------------------------------------------------- /configs-examples/local_vm_ceph.yml: -------------------------------------------------------------------------------- 1 | include: default.yaml 2 | run_sensors: true 3 | results_storage: /var/wally_results 4 | discover: ceph 5 | 6 | ceph: 7 | root_node: ceph-client 8 | 9 | sleep: 0 10 | 11 | nodes: 12 | {USER}@ceph-client: testnode 13 | 14 | tests: 15 | - fio: 16 | load: ceph 17 | params: 18 | FILENAME: /dev/rbd0 19 | FILESIZE: {SIZE} 20 | RUNTIME: 180 21 | -------------------------------------------------------------------------------- /configs-examples/logging.yaml: -------------------------------------------------------------------------------- 1 | logging: 2 | version: 1 3 | disable_existing_loggers: true 4 | formatters: 5 | simple: 6 | format: "%(asctime)s - %(levelname)s - %(message)s" 7 | datefmt: "%H:%M:%S" 8 | handlers: 9 | console: 10 | level: INFO 11 | class: logging.StreamHandler 12 | formatter: 
simple 13 | stream: "ext://sys.stdout" 14 | log_file: 15 | level: DEBUG 16 | class: logging.FileHandler 17 | formatter: simple 18 | filename: null 19 | loggers: 20 | cmd: {"level": "DEBUG", "handlers": ["console", "log_file"]} 21 | storage: {"level": "DEBUG", "handlers": ["console", "log_file"]} 22 | rpc: {"level": "DEBUG", "handlers": ["console", "log_file"]} 23 | cephlib: {"level": "DEBUG", "handlers": ["console", "log_file"]} 24 | collect: {"level": "DEBUG", "handlers": ["console", "log_file"]} 25 | agent: {"level": "DEBUG", "handlers": ["console", "log_file"]} 26 | wally: {"level": "DEBUG", "handlers": ["console", "log_file"]} 27 | -------------------------------------------------------------------------------- /configs-examples/openstack_ceph.yaml: -------------------------------------------------------------------------------- 1 | include: default.yaml 2 | discover: openstack,fuel_openrc_only 3 | run_sensors: true 4 | results_storage: /var/wally_results 5 | 6 | fuel: 7 | url: http://FUEL_MASTER_EXTERNAL_IP:FUEL_MASTER_EXTERNAL_IP_DEFAULT_8000/ 8 | creds: FUEL_KS_USER:FUEL_KS_PASSWD@FUEL_KS_TENANT 9 | ssh_creds: USER:PASSWD 10 | openstack_env: ENV_NAME 11 | 12 | kubernetes: null 13 | lxd: null 14 | docker_swarm: null 15 | 16 | openstack: 17 | OPENRC: /home/koder/workspace/scale_openrc 18 | auth: USER:PASSWD:KEY_FILE 19 | 20 | openstack_reuse: 21 | VM: ["ubuntu@wally-phytographic-sharla"] 22 | test: ["some_testname"] 23 | 24 | test_profile: openstack_ceph 25 | -------------------------------------------------------------------------------- /configs-examples/perf_lab.yml: -------------------------------------------------------------------------------- 1 | include: default.yaml 2 | run_sensors: true 3 | results_storage: /var/wally_results 4 | 5 | discover: ceph 6 | ceph: 7 | root_node: root@cz7625 8 | ip_remap: 9 | 10.8.0.4: 172.16.164.71 10 | 10.8.0.3: 172.16.164.72 11 | 10.8.0.2: 172.16.164.73 12 | 10.8.0.5: 172.16.164.74 13 | 10.8.0.6: 172.16.164.75 14 | 10.8.0.7: 172.16.164.76 15 | 10.8.0.8: 172.16.164.77 16 | 10.8.0.9: 172.16.164.78 17 | 18 | nodes: 19 | root@cz7625: testnode 20 | root@cz7626: testnode 21 | root@cz7627: testnode 22 | 23 | # sleep: 5 24 | 25 | tests: 26 | - fio: 27 | load: verify 28 | params: 29 | FILENAME: /dev/rbd0 30 | FILESIZE: 700G 31 | RUNTIME: 600 32 | 33 | 34 | dev_roles: 35 | - role=testnode: 36 | - rbd0: client_disk 37 | - role=ceph-osd: 38 | - sd[g-z]: [storage_disk, ceph_storage] 39 | - sd[c-f]: [storage_disk, ceph_journal] 40 | - role=compute: 41 | - type=hdd: compute_disk 42 | - type=eth: compute_net 43 | -------------------------------------------------------------------------------- /fio_binaries/fio_artful_x86_64.bz2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mirantis/disk_perf_test_tool/aaad8b81218d0907e9aa1425e18b1a044f06960d/fio_binaries/fio_artful_x86_64.bz2 -------------------------------------------------------------------------------- /fio_binaries/fio_bionic_x86_64.bz2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mirantis/disk_perf_test_tool/aaad8b81218d0907e9aa1425e18b1a044f06960d/fio_binaries/fio_bionic_x86_64.bz2 -------------------------------------------------------------------------------- /fio_binaries/fio_trusty_x86_64.bz2: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Mirantis/disk_perf_test_tool/aaad8b81218d0907e9aa1425e18b1a044f06960d/fio_binaries/fio_trusty_x86_64.bz2 -------------------------------------------------------------------------------- /fio_binaries/fio_xenial_x86_64.bz2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mirantis/disk_perf_test_tool/aaad8b81218d0907e9aa1425e18b1a044f06960d/fio_binaries/fio_xenial_x86_64.bz2 -------------------------------------------------------------------------------- /report_templates/base.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | Performance Report 5 | 7 | 8 | 9 | 12 | 13 | 23 | 24 |
25 |
26 |
27 |
28 | {% block content %}{% endblock %} 29 |
30 |
31 |
32 |
33 | 34 | -------------------------------------------------------------------------------- /report_templates/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | Report 7 | 8 | 9 | 10 | 11 | 12 | 13 | 24 |
25 |
26 |
27 | {{{content}}} 28 |
29 |
30 |
31 | 32 | 33 | 34 | 35 | -------------------------------------------------------------------------------- /report_templates/main.css: -------------------------------------------------------------------------------- 1 | @import url('//maxcdn.bootstrapcdn.com/font-awesome/4.4.0/css/font-awesome.min.css'); 2 | 3 | html { 4 | font-size: 14px; 5 | min-height: 100%; 6 | position: relative; 7 | } 8 | 9 | body { 10 | background: #ffffff; 11 | font-family: 'Open Sans', sans-serif; 12 | font-size: 14px; 13 | overflow-y: scroll; 14 | height: 100%; 15 | min-width: 800px; 16 | } 17 | 18 | a:focus { 19 | outline: none; 20 | } 21 | 22 | h1, h2, h3, h4, h5 { 23 | font-weight: 100; 24 | } 25 | 26 | .navigation { 27 | width: 200px; 28 | display: block; 29 | background-color: #5E6D70; 30 | position: absolute; 31 | height: 100%; 32 | top: 0px; 33 | left: 0px; 34 | bottom: 0; 35 | right: 0; 36 | z-index: 99; 37 | } 38 | 39 | .container-fluid { 40 | padding-left: 200px; 41 | } 42 | 43 | .row { 44 | margin: 0; 45 | padding: 0; 46 | position: relative; 47 | } 48 | 49 | .sidebar { 50 | clear: both; 51 | position: fixed; 52 | } 53 | 54 | .panel { 55 | background-color: #5E6D70; 56 | border: 1px solid transparent; 57 | border-radius: 0; 58 | box-shadow: 0 0 0 rgba(0, 0, 0, 0.05); 59 | margin-bottom: 20px; 60 | } 61 | 62 | .list-group { 63 | margin-top: 20px; 64 | } 65 | 66 | .nav-group { 67 | display: block; 68 | padding: 10px 15px; 69 | position: relative; 70 | background-color: #424E4F; 71 | text-transform: uppercase; 72 | font-weight: 500; 73 | color: #ffffff; 74 | margin: auto; 75 | max-width: 300px; 76 | text-decoration: none; 77 | } 78 | .nav-group:hover, .nav-group:focus { 79 | background-color: #424E4F; 80 | color: #ffffff; 81 | text-decoration: none; 82 | border: none; 83 | } 84 | 85 | .nav-group-item { 86 | display: block; 87 | color: #ffffff; 88 | padding: 10px 15px; 89 | position: relative; 90 | text-decoration: none; 91 | font-size: 13px; 92 | padding-left: 20px; 93 | font-weight: 300; 94 | } 95 | 96 | .nav-group-item:hover { 97 | color: #b4d0d6; 98 | text-decoration: none; 99 | } 100 | 101 | 102 | #content1 { 103 | display: none; 104 | clear: both; 105 | } 106 | 107 | #content1:target { 108 | display: block; 109 | } 110 | 111 | #content2 { 112 | display: none; 113 | clear: both; 114 | } 115 | 116 | #content2:target { 117 | display: block; 118 | } -------------------------------------------------------------------------------- /report_templates/report_all.html: -------------------------------------------------------------------------------- 1 | {% extends "base.html" %} 2 | {% block content %} 3 | 4 | {% if ceph_summary is not None or OS_summary is not None %} 5 |

Summary

6 | 7 | {% if ceph_summary is not None %} 8 | 9 | 10 | 11 | 12 | {% endif OS_summary is not None %} 13 | 14 | {% endif %} 15 |
OSD count{{ceph_summary.osd_count}}Total Ceph disks count{{ceph_summary.OSD_hdd_count}}
Compute count{{OS_summary.compute_count}}
16 | {% endif %} 17 | 18 | {% if perf_summary is not None %} 19 |
20 |

Random direct performance,
4KiB blocks

21 | {% make_table(2, 'style="width: auto;" class="table table-bordered table-striped"', 22 | "Operation", "IOPS", 23 | Read, {{perf_summary.direct_iops_r_max[0]}} ~ {{perf_summary.direct_iops_r_max[1]}}%, 24 | Write, {{perf_summary.direct_iops_w_max[0]}} ~ {{perf_summary.direct_iops_w_max[1]}}%) %} 25 |
           26 |

Random direct performance,
16MiB blocks

27 | {% make_table(2, 'style="width: auto;" class="table table-bordered table-striped"', 28 | "Operation", "BW MiBps", 29 | Read, {{perf_summary.bw_read_max[0]}} ~ {{perf_summary.bw_read_max[1]}}%, 30 | Write, {{perf_summary.bw_write_max[0]}} ~ {{perf_summary.bw_write_max[1]}}%) %} 31 |
           32 |

Maximal sync random write IOPS
for given latency, 4KiB

33 | {% make_table(2, 'style="width: auto;" class="table table-bordered table-striped">', 34 | "Latency ms", "IOPS", 35 | 10, {{perf_summary.rws4k_10ms}}, 36 | 30, {{perf_summary.rws4k_30ms}}, 37 | 100, {{perf_summary.rws4k_100ms}}) %} 38 |
39 |
40 | 41 | {% make_table_nh(2, "", 42 | {{perf_summary.rand_read_iops}}, {{perf_summary.rand_write_iops}}, 43 | {{perf_summary.rand_read_bw}}, {{perf_summary.rand_write_bw}}) %} 44 | {% endif %} 45 | 46 | {% endblock %} 47 | -------------------------------------------------------------------------------- /report_templates/report_ceph.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | Report 5 | 7 | 8 | 9 | 10 | 14 | 15 | 25 | 26 |
27 | 28 |
29 |
30 |
31 | 32 |

Summary

33 | 34 | 35 | 36 | 37 | 38 | 39 |
Compute countcomputesOSD countOSD countTotal Ceph disks countOSD_hdd_count
40 | 41 |
42 |

Random direct performance,
4KiB blocks

43 | 44 | 45 | 46 | 47 | 48 | 49 | 50 | 51 | 52 | 53 | 54 | 55 | 56 |
OperationIOPS +- conf% ~ dev%
Read
{direct_iops_r_max[0]} +- {direct_iops_r_max[1]} ~ {direct_iops_r_max[2]}
Write
{direct_iops_w_max[0]} +- {direct_iops_w_max[1]} ~ {direct_iops_w_max[2]}
57 |
           58 |

Random direct performance,
16MiB blocks

59 | 60 | 61 | 62 | 63 | 64 | 65 | 66 | 67 | 68 | 69 | 70 | 71 | 72 |
OperationBW MiBps +- conf% ~ dev%
Read
{bw_read_max[0]} +- {bw_read_max[1]} ~ {bw_read_max[2]}
Write
{bw_write_max[0]} +- {bw_write_max[1]} ~ {bw_write_max[2]}
73 |
           74 |

Maximal sync random write IOPS
for given latency, 4KiB

75 | 76 | 77 | 78 | 79 | 80 | 81 | 82 | 83 | 84 | 85 | 86 | 87 | 88 | 89 | 90 | 91 | 92 |
Latency msIOPS
10
{rws4k_10ms}
30
{rws4k_30ms}
100
{rws4k_100ms}
93 |
94 |
95 |

96 | 97 | 98 | 99 | 100 | 101 | 102 |
{rand_read_4k}{rand_write_4k}
{rand_read_16m}{rand_write_16m}
103 |
104 | 105 |
106 |
107 | 127 | 128 | 129 | 130 | -------------------------------------------------------------------------------- /report_templates/report_cinder_iscsi.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | Report 5 | 7 | 8 | 9 | 10 | 14 |
15 |
16 |
17 |
18 |
19 |

Random direct performance,
blocks

20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 |
OperationIOPS
Read 4KiB
{direct_iops_r_max[0]} ~ {direct_iops_r_max[1]}%
Write 4KiB
{direct_iops_w_max[0]} ~ {direct_iops_w_max[1]}%
34 |
           35 |

Sequenced direct performance,
1MiB blocks

36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 |
OperationBW MiBps
Read
{bw_read_max[0]} ~ {bw_read_max[1]}%
Write
{bw_write_max[0]} ~ {bw_write_max[1]}%
50 |
           51 |

Maximal sync random write IOPS
for given latency, 4KiB

52 | 53 | 54 | 55 | 56 | 57 | 58 | 59 | 60 | 61 | 62 | 63 | 64 | 65 | 66 | 67 | 68 | 69 |
Latency msIOPS
10
{rws4k_10ms}
30
{rws4k_30ms}
100
{rws4k_100ms}
70 |
71 |
72 |
73 |
74 |
75 |
{rand_read_4k}
76 |
{rand_write_4k}
77 |
78 |
79 | 80 | 81 | -------------------------------------------------------------------------------- /report_templates/report_hdd.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | Report 5 | 7 | 8 | 9 | 10 | 14 |
15 |
16 |
17 |
18 |
19 |

Random direct performance,
4KiB blocks

20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 |
OperationIOPS
Read
{direct_iops_r_max[0]} ~ {direct_iops_r_max[1]}%
Write
{direct_iops_w_max[0]} ~ {direct_iops_w_max[1]}%
34 |
           35 |

Sequenced direct performance,
1MiB blocks

36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | 47 | 48 | 49 |
OperationBW MiBps
Read
{bw_read_max[0]} ~ {bw_read_max[1]}%
Write
{bw_write_max[0]} ~ {bw_write_max[1]}%
50 |
           51 |

Maximal sync random write IOPS
for given latency, 4KiB

52 | 53 | 54 | 55 | 56 | 57 | 58 | 59 | 60 | 61 | 62 | 63 | 64 | 65 | 66 | 67 | 68 | 69 |
Latency msIOPS
10
{rws4k_10ms}
30
{rws4k_30ms}
100
{rws4k_100ms}
70 |
71 |
72 |
73 |
74 |
75 |
{rand_read_4k}
76 |
{rand_write_4k}
77 |
78 | 98 |
99 | 100 | 101 | -------------------------------------------------------------------------------- /report_templates/report_iops_vs_lat.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | Report 5 | 7 | 8 | 9 | 10 | 13 |
14 | 15 |
16 |
17 |
18 |

Latency vs IOPS vs requested IOPS, {oper_descr}


19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | -------------------------------------------------------------------------------- /report_templates/report_linearity.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | Report 5 | 7 | 8 | 9 | 10 | 13 |
14 | 15 |
16 |
17 |
18 |

{descr[oper_descr]} VM_COUNT:{descr[vm_count]} Thread per vm:{descr[concurence]}


19 |
{iops_vs_lat}{iops_vs_requested}
20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | decorator 2 | ecdsa 3 | futures 4 | ipython 5 | iso8601 6 | matplotlib 7 | netaddr 8 | numpy 9 | oktest 10 | paramiko 11 | petname 12 | prest 13 | prettytable 14 | psutil 15 | psutil 16 | pycrypto 17 | pytest 18 | python-cinderclient 19 | python-glanceclient 20 | python-keystoneclient 21 | python-novaclient 22 | PyYAML 23 | requests 24 | scipy 25 | seaborn 26 | simplejson 27 | statsmodels 28 | texttable -------------------------------------------------------------------------------- /requirements_dev.txt: -------------------------------------------------------------------------------- 1 | mypy 2 | pylint 3 | -------------------------------------------------------------------------------- /scripts/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mirantis/disk_perf_test_tool/aaad8b81218d0907e9aa1425e18b1a044f06960d/scripts/__init__.py -------------------------------------------------------------------------------- /scripts/assumptions_check.py: -------------------------------------------------------------------------------- 1 | import sys 2 | 3 | import texttable as TT 4 | 5 | import numpy as np 6 | import matplotlib.pyplot as plt 7 | from numpy.polynomial.chebyshev import chebfit, chebval 8 | 9 | from .io_results_loader import load_data, filter_data 10 | from .statistic import approximate_line, difference 11 | 12 | 13 | def linearity_table(data, types, vals): 14 | """ create table by pyplot with diferences 15 | between original and approximated 16 | vals - values to make line""" 17 | fields = 'blocksize_b', 'iops_mediana' 18 | for tp in types: 19 | filtered_data = filter_data('linearity_test_' + tp, fields) 20 | # all values 21 | x = [] 22 | y = [] 23 | # values to make line 24 | ax = [] 25 | ay = [] 26 | 27 | for sz, med in sorted(filtered_data(data)): 28 | iotime_ms = 1000. 
// med 29 | x.append(sz / 1024.0) 30 | y.append(iotime_ms) 31 | if sz in vals: 32 | ax.append(sz / 1024.0) 33 | ay.append(iotime_ms) 34 | 35 | ynew = approximate_line(ax, ay, x, True) 36 | 37 | dif, _, _ = difference(y, ynew) 38 | table_data = [] 39 | for i, d in zip(x, dif): 40 | row = ["{0:.1f}".format(i), "{0:.1f}".format(d[0]), "{0:.0f}".format(d[1]*100)] 41 | table_data.append(row) 42 | 43 | tab = TT.Texttable() 44 | tab.set_deco(tab.VLINES) 45 | 46 | header = ["BlockSize, kB", "Absolute difference (ms)", "Relative difference (%)"] 47 | tab.add_row(header) 48 | tab.header = header 49 | 50 | for row in table_data: 51 | tab.add_row(row) 52 | 53 | # uncomment to get table in pretty pictures :) 54 | # colLabels = ("BlockSize, kB", "Absolute difference (ms)", "Relative difference (%)") 55 | # fig = plt.figure() 56 | # ax = fig.add_subplot(111) 57 | # ax.axis('off') 58 | # #do the table 59 | # the_table = ax.table(cellText=table_data, 60 | # colLabels=colLabels, 61 | # loc='center') 62 | # plt.savefig(tp+".png") 63 | 64 | 65 | def th_plot(data, tt): 66 | fields = 'concurence', 'iops_mediana', 'lat_mediana' 67 | conc_4k = filter_data('concurrence_test_' + tt, fields, blocksize='4k') 68 | filtered_data = sorted(list(conc_4k(data))) 69 | 70 | x, iops, lat = zip(*filtered_data) 71 | 72 | _, ax1 = plt.subplots() 73 | 74 | xnew = np.linspace(min(x), max(x), 50) 75 | # plt.plot(xnew, power_smooth, 'b-', label='iops') 76 | ax1.plot(x, iops, 'b*') 77 | 78 | for degree in (3,): 79 | c = chebfit(x, iops, degree) 80 | vals = chebval(xnew, c) 81 | ax1.plot(xnew, vals, 'g--') 82 | 83 | # ax1.set_xlabel('thread count') 84 | # ax1.set_ylabel('iops') 85 | 86 | # ax2 = ax1.twinx() 87 | # lat = [i / 1000 for i in lat] 88 | # ax2.plot(x, lat, 'r*') 89 | 90 | # tck = splrep(x, lat, s=0.0) 91 | # power_smooth = splev(xnew, tck) 92 | # ax2.plot(xnew, power_smooth, 'r-', label='lat') 93 | 94 | # xp = xnew[0] 95 | # yp = power_smooth[0] 96 | # for _x, _y in zip(xnew[1:], power_smooth[1:]): 97 | # if _y >= 100: 98 | # xres = (_y - 100.) / (_y - yp) * (_x - xp) + xp 99 | # ax2.plot([xres, xres], [min(power_smooth), max(power_smooth)], 'g--') 100 | # break 101 | # xp = _x 102 | # yp = _y 103 | 104 | # ax2.plot([min(x), max(x)], [20, 20], 'g--') 105 | # ax2.plot([min(x), max(x)], [100, 100], 'g--') 106 | 107 | # ax2.set_ylabel("lat ms") 108 | # plt.legend(loc=2) 109 | 110 | 111 | def main(argv): 112 | data = list(load_data(open(argv[1]).read())) 113 | linearity_table(data, ["rwd", "rws", "rrd"], [4096, 4096*1024]) 114 | # linearity_plot(data, ["rwd", "rws", "rrd"])#, [4096, 4096*1024]) 115 | # linearity_plot(data, ["rws", "rwd"]) 116 | # th_plot(data, 'rws') 117 | # th_plot(data, 'rrs') 118 | plt.show() 119 | 120 | 121 | if __name__ == "__main__": 122 | exit(main(sys.argv)) 123 | -------------------------------------------------------------------------------- /scripts/build_fio_ubuntu.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | set -xe 3 | 4 | apt update 5 | apt -y install g++ git zlib1g-dev libaio-dev make bzip2 6 | cd /tmp 7 | git clone https://github.com/axboe/fio.git 8 | cd fio 9 | ./configure 10 | make -j 4 11 | . 
/etc/lsb-release 12 | chmod a-x fio 13 | bzip2 -z -9 fio 14 | mv fio.bz2 "fio_${DISTRIB_CODENAME}_x86_64.bz2" 15 | -------------------------------------------------------------------------------- /scripts/config.sh: -------------------------------------------------------------------------------- 1 | FLAVOR_NAME="disk_io_perf.1024" 2 | 3 | SERV_GROUPS="disk_io_perf.aa.0 disk_io_perf.aa.1 disk_io_perf.aa.2 disk_io_perf.aa.3 disk_io_perf.aa.4 disk_io_perf.aa.5 disk_io_perf.aa.6 disk_io_perf.aa.7" 4 | 5 | KEYPAIR_NAME="disk_io_perf" 6 | IMAGE_NAME="disk_io_perf" 7 | KEY_FILE_NAME="${KEYPAIR_NAME}.pem" 8 | IMAGE_URL="https://cloud-images.ubuntu.com/trusty/current/trusty-server-cloudimg-amd64-disk1.img" 9 | IMAGE_USER="ubuntu" 10 | NETWORK_ZONE_NAME="net04" 11 | FL_NETWORK_ZONE_NAME="net04_ext" 12 | VM_COUNT="x1" 13 | TESTER_TYPE="iozone" 14 | RUNNER="ssh" 15 | SECGROUP='disk_io_perf' 16 | -------------------------------------------------------------------------------- /scripts/data.py: -------------------------------------------------------------------------------- 1 | import re 2 | import sys 3 | import json 4 | 5 | from disk_perf_test_tool.utils import kb_to_ssize, ssize_to_kb 6 | 7 | splitter_rr = "(?ms)=====+\n" 8 | 9 | test_time_rr = r""" 10 | (?ims)(?P[:0-9]{8}) - DEBUG - io-perf-tool - Passing barrier, starting test 11 | (?P[:0-9]{8}) - DEBUG - io-perf-tool - Done\. Closing connection 12 | """ 13 | 14 | test_time_rr = test_time_rr.strip().replace('\n', '\\s+') 15 | test_time_rr = test_time_rr.strip().replace(' ', '\\s+') 16 | test_time_re = re.compile(test_time_rr) 17 | 18 | 19 | def to_sec(val): 20 | assert val.count(":") == 2 21 | h, m, s = val.split(":") 22 | return int(h) * 3600 + int(m) * 60 + int(s) 23 | 24 | 25 | def to_min_sec(val): 26 | return "{0:2d}:{1:02d}".format(val / 60, val % 60) 27 | 28 | 29 | def get_test_time(block): 30 | time_m = test_time_re.search(block) 31 | if time_m is None: 32 | raise ValueError("Can't found time") 33 | 34 | start_time = to_sec(time_m.group('start_time')) 35 | finish_time = to_sec(time_m.group('finish_time')) 36 | test_time = finish_time - start_time 37 | 38 | if test_time < 0: 39 | # ..... 
really need print UTC to logs 40 | test_time += 24 * 60 * 60 41 | return test_time 42 | 43 | 44 | run_test_params_rr = r"(?ims)Run\s+test\s+with" + \ 45 | r"\s+'.*?--iosize\s+(?P[^ ]*)" 46 | run_test_params_re = re.compile(run_test_params_rr) 47 | 48 | 49 | def get_orig_size(block): 50 | orig_size = run_test_params_re.search(block) 51 | if orig_size is None: 52 | print block 53 | raise ValueError("Can't find origin size") 54 | return orig_size.group(1) 55 | 56 | 57 | def get_data_from_output(fname): 58 | results = {} 59 | results_meta = {} 60 | fc = open(fname).read() 61 | prev_block = None 62 | 63 | for block in re.split(splitter_rr, fc): 64 | block = block.strip() 65 | 66 | if block.startswith("[{u'__meta__':"): 67 | 68 | for val in eval(block): 69 | meta = val['__meta__'] 70 | 71 | if meta['sync']: 72 | meta['sync'] = 's' 73 | elif meta['direct']: 74 | meta['sync'] = 'd' 75 | else: 76 | meta['sync'] = 'a' 77 | 78 | meta['fsize'] = kb_to_ssize(meta['size'] * meta['concurence']) 79 | key = ("{action} {sync} {blocksize}k " + 80 | "{concurence} {fsize}").format(**meta) 81 | results.setdefault(key, []).append(val['bw']) 82 | 83 | cmeta = results_meta.setdefault(key, {}) 84 | cmeta.setdefault('times', []).append(get_test_time(prev_block)) 85 | cmeta['orig_size'] = get_orig_size(prev_block) 86 | 87 | prev_block = block 88 | 89 | processed_res = {} 90 | 91 | for k, v in results.items(): 92 | v.sort() 93 | med = float(sum(v)) / len(v) 94 | ran = sum(abs(x - med) for x in v) / len(v) 95 | processed_res[k] = (int(med), int(ran)) 96 | t = results_meta[k]['times'] 97 | results_meta[k]['times'] = int(float(sum(t)) / len(t)) 98 | 99 | return processed_res, results_meta 100 | 101 | 102 | def ksort(x): 103 | op, sync, sz, conc, fsize = x.split(" ") 104 | return (op, sync, int(sz[:-1]), int(conc)) 105 | 106 | 107 | def create_json_results(meta, file_data): 108 | row = {"build_id": "", 109 | "type": "", 110 | "iso_md5": ""} 111 | row.update(file_data) 112 | return json.dumps(row) 113 | 114 | 115 | LINES_PER_HEADER = 20 116 | 117 | 118 | def show_data(*pathes): 119 | begin = "| {:>10} {:>6} {:>5} {:>3} {:>5} {:>7} {:>7}" 120 | first_file_templ = " | {:>6} ~ {:>5} {:>2}% {:>5} {:>6}" 121 | other_file_templ = first_file_templ + " ---- {:>6}%" 122 | 123 | line_templ = begin + first_file_templ + \ 124 | other_file_templ * (len(pathes) - 1) + " |" 125 | 126 | header_ln = line_templ.replace("<", "^").replace(">", "^") 127 | 128 | params = ["Oper", "Sync", "BSZ", "CC", "DSIZE", "OSIZE", "XSIZE", 129 | "BW1", "DEV1", "%", "IOPS1", "TIME"] 130 | for pos in range(1, len(pathes)): 131 | params += "BW{0}+DEV{0}+%+IOPS{0}+DIFF %+TTIME".format(pos).split("+") 132 | 133 | header_ln = header_ln.format(*params) 134 | 135 | sep = '-' * len(header_ln) 136 | 137 | results = [] 138 | metas = [] 139 | 140 | for path in pathes: 141 | result, meta = get_data_from_output(path) 142 | results.append(result) 143 | metas.append(meta) 144 | 145 | print sep 146 | print header_ln 147 | print sep 148 | 149 | prev_tp = None 150 | 151 | common_keys = set(results[0].keys()) 152 | for result in results[1:]: 153 | common_keys &= set(result.keys()) 154 | 155 | lcount = 0 156 | for k in sorted(common_keys, key=ksort): 157 | tp = k.rsplit(" ", 3)[0] 158 | op, s, sz, conc, fsize = k.split(" ") 159 | 160 | xsize = int(ssize_to_kb(fsize) / ssize_to_kb(sz)) / int(conc) 161 | 162 | s = {'a': 'async', "s": "sync", "d": "direct"}[s] 163 | 164 | if tp != prev_tp and prev_tp is not None: 165 | print sep 166 | 167 | if lcount > LINES_PER_HEADER: 168 | 
print header_ln 169 | print sep 170 | lcount = 0 171 | 172 | prev_tp = tp 173 | 174 | m0, d0 = results[0][k] 175 | iops0 = m0 / int(sz[:-1]) 176 | perc0 = int(d0 * 100.0 / m0 + 0.5) 177 | 178 | data = [op, s, sz, conc, fsize, 179 | metas[0][k]['orig_size'], xsize, 180 | m0, d0, perc0, iops0, 181 | to_min_sec(metas[0][k]['times'])] 182 | 183 | for meta, result in zip(metas[1:], results[1:]): 184 | m, d = result[k] 185 | iops = m / int(sz[:-1]) 186 | perc = int(d * 100.0 / m + 0.5) 187 | avg_diff = int(((m - m0) * 100.) / m + 0.5) 188 | 189 | dtime = to_min_sec(meta[k]['times']) 190 | data.extend([m, d, perc, iops, avg_diff, dtime]) 191 | 192 | print line_templ.format(*data) 193 | lcount += 1 194 | 195 | print sep 196 | 197 | 198 | def main(argv): 199 | path1 = argv[0] 200 | if path1 == '--json': 201 | print create_json_results(*get_data_from_output(argv[1])) 202 | else: 203 | show_data(*argv) 204 | return 0 205 | 206 | if __name__ == "__main__": 207 | exit(main(sys.argv[1:])) 208 | # print " ", results[k] 209 | -------------------------------------------------------------------------------- /scripts/data2.py: -------------------------------------------------------------------------------- 1 | import sys 2 | from data_stat import med_dev, round_deviation, groupby_globally 3 | from data_stat import read_data_agent_result 4 | 5 | 6 | def key(x): 7 | return (x['__meta__']['blocksize'], 8 | 'd' if x['__meta__']['direct_io'] else 's', 9 | x['__meta__']['action'], 10 | x['__meta__']['concurence']) 11 | 12 | 13 | template = "{bs:>4} {action:>12} {cache_tp:>3} {conc:>4}" 14 | template += " | {iops[0]:>6} ~ {iops[1]:>5} | {bw[0]:>7} ~ {bw[1]:>6}" 15 | template += " | {lat[0]:>6} ~ {lat[1]:>5} |" 16 | 17 | headers = dict(bs="BS", 18 | action="operation", 19 | cache_tp="S/D", 20 | conc="CONC", 21 | iops=("IOPS", "dev"), 22 | bw=("BW kBps", "dev"), 23 | lat=("LAT ms", "dev")) 24 | 25 | 26 | def main(argv): 27 | data = read_data_agent_result(sys.argv[1]) 28 | grouped = groupby_globally(data, key) 29 | 30 | print template.format(**headers) 31 | 32 | for (bs, cache_tp, act, conc), curr_data in sorted(grouped.items()): 33 | iops = med_dev([i['iops'] * int(conc) for i in curr_data]) 34 | bw = med_dev([i['bw'] * int(conc) for i in curr_data]) 35 | lat = med_dev([i['lat'] / 1000 for i in curr_data]) 36 | 37 | iops = round_deviation(iops) 38 | bw = round_deviation(bw) 39 | lat = round_deviation(lat) 40 | 41 | params = dict( 42 | bs=bs, 43 | action=act, 44 | cache_tp=cache_tp, 45 | iops=iops, 46 | bw=bw, 47 | lat=lat, 48 | conc=conc 49 | ) 50 | 51 | print template.format(**params) 52 | 53 | 54 | if __name__ == "__main__": 55 | exit(main(sys.argv)) 56 | 57 | # vals = [(123, 23), (125678, 5678), (123.546756, 23.77), 58 | # (123.546756, 102.77), (0.1234, 0.0224), 59 | # (0.001234, 0.000224), (0.001234, 0.0000224)] 60 | # for val in : 61 | # print val, "=>", round_deviation(val) 62 | -------------------------------------------------------------------------------- /scripts/data_generator.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import uuid 4 | import random 5 | import itertools 6 | 7 | from petname import Generate as pet_generate 8 | from storage_api import create_storage 9 | 10 | from report import ssize_to_kb 11 | 12 | types = ["GA", "master"] + [pet_generate(2, '-') for _ in range(2)] 13 | random.shuffle(types) 14 | tp = itertools.cycle(types) 15 | 16 | sz = ["1k", "4k", "64k", "256k", "1m"] 17 | op_type = ["randread", "read", 
"randwrite", "write"] 18 | is_sync = ["s", "a"] 19 | 20 | storage = create_storage(sys.argv[1], "", "") 21 | combinations = list(itertools.product(op_type, is_sync, sz)) 22 | 23 | for i in range(30): 24 | row = {"build_id": pet_generate(2, " "), 25 | "type": next(tp), 26 | "iso_md5": uuid.uuid4().get_hex()} 27 | 28 | for op_type, is_sync, sz in combinations: 29 | ((random.random() - 0.5) * 0.2 + 1) 30 | row[" ".join([op_type, is_sync, sz])] = ( 31 | ((random.random() - 0.5) * 0.2 + 1) * (ssize_to_kb(sz) ** 0.5), 32 | ((random.random() - 0.5) * 0.2 + 1) * (ssize_to_kb(sz) ** 0.5) * 0.15) 33 | 34 | print len(row) 35 | storage.store(row) 36 | -------------------------------------------------------------------------------- /scripts/disk_io_pp.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import collections 3 | 4 | import scipy.stats as stats 5 | import matplotlib.mlab as mlab 6 | import matplotlib.pyplot as plt 7 | 8 | from data_stat import med_dev, round_deviation 9 | from data_stat import read_data_agent_result 10 | 11 | data = read_data_agent_result(sys.argv[1]) 12 | 13 | # for run in data: 14 | # for name, numbers in run['res'].items(): 15 | # # med, dev = round_deviation(med_dev(numbers['iops'])) 16 | # # print name, med, '~', dev 17 | # distr = collections.defaultdict(lambda: 0.0) 18 | # for i in numbers['iops']: 19 | # distr[i] += 1 20 | 21 | # print name 22 | # for key, val in sorted(distr.items()): 23 | # print " ", key, val 24 | # print 25 | 26 | 27 | 28 | # # example data 29 | # mu = 100 # mean of distribution 30 | # sigma = 15 # standard deviation of distribution 31 | # x = mu + sigma * np.random.randn(10000) 32 | 33 | x = data[0]['res'][sys.argv[2]]['iops'] 34 | # mu, sigma = med_dev(x) 35 | # print mu, sigma 36 | 37 | # med_sz = 1 38 | # x2 = x[:len(x) // med_sz * med_sz] 39 | # x2 = [sum(vals) / len(vals) for vals in zip(*[x2[i::med_sz] 40 | # for i in range(med_sz)])] 41 | 42 | mu, sigma = med_dev(x) 43 | print mu, sigma 44 | print stats.normaltest(x) 45 | 46 | num_bins = 20 47 | # the histogram of the data 48 | n, bins, patches = plt.hist(x, num_bins, normed=1, facecolor='green', alpha=0.5) 49 | # add a 'best fit' line 50 | 51 | y = mlab.normpdf(bins, mu, sigma) 52 | plt.plot(bins, y, 'r--') 53 | 54 | plt.xlabel('Smarts') 55 | plt.ylabel('Probability') 56 | plt.title(r'Histogram of IQ: $\mu={}$, $\sigma={}$'.format(int(mu), int(sigma))) 57 | 58 | # Tweak spacing to prevent clipping of ylabel 59 | plt.subplots_adjust(left=0.15) 60 | plt.show() 61 | -------------------------------------------------------------------------------- /scripts/fio_tests_configs/1.cfg: -------------------------------------------------------------------------------- 1 | [writetest_10 * 55] 2 | startdelay=10 3 | numjobs=1 4 | blocksize=4k 5 | filename=/media/koder/a5230078-4c27-4c3b-99aa-26148e78b2e7/xxx.bin 6 | rw=randwrite 7 | direct=1 8 | buffered=0 9 | iodepth=1 10 | size=1Gb 11 | runtime=10 12 | time_based 13 | wait_for_previous 14 | 15 | [writetest_20 * 55] 16 | startdelay=10 17 | numjobs=1 18 | blocksize=4k 19 | filename=/media/koder/a5230078-4c27-4c3b-99aa-26148e78b2e7/xxx.bin 20 | rw=randwrite 21 | direct=1 22 | buffered=0 23 | iodepth=1 24 | size=1Gb 25 | runtime=20 26 | time_based 27 | wait_for_previous 28 | 29 | [writetest_30 * 55] 30 | startdelay=10 31 | numjobs=1 32 | blocksize=4k 33 | filename=/media/koder/a5230078-4c27-4c3b-99aa-26148e78b2e7/xxx.bin 34 | rw=randwrite 35 | direct=1 36 | buffered=0 37 | iodepth=1 38 | size=1Gb 39 | 
runtime=30 40 | time_based 41 | wait_for_previous 42 | 43 | [writetest_120 * 55] 44 | startdelay=10 45 | numjobs=1 46 | blocksize=4k 47 | filename=/media/koder/a5230078-4c27-4c3b-99aa-26148e78b2e7/xxx.bin 48 | rw=randwrite 49 | direct=1 50 | buffered=0 51 | iodepth=1 52 | size=1Gb 53 | runtime=120 54 | time_based 55 | wait_for_previous 56 | 57 | [writetest_30_5 * 55] 58 | ramp_time=5 59 | startdelay=10 60 | numjobs=1 61 | blocksize=4k 62 | filename=/media/koder/a5230078-4c27-4c3b-99aa-26148e78b2e7/xxx.bin 63 | rw=randwrite 64 | direct=1 65 | buffered=0 66 | iodepth=1 67 | size=1Gb 68 | runtime=30 69 | time_based 70 | wait_for_previous 71 | 72 | [writetest_30_10 * 55] 73 | ramp_time=10 74 | startdelay=10 75 | numjobs=1 76 | blocksize=4k 77 | filename=/media/koder/a5230078-4c27-4c3b-99aa-26148e78b2e7/xxx.bin 78 | rw=randwrite 79 | direct=1 80 | buffered=0 81 | iodepth=1 82 | size=1Gb 83 | runtime=30 84 | time_based 85 | wait_for_previous 86 | 87 | [writetest_30_15 * 55] 88 | ramp_time=15 89 | startdelay=10 90 | numjobs=1 91 | blocksize=4k 92 | filename=/media/koder/a5230078-4c27-4c3b-99aa-26148e78b2e7/xxx.bin 93 | rw=randwrite 94 | direct=1 95 | buffered=0 96 | iodepth=1 97 | size=1Gb 98 | runtime=30 99 | time_based 100 | wait_for_previous 101 | -------------------------------------------------------------------------------- /scripts/fio_tests_configs/2.cfg: -------------------------------------------------------------------------------- 1 | [writetest_10_20 * 3] 2 | ramp_time=5 3 | numjobs=1 4 | blocksize=4k 5 | filename={FILENAME} 6 | rw=randwrite 7 | direct=1 8 | buffered=0 9 | iodepth=1 10 | size=1Gb 11 | runtime=5 12 | time_based 13 | wait_for_previous 14 | -------------------------------------------------------------------------------- /scripts/fio_tests_configs/io_task.cfg: -------------------------------------------------------------------------------- 1 | [hdd_test] 2 | blocksize=4k 3 | rw=randread 4 | group_reporting=1 5 | buffered=0 6 | iodepth=1 7 | direct=1 8 | filename=/tmp/t/x 9 | size=1m 10 | numjobs=4 11 | ioengine=null 12 | randrepeat=0 13 | # io_size=4g 14 | 15 | -------------------------------------------------------------------------------- /scripts/fio_tests_configs/io_task_test.cfg: -------------------------------------------------------------------------------- 1 | [global] 2 | group_reporting 3 | wait_for_previous 4 | ramp_time=0 5 | filename={FILENAME} 6 | buffered=0 7 | iodepth=1 8 | size=1000m 9 | time_based 10 | RUNTIME=10 11 | 12 | [writetest] 13 | blocksize=4k 14 | rw=randwrite 15 | direct=1 16 | runtime={RUNTIME} 17 | numjobs=1 18 | 19 | [readtest] 20 | numjobs=4 21 | blocksize=4k 22 | rw=randread 23 | direct=1 24 | runtime={RUNTIME} 25 | -------------------------------------------------------------------------------- /scripts/gen_load.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | TESTER="--tester-type fio" 4 | CACHE="--cache-modes d" 5 | REPEATS="--repeats 3" 6 | 7 | # python generate_load.py $TESTER --size 4k --opers randwrite $CACHE --concurrences 1 --direct-default-size x1000 8 | # python generate_load.py $TESTER --size 4k --opers randwrite $CACHE --concurrences 1 --direct-default-size x2000 9 | # python generate_load.py $TESTER --size 4k --opers randwrite $CACHE --concurrences 1 --direct-default-size x4000 10 | # python generate_load.py $TESTER --size 4k --opers randwrite $CACHE --concurrences 1 --direct-default-size x8000 11 | # python generate_load.py $TESTER --size 4k --opers randwrite 
$CACHE --concurrences 1 --direct-default-size x16000 12 | # python generate_load.py $TESTER --size 4k --opers randwrite $CACHE --concurrences 1 --direct-default-size x32000 13 | # python generate_load.py $TESTER --size 4k --opers randwrite $CACHE --concurrences 1 --direct-default-size x64000 14 | # python generate_load.py $TESTER --size 4k --opers randwrite $CACHE --concurrences 1 --direct-default-size x128000 15 | 16 | python generate_load.py $TESTER --size 4k --opers randwrite $CACHE --concurrences 1 4 8 $REPEATS --io-size 10G 17 | python generate_load.py $TESTER --size 4k --opers randread $CACHE --concurrences 1 4 8 $REPEATS --io-size 10G 18 | 19 | python generate_load.py $TESTER --size 4k --opers randwrite --cache-modes s --concurrences 1 $REPEATS --io-size 10G 20 | python generate_load.py $TESTER --size 4k --opers randread randwrite $CACHE --concurrences 1 $REPEATS --io-size 10G 21 | python generate_load.py $TESTER --size 2m --opers read write $CACHE --concurrences 1 $REPEATS --io-size 10G 22 | -------------------------------------------------------------------------------- /scripts/generate_load.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import argparse 3 | 4 | from disk_perf_test_tool.utils import ssize2b 5 | 6 | 7 | def make_list(x): 8 | if not isinstance(x, (list, tuple)): 9 | return [x] 10 | return x 11 | 12 | 13 | def make_load(settings): 14 | 15 | iodepth = 1 16 | for conc in make_list(settings.concurrences): 17 | for bsize in make_list(settings.sizes): 18 | for oper in make_list(settings.opers): 19 | for cache_mode in make_list(settings.cache_modes): 20 | 21 | # filter out too slow options 22 | if bsize in "1k 4k" and cache_mode == "a": 23 | continue 24 | 25 | # filter out sync reads 26 | if oper in "read randread" and cache_mode == "s": 27 | continue 28 | 29 | if settings.io_size is not None: 30 | size_sync_opts = " --iosize " + str(settings.io_size) 31 | if cache_mode == "s": 32 | size_sync_opts += " -s" 33 | elif cache_mode == "d": 34 | size_sync_opts += " -d" 35 | else: 36 | if cache_mode == "s": 37 | size_sync_opts = "--iosize {0} -s".format( 38 | settings.sync_default_size) 39 | elif cache_mode == "d": 40 | if oper == 'randread': 41 | assert settings.sync_default_size[0] == 'x' 42 | max_f = int(settings.sync_default_size[1:]) 43 | else: 44 | max_f = None 45 | 46 | mmax_f = ssize2b(settings.hdd_size) / \ 47 | (int(conc) * ssize2b(bsize)) 48 | 49 | if max_f is None or mmax_f > max_f: 50 | max_f = mmax_f 51 | 52 | assert settings.direct_default_size[0] == 'x' 53 | if max_f > int(settings.direct_default_size[1:]): 54 | max_f = settings.direct_default_size 55 | else: 56 | max_f = "x{0}".format(max_f) 57 | 58 | size_sync_opts = "--iosize {0} -d".format(max_f) 59 | 60 | else: 61 | if oper == 'randread' or oper == 'read': 62 | size_sync_opts = "--iosize " + \ 63 | str(settings.sync_default_size) 64 | else: 65 | size_sync_opts = "--iosize " + \ 66 | str(settings.sync_default_size) 67 | 68 | # size_sync_opts = get_file_size_opts(sync_type) 69 | 70 | io_opts = "--type {0} ".format(settings.tester_type) 71 | io_opts += "-a {0} ".format(oper) 72 | io_opts += "--iodepth {0} ".format(iodepth) 73 | io_opts += "--blocksize {0} ".format(bsize) 74 | io_opts += size_sync_opts + " " 75 | io_opts += "--concurrency {0}".format(conc) 76 | 77 | for i in range(settings.repeats): 78 | yield io_opts 79 | 80 | 81 | def parse_opts(args): 82 | parser = argparse.ArgumentParser() 83 | parser.add_argument('--sizes', nargs="+", required=True) 84 
| parser.add_argument('--opers', nargs="+", required=True) 85 | parser.add_argument('--cache-modes', nargs="+", required=True) 86 | parser.add_argument('--concurrences', nargs="+", required=True) 87 | parser.add_argument('--repeats', type=int, default=3) 88 | parser.add_argument("--hdd-size", default="45G") 89 | parser.add_argument("--tester-type", default="iozone") 90 | parser.add_argument("--io-size", default=None) 91 | 92 | parser.add_argument("--direct-default-size", default="x1000") 93 | parser.add_argument("--sync-default-size", default="x1000") 94 | parser.add_argument("--async-default-size", default="r2") 95 | 96 | return parser.parse_args(args[1:]) 97 | 98 | 99 | def main(args): 100 | opts = parse_opts(args) 101 | for io_opts in make_load(opts): 102 | print "python io.py --test-file /opt/xxx.bin " + io_opts 103 | 104 | if __name__ == "__main__": 105 | exit(main(sys.argv)) 106 | -------------------------------------------------------------------------------- /scripts/grafana.py: -------------------------------------------------------------------------------- 1 | import json 2 | 3 | 4 | query = """ 5 | select value from "{series}" 6 | where $timeFilter and 7 | host='{host}' and device='{device}' 8 | order asc 9 | """ 10 | 11 | 12 | def make_dashboard_file(config): 13 | series = ['writes_completed', 'sectors_written'] 14 | dashboards = [] 15 | 16 | for serie in series: 17 | dashboard = dict(title=serie, type='graph', 18 | span=12, fill=1, linewidth=2, 19 | tooltip={'shared': True}) 20 | 21 | targets = [] 22 | 23 | for ip, devs in config.items(): 24 | for device in devs: 25 | params = { 26 | 'series': serie, 27 | 'host': ip, 28 | 'device': device 29 | } 30 | 31 | target = dict( 32 | target="disk io", 33 | query=query.replace("\n", " ").format(**params).strip(), 34 | interval="", 35 | alias="{0} io {1}".format(ip, device), 36 | rawQuery=True 37 | ) 38 | targets.append(target) 39 | 40 | dashboard['targets'] = targets 41 | dashboards.append(dashboard) 42 | 43 | fc = open("grafana_template.js").read() 44 | return fc % (json.dumps(dashboards),) 45 | 46 | 47 | print make_dashboard_file({'192.168.0.104': ['sda1', 'rbd1']}) 48 | -------------------------------------------------------------------------------- /scripts/grafana_template.js: -------------------------------------------------------------------------------- 1 | /* global _ */ 2 | 3 | /* 4 | * Complex scripted dashboard 5 | * This script generates a dashboard object that Grafana can load. 
It also takes a number of user 6 | * supplied URL parameters (int ARGS variable) 7 | * 8 | * Return a dashboard object, or a function 9 | * 10 | * For async scripts, return a function, this function must take a single callback function as argument, 11 | * call this callback function with the dashboard object (look at scripted_async.js for an example) 12 | */ 13 | 14 | 15 | 16 | // accessable variables in this scope 17 | var window, document, ARGS, $, jQuery, moment, kbn; 18 | 19 | // Setup some variables 20 | var dashboard; 21 | 22 | // All url parameters are available via the ARGS object 23 | var ARGS; 24 | 25 | // Intialize a skeleton with nothing but a rows array and service object 26 | dashboard = {rows : []}; 27 | 28 | // Set a title 29 | dashboard.title = 'Tests dash'; 30 | 31 | // Set default time 32 | // time can be overriden in the url using from/to parameteres, but this is 33 | // handled automatically in grafana core during dashboard initialization 34 | dashboard.time = { 35 | from: "now-5m", 36 | to: "now" 37 | }; 38 | 39 | dashboard.rows.push({ 40 | title: 'Chart', 41 | height: '300px', 42 | panels: %s 43 | }); 44 | 45 | 46 | return dashboard; 47 | -------------------------------------------------------------------------------- /scripts/hdd.fio: -------------------------------------------------------------------------------- 1 | [test] 2 | wait_for_previous=1 3 | group_reporting=1 4 | time_based=1 5 | buffered=0 6 | iodepth=1 7 | softrandommap=1 8 | filename=/media/data/xxx.bin 9 | # filename=/tmp/xxx.bin 10 | randrepeat=0 11 | size=10G 12 | ramp_time=5 13 | runtime=15 14 | blocksize=4k 15 | rw=randwrite 16 | sync=1 17 | direct=1 18 | thread=1 19 | numjobs=50 20 | 21 | -------------------------------------------------------------------------------- /scripts/influx_exporter.py: -------------------------------------------------------------------------------- 1 | from urlparse import urlparse 2 | from influxdb import InfluxDBClient 3 | 4 | 5 | def connect(url): 6 | parsed_url = urlparse(url) 7 | user_passwd, host_port = parsed_url.netloc.rsplit("@", 1) 8 | user, passwd = user_passwd.split(":", 1) 9 | host, port = host_port.split(":") 10 | return InfluxDBClient(host, int(port), user, passwd, parsed_url.path[1:]) 11 | 12 | 13 | def add_data(conn, hostname, data): 14 | per_sensor_data = {} 15 | for serie in data: 16 | serie = serie.copy() 17 | gtime = serie.pop('time') 18 | for key, val in serie.items(): 19 | dev, sensor = key.split('.') 20 | data = per_sensor_data.setdefault(sensor, []) 21 | data.append([gtime, hostname, dev, val]) 22 | 23 | infl_data = [] 24 | columns = ['time', 'host', 'device', 'value'] 25 | for sensor_name, points in per_sensor_data.items(): 26 | infl_data.append( 27 | {'columns': columns, 28 | 'name': sensor_name, 29 | 'points': points}) 30 | 31 | conn.write_points(infl_data) 32 | -------------------------------------------------------------------------------- /scripts/install.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | FULL="$1" 4 | 5 | pushd $(dirname "$0") > /dev/null 6 | SCRIPTPATH=$(pwd -P) 7 | popd > /dev/null 8 | 9 | function install_apt() { 10 | apt-get install -y python-openssl python-pip 11 | } 12 | 13 | function install_yum() { 14 | yum -y install pyOpenSSL python-pip python-ecdsa 15 | } 16 | 17 | if which apt-get >/dev/null; then 18 | install_apt 19 | else 20 | if which yum >/dev/null; then 21 | install_yum 22 | else 23 | echo "Error: Neither apt-get, not yum installed. 
Can't install binary dependencies." 24 | exit 1 25 | fi 26 | fi 27 | 28 | pip install -r "$SCRIPTPATH/../requirements.txt" 29 | 30 | if [ "$FULL" == "--full" ] ; then 31 | pip install -r "$SCRIPTPATH/../requirements_extra.txt" 32 | fi 33 | -------------------------------------------------------------------------------- /scripts/koder.js: -------------------------------------------------------------------------------- 1 | /* global _ */ 2 | 3 | /* 4 | * Complex scripted dashboard 5 | * This script generates a dashboard object that Grafana can load. It also takes a number of user 6 | * supplied URL parameters (int ARGS variable) 7 | * 8 | * Return a dashboard object, or a function 9 | * 10 | * For async scripts, return a function, this function must take a single callback function as argument, 11 | * call this callback function with the dashboard object (look at scripted_async.js for an example) 12 | */ 13 | 14 | 15 | 16 | // accessable variables in this scope 17 | var window, document, ARGS, $, jQuery, moment, kbn; 18 | 19 | // Setup some variables 20 | var dashboard; 21 | 22 | // All url parameters are available via the ARGS object 23 | var ARGS; 24 | 25 | // Intialize a skeleton with nothing but a rows array and service object 26 | dashboard = {rows : []}; 27 | 28 | // Set a title 29 | dashboard.title = 'Tests dash'; 30 | 31 | // Set default time 32 | // time can be overriden in the url using from/to parameteres, but this is 33 | // handled automatically in grafana core during dashboard initialization 34 | dashboard.time = { 35 | from: "now-5m", 36 | to: "now" 37 | }; 38 | 39 | dashboard.rows.push({ 40 | title: 'Chart', 41 | height: '300px', 42 | panels: [{"span": 12, "title": "writes_completed", "linewidth": 2, "type": "graph", "targets": [{"alias": "192.168.0.104 io sda1", "interval": "", "target": "disk io", "rawQuery": true, "query": "select value from \"writes_completed\" where $timeFilter and host='192.168.0.104' and device='sda1' order asc"}, {"alias": "192.168.0.104 io rbd1", "interval": "", "target": "disk io", "rawQuery": true, "query": "select value from \"writes_completed\" where $timeFilter and host='192.168.0.104' and device='rbd1' order asc"}], "tooltip": {"shared": true}, "fill": 1}, {"span": 12, "title": "sectors_written", "linewidth": 2, "type": "graph", "targets": [{"alias": "192.168.0.104 io sda1", "interval": "", "target": "disk io", "rawQuery": true, "query": "select value from \"sectors_written\" where $timeFilter and host='192.168.0.104' and device='sda1' order asc"}, {"alias": "192.168.0.104 io rbd1", "interval": "", "target": "disk io", "rawQuery": true, "query": "select value from \"sectors_written\" where $timeFilter and host='192.168.0.104' and device='rbd1' order asc"}], "tooltip": {"shared": true}, "fill": 1}] 43 | }); 44 | 45 | 46 | return dashboard; 47 | 48 | -------------------------------------------------------------------------------- /scripts/perf.py: -------------------------------------------------------------------------------- 1 | from wally import main 2 | opts = "X -l DEBUG report /tmp/perf_tests/warm_doe".split() 3 | 4 | def x(): 5 | main.main(opts) 6 | 7 | x() 8 | -------------------------------------------------------------------------------- /scripts/postprocessing/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mirantis/disk_perf_test_tool/aaad8b81218d0907e9aa1425e18b1a044f06960d/scripts/postprocessing/__init__.py 
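Note (editorial, not part of the repository): scripts/koder.js above is just the rendered output of scripts/grafana.py for its hard-coded config ({'192.168.0.104': ['sda1', 'rbd1']}) — the script fills the panels placeholder in grafana_template.js and prints the finished scripted dashboard to stdout. Assuming a Python 2 interpreter (the script still uses print statements and a relative template path), it could be regenerated with something like: cd scripts && python2 grafana.py > koder.js. Similarly, scripts/perf.py drives wally's report stage programmatically; the roughly equivalent command-line call would be python3.6 -m wally -l DEBUG report /tmp/perf_tests/warm_doe, where the path is only the example results directory used in that snippet.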
-------------------------------------------------------------------------------- /scripts/postprocessing/io_py_result_processor.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import math 3 | import itertools 4 | 5 | from colorama import Fore, Style 6 | 7 | 8 | def med_dev(vals): 9 | med = sum(vals) / len(vals) 10 | dev = ((sum(abs(med - i) ** 2 for i in vals) / len(vals)) ** 0.5) 11 | return int(med), int(dev) 12 | 13 | 14 | def round_deviation(med_dev): 15 | med, dev = med_dev 16 | 17 | if dev < 1E-7: 18 | return med_dev 19 | 20 | dev_div = 10.0 ** (math.floor(math.log10(dev)) - 1) 21 | dev = int(dev / dev_div) * dev_div 22 | med = int(med / dev_div) * dev_div 23 | return (type(med_dev[0])(med), 24 | type(med_dev[1])(dev)) 25 | 26 | 27 | def groupby_globally(data, key_func): 28 | grouped = {} 29 | grouped_iter = itertools.groupby(data, key_func) 30 | 31 | for (bs, cache_tp, act), curr_data_it in grouped_iter: 32 | key = (bs, cache_tp, act) 33 | grouped.setdefault(key, []).extend(curr_data_it) 34 | 35 | return grouped 36 | 37 | 38 | class Data(object): 39 | def __init__(self, name): 40 | self.name = name 41 | self.series = {} 42 | self.processed_series = {} 43 | 44 | 45 | def process_inplace(data): 46 | processed = {} 47 | for key, values in data.series.items(): 48 | processed[key] = round_deviation(med_dev(values)) 49 | data.processed_series = processed 50 | 51 | 52 | def diff_table(*datas): 53 | res_table = {} 54 | 55 | for key in datas[0].processed_series: 56 | baseline = datas[0].processed_series[key] 57 | base_max = baseline[0] + baseline[1] 58 | base_min = baseline[0] - baseline[1] 59 | 60 | res_line = [baseline] 61 | 62 | for data in datas[1:]: 63 | val, dev = data.processed_series[key] 64 | val_min = val - dev 65 | val_max = val + dev 66 | 67 | diff_1 = int(float(val_min - base_max) / base_max * 100) 68 | diff_2 = int(float(val_max - base_min) / base_max * 100) 69 | 70 | diff_max = max(diff_1, diff_2) 71 | diff_min = min(diff_1, diff_2) 72 | 73 | res_line.append((diff_max, diff_min)) 74 | res_table[key] = res_line 75 | 76 | return [data.name for data in datas], res_table 77 | 78 | 79 | def print_table(headers, table): 80 | lines = [] 81 | items = sorted(table.items()) 82 | lines.append([(len(i), i) for i in [""] + headers]) 83 | item_frmt = "{0}{1:>4}{2} ~ {3}{4:>4}{5}" 84 | 85 | for key, vals in items: 86 | ln1 = "{0:>4} {1} {2:>9} {3}".format(*map(str, key)) 87 | ln2 = "{0:>4} ~ {1:>3}".format(*vals[0]) 88 | 89 | line = [(len(ln1), ln1), (len(ln2), ln2)] 90 | 91 | for idx, val in enumerate(vals[1:], 2): 92 | cval = [] 93 | for vl in val: 94 | if vl < -10: 95 | cval.extend([Fore.RED, vl, Style.RESET_ALL]) 96 | elif vl > 10: 97 | cval.extend([Fore.GREEN, vl, Style.RESET_ALL]) 98 | else: 99 | cval.extend(["", vl, ""]) 100 | 101 | ln = len(item_frmt.format("", cval[1], "", "", cval[4], "")) 102 | line.append((ln, item_frmt.format(*cval))) 103 | 104 | lines.append(line) 105 | 106 | max_columns_with = [] 107 | for idx in range(len(lines[0])): 108 | max_columns_with.append( 109 | max(line[idx][0] for line in lines)) 110 | 111 | sep = '-' * (4 + sum(max_columns_with) + 3 * (len(lines[0]) - 1)) 112 | 113 | print sep 114 | for idx, line in enumerate(lines): 115 | cline = [] 116 | for (curr_len, txt), exp_ln in zip(line, max_columns_with): 117 | cline.append(" " * (exp_ln - curr_len) + txt) 118 | print "| " + " | ".join(cline) + " |" 119 | if 0 == idx: 120 | print sep 121 | print sep 122 | 123 | 124 | def key_func(x): 125 | return 
(x['__meta__']['blocksize'], 126 | 'd' if 'direct' in x['__meta__'] else 's', 127 | x['__meta__']['name']) 128 | 129 | 130 | template = "{bs:>4} {action:>12} {cache_tp:>3} {conc:>4}" 131 | template += " | {iops[0]:>6} ~ {iops[1]:>5} | {bw[0]:>7} ~ {bw[1]:>6}" 132 | template += " | {lat[0]:>6} ~ {lat[1]:>5} |" 133 | 134 | headers = dict(bs="BS", 135 | action="operation", 136 | cache_tp="S/D", 137 | conc="CONC", 138 | iops=("IOPS", "dev"), 139 | bw=("BW kBps", "dev"), 140 | lat=("LAT ms", "dev")) 141 | 142 | 143 | def load_io_py_file(fname): 144 | with open(fname) as fc: 145 | block = None 146 | for line in fc: 147 | if line.startswith("{"): 148 | block = line 149 | elif block is not None: 150 | block += line 151 | 152 | if block is not None and block.count('}') == block.count('{'): 153 | cut = block.rfind('}') 154 | block = block[0:cut+1] 155 | yield eval(block) 156 | block = None 157 | 158 | if block is not None and block.count('}') == block.count('{'): 159 | yield eval(block) 160 | 161 | 162 | def main(argv): 163 | items = [] 164 | CONC_POS = 3 165 | for hdr_fname in argv[1:]: 166 | hdr, fname = hdr_fname.split("=", 1) 167 | data = list(load_io_py_file(fname)) 168 | item = Data(hdr) 169 | for key, vals in groupby_globally(data, key_func).items(): 170 | item.series[key] = [val['iops'] * key[CONC_POS] for val in vals] 171 | process_inplace(item) 172 | items.append(item) 173 | 174 | print_table(*diff_table(*items)) 175 | 176 | # print template.format(**headers) 177 | 178 | # for (bs, cache_tp, act, conc), curr_data in sorted(grouped.items()): 179 | # iops = med_dev([i['iops'] * int(conc) for i in curr_data]) 180 | # bw = med_dev([i['bw'] * int(conc) for i in curr_data]) 181 | # lat = med_dev([i['lat'] / 1000 for i in curr_data]) 182 | 183 | # iops = round_deviation(iops) 184 | # bw = round_deviation(bw) 185 | # lat = round_deviation(lat) 186 | 187 | # params = dict( 188 | # bs=bs, 189 | # action=act, 190 | # cache_tp=cache_tp, 191 | # iops=iops, 192 | # bw=bw, 193 | # lat=lat, 194 | # conc=conc 195 | # ) 196 | 197 | # print template.format(**params) 198 | 199 | 200 | if __name__ == "__main__": 201 | exit(main(sys.argv)) 202 | 203 | # vals = [(123, 23), (125678, 5678), (123.546756, 23.77), 204 | # (123.546756, 102.77), (0.1234, 0.0224), 205 | # (0.001234, 0.000224), (0.001234, 0.0000224)] 206 | # for val in : 207 | # print val, "=>", round_deviation(val) 208 | -------------------------------------------------------------------------------- /scripts/postprocessing/stat.py: -------------------------------------------------------------------------------- 1 | import sys 2 | import time 3 | 4 | from copy import deepcopy 5 | 6 | import numpy 7 | import scipy.optimize as scp 8 | import matplotlib.pyplot as plt 9 | 10 | import io_py_result_processor as io_test 11 | 12 | key_pos = {'blocksize': 0, 'direct': 1, 'name': 2} 13 | actions = ['randwrite', 'randread', 'read', 'write'] 14 | types = ['s', 'd'] 15 | colors = ['red', 'green', 'blue', 'cyan', 16 | 'magenta', 'black', 'yellow', 'burlywood'] 17 | 18 | 19 | def get_key(x, no): 20 | """ x = (), no = key_pos key """ 21 | keys = deepcopy(key_pos) 22 | del keys[no] 23 | key = [x[n] for n in keys.values()] 24 | return tuple(key), x[key_pos[no]] 25 | 26 | 27 | def generate_groups(data, group_id): 28 | """ select data for plot by group_id 29 | data - processed_series""" 30 | grouped = {} 31 | 32 | for key, val in data.items(): 33 | new_key, group_val = get_key(key, group_id) 34 | group = grouped.setdefault(new_key, {}) 35 | group[group_val] = val 36 | 37 | 
return grouped 38 | 39 | 40 | def gen_dots(val): 41 | """Generate dots from real data 42 | val = dict (x:y) 43 | return ox, oy lists """ 44 | oy = [] 45 | ox = [] 46 | for x in sorted(val.keys()): 47 | ox.append(int(x[:-1])) 48 | if val[x][0] != 0: 49 | oy.append(1.0/val[x][0]) 50 | else: 51 | oy.append(0) 52 | return ox, oy 53 | 54 | 55 | def gen_line_numpy(x, y): 56 | A = numpy.vstack([x, numpy.ones(len(x))]).T 57 | coef = numpy.linalg.lstsq(A, y)[0] 58 | funcLine = lambda tpl, x: tpl[0] * x + tpl[1] 59 | print coef 60 | return x, funcLine(coef, x) 61 | 62 | 63 | def gen_line_scipy(x, y): 64 | funcLine = lambda tpl, x: tpl[0] * x + tpl[1] 65 | ErrorFunc = lambda tpl, x, y: 1.0 - y/funcLine(tpl, x) 66 | tplInitial = (1.0, 0.0) 67 | # print x, y 68 | tplFinal, success = scp.leastsq(ErrorFunc, tplInitial[:], args=(x, y), 69 | diag=(1./x.mean(), 1./y.mean())) 70 | if success not in range(1, 4): 71 | raise ValueError("No line for this dots") 72 | xx = numpy.linspace(x.min(), x.max(), 50) 73 | print tplFinal 74 | # print x, ErrorFunc(tplFinal, x, y) 75 | return xx, funcLine(tplFinal, xx) 76 | 77 | 78 | def gen_app_plot(key, val, plot, color): 79 | """ Plots with fake line and real dots around""" 80 | ox, oy = gen_dots(val) 81 | name = "_".join(str(k) for k in key) 82 | if len(ox) < 2: 83 | # skip single dots 84 | return False 85 | # create approximation 86 | x = numpy.array(ox)#numpy.log(ox)) 87 | y = numpy.array(oy)#numpy.log(oy)) 88 | print x, y 89 | try: 90 | print name 91 | x1, y1 = gen_line_scipy(x, y) 92 | plot.plot(x1, y1, color=color) 93 | # 94 | #plot.loglog(x1, y1, color=color) 95 | except ValueError: 96 | # just don't draw it - it's ok 97 | # we'll see no appr and bad dots 98 | # not return False, because we need see dots 99 | pass 100 | plot.plot(x, y, '^', label=name, markersize=7, color=color) 101 | #plot.loglog(x, y, '^', label=name, markersize=7, color=color) 102 | return True 103 | 104 | 105 | def save_plot(key, val): 106 | """ one plot from one dict item with value list""" 107 | ox, oy = gen_dots(val) 108 | name = "_".join(str(k) for k in key) 109 | plt.plot(ox, oy, label=name) 110 | 111 | 112 | def plot_generation(fname, group_by): 113 | """ plots for value group_by in imgs by actions""" 114 | data = list(io_test.load_io_py_file(fname)) 115 | item = io_test.Data("hdr") 116 | for key, vals in io_test.groupby_globally(data, io_test.key_func).items(): 117 | item.series[key] = [val['iops'] for val in vals] 118 | io_test.process_inplace(item) 119 | 120 | pr_data = generate_groups(item.processed_series, group_by) 121 | print pr_data 122 | 123 | #fig = plt.figure() 124 | plot = plt.subplot(111) 125 | 126 | for action in actions: 127 | for tp in types: 128 | color = 0 129 | hasPlot = False 130 | for key, val in pr_data.items(): 131 | if action in key and tp in key: 132 | ok = gen_app_plot(key, val, plot, colors[color]) 133 | hasPlot = hasPlot or ok 134 | color += 1 135 | # use it for just connect dots 136 | #save_plot(key, val) 137 | if hasPlot: 138 | # Shrink current axis by 10% 139 | box = plot.get_position() 140 | plot.set_position([box.x0, box.y0 + box.height * 0.1, 141 | box.width, box.height * 0.9]) 142 | 143 | # Put a legend to the bottom 144 | plot.legend(loc='lower center', bbox_to_anchor=(0.5, -0.25), 145 | fancybox=True, shadow=True, ncol=4, 146 | fontsize='xx-small') 147 | plt.title("Plot for %s on %s" % (group_by, action)) 148 | plt.ylabel("time") 149 | plt.xlabel(group_by) 150 | plt.grid() 151 | # use it if want scale plot somehow 152 | # plt.axis([0.0, 5000.0, 0.0, 
64.0]) 153 | name = "%s__%s_%s.png" % (group_by, action, tp) 154 | plt.savefig(name, format='png', dpi=100) 155 | plt.clf() 156 | plot = plt.subplot(111) 157 | color = 0 158 | 159 | 160 | def deviation_on_deviation(groups_list, data): 161 | """ calc deviation of data all and by selection groups""" 162 | total_dev = io_test.round_deviation(io_test.med_dev(data)) 163 | grouped_dev = [total_dev] 164 | for group in groups_list: 165 | beg = 0 166 | end = group 167 | local_dev = [] 168 | while end <= len(data): 169 | local_dev.append(io_test.round_deviation(io_test.med_dev(data[beg:end]))[0]) 170 | beg += group 171 | end += group 172 | grouped_dev.append(io_test.round_deviation(io_test.med_dev(local_dev))) 173 | return grouped_dev 174 | 175 | 176 | 177 | def deviation_generation(fname, groups_list): 178 | """ Print deviation by groups for data from fname """ 179 | CONC_POS = key_pos['concurence'] 180 | int_list = [int(i) for i in groups_list] 181 | data = list(io_test.load_io_py_file(fname)) 182 | item = io_test.Data("hdr") 183 | for key, vals in io_test.groupby_globally(data, io_test.key_func).items(): 184 | item.series[key] = [val['iops'] * key[CONC_POS] for val in vals] 185 | print deviation_on_deviation(int_list, item.series[key]) 186 | 187 | 188 | def main(argv): 189 | if argv[1] == "plot": 190 | plot_generation(argv[2], argv[3]) 191 | elif argv[1] == "dev": 192 | deviation_generation(argv[2], argv[3:]) 193 | 194 | 195 | if __name__ == "__main__": 196 | exit(main(sys.argv)) 197 | 198 | 199 | 200 | 201 | -------------------------------------------------------------------------------- /scripts/prepare.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | function lookup_for_objects() { 5 | set +e 6 | 7 | echo -n "Looking for image $IMAGE_NAME ... " 8 | export img_id=$(nova $INSECURE image-list | grep " $IMAGE_NAME " | awk '{print $2}') 9 | if [ ! -z "$img_id" ] ; then 10 | echo " Found" 11 | else 12 | echo " Not Found" 13 | fi 14 | 15 | echo -n "Looking for flavor $FLAVOR_NAME ... " 16 | export flavor_id=$(nova $INSECURE flavor-list | grep " $FLAVOR_NAME " | awk '{print $2}') 17 | if [ ! -z "$flavor_id" ] ; then 18 | echo " Found" 19 | else 20 | echo " Not Found" 21 | fi 22 | 23 | groups_ids="" 24 | export missed_groups="" 25 | for SERV_GROUP in $SERV_GROUPS ; do 26 | echo -n "Looking for server-group $SERV_GROUP ... " 27 | group_id=$(nova $INSECURE server-group-list | grep " $SERV_GROUP " | awk '{print $2}' ) 28 | if [ ! -z "$group_id" ] ; then 29 | echo " Found" 30 | export groups_ids="$groups_ids $group_id" 31 | else 32 | echo " Not Found" 33 | export missed_groups="$missed_groups $SERV_GROUP" 34 | fi 35 | done 36 | 37 | if [ ! -z "$KEYPAIR_NAME" ] ; then 38 | echo -n "Looking for keypair $KEYPAIR_NAME ... " 39 | export keypair_id=$(nova $INSECURE keypair-list | grep " $KEYPAIR_NAME " | awk '{print $2}' ) 40 | if [ ! -z "$keypair_id" ] ; then 41 | echo " Found" 42 | else 43 | echo " Not Found" 44 | fi 45 | fi 46 | 47 | echo -n "Looking for security group $SECGROUP ... " 48 | export secgroup_id=$(nova $INSECURE secgroup-list | grep " $SECGROUP " | awk '{print $2}' ) 49 | if [ ! -z "$secgroup_id" ] ; then 50 | echo " Found" 51 | else 52 | echo " Not Found" 53 | fi 54 | 55 | set -e 56 | } 57 | 58 | function clean() { 59 | lookup_for_objects 60 | 61 | if [ ! -z "$img_id" ] ; then 62 | echo "Deleting $IMAGE_NAME image" 63 | nova $INSECURE image-delete "$img_id" >/dev/null 64 | fi 65 | 66 | if [ ! 
-z "$flavor_id" ] ; then 67 | echo "Deleting $FLAVOR_NAME flavor" 68 | nova $INSECURE flavor-delete "$flavor_id" >/dev/null 69 | fi 70 | 71 | for group_id in $groups_ids ; do 72 | echo "Deleting server-group $SERV_GROUP" 73 | nova $INSECURE server-group-delete "$group_id" >/dev/null 74 | done 75 | 76 | if [ ! -z "$keypair_id" ] ; then 77 | echo "deleting keypair $KEYPAIR_NAME" 78 | nova $INSECURE keypair-delete "$KEYPAIR_NAME" >/dev/null 79 | fi 80 | 81 | if [ -f "$KEY_FILE_NAME" ] ; then 82 | echo "deleting keypair file $KEY_FILE_NAME" 83 | rm -f "$KEY_FILE_NAME" 84 | fi 85 | 86 | if [ ! -z "$secgroup_id" ] ; then 87 | nova $INSECURE secgroup-delete $SECGROUP >/dev/null 88 | fi 89 | } 90 | 91 | function prepare() { 92 | if [ "$OS_INSECURE" -eq "1" ] ; then 93 | export INSECURE="--insecure" 94 | fi 95 | 96 | lookup_for_objects 97 | 98 | if [ -z "$img_id" ] ; then 99 | echo "Creating $IMAGE_NAME image" 100 | 101 | # opts="--disk-format qcow2 --container-format bare --is-public true" 102 | # glance $INSECURE image-create --name "$IMAGE_NAME" $opts --copy-from "$IMAGE_URL" >/dev/null 103 | 104 | IMAGE_FILE="/tmp/${IMAGE_NAME}.qcow" 105 | if [ ! -f "$IMAGE_FILE" ] ; then 106 | curl "$IMAGE_URL" -o "$IMAGE_FILE" 2>&1 >/dev/null 107 | fi 108 | opts="--disk-format qcow2 --container-format bare --is-public true" 109 | glance $INSECURE image-create --name "$IMAGE_NAME" $opts --file "$IMAGE_FILE" >/dev/null 110 | echo "Image created, but may need a time to became active" 111 | fi 112 | 113 | if [ -z "$flavor_id" ] ; then 114 | echo "Creating flavor $FLAVOR_NAME" 115 | nova $INSECURE flavor-create "$FLAVOR_NAME" "$FLAVOR_NAME" "$FLAVOR_RAM" "$FLAVOR_HDD" "$FLAVOR_CPU_COUNT" >/dev/null 116 | fi 117 | 118 | for SERV_GROUP in $missed_groups ; do 119 | echo "Creating server group $SERV_GROUP" 120 | nova $INSECURE server-group-create --policy anti-affinity "$SERV_GROUP" >/dev/null 121 | group_id=$(nova $INSECURE server-group-list | grep " $SERV_GROUP " | awk '{print $2}' ) 122 | export groups_ids="$groups_ids $group_id" 123 | done 124 | 125 | if [ ! -z "$KEYPAIR_NAME" ] ; then 126 | if [ -z "$keypair_id" ] ; then 127 | echo "Creating server group $SERV_GROUP. 
Key would be stored into $KEY_FILE_NAME" 128 | nova $INSECURE keypair-add "$KEYPAIR_NAME" > "$KEY_FILE_NAME" 129 | chmod og= "$KEY_FILE_NAME" 130 | fi 131 | fi 132 | 133 | if [ -z "$secgroup_id" ] ; then 134 | echo "Adding rules for ping and ssh" 135 | nova $INSECURE secgroup-create $SECGROUP $SECGROUP >/dev/null 136 | nova $INSECURE secgroup-add-rule $SECGROUP icmp -1 -1 0.0.0.0/0 >/dev/null 137 | nova $INSECURE secgroup-add-rule $SECGROUP tcp 22 22 0.0.0.0/0 >/dev/null 138 | fi 139 | } 140 | 141 | if [ "$1" = "--clear" ] ; then 142 | clean 143 | else 144 | prepare 145 | fi 146 | -------------------------------------------------------------------------------- /scripts/receiver.py: -------------------------------------------------------------------------------- 1 | from .api import start_monitoring, Empty 2 | # from influx_exporter import connect, add_data 3 | 4 | uri = "udp://192.168.0.104:12001" 5 | # infldb_url = "influxdb://perf:perf@192.168.152.42:8086/perf" 6 | # conn = connect(infldb_url) 7 | 8 | monitor_config = {'127.0.0.1': 9 | {"block-io": {'allowed_prefixes': ['sda1', 'rbd1']}, 10 | "net-io": {"allowed_prefixes": ["virbr2"]}}} 11 | 12 | with start_monitoring(uri, monitor_config) as queue: 13 | while True: 14 | try: 15 | (ip, port), data = queue.get(True, 1) 16 | print (ip, port), data 17 | # add_data(conn, ip, [data]) 18 | except Empty: 19 | pass 20 | -------------------------------------------------------------------------------- /scripts/run.py: -------------------------------------------------------------------------------- 1 | import sys 2 | from wally.main import main 3 | 4 | exit(main(sys.argv)) 5 | 6 | -------------------------------------------------------------------------------- /scripts/run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | # don't forget to change it in prepare.sh as well 5 | my_dir="$(dirname "$0")" 6 | source "$my_dir/config.sh" 7 | 8 | BLOCK_SIZES="1k 4k 64k 256k 1m" 9 | OPERATIONS="randwrite write randread read" 10 | SYNC_TYPES="s a d" 11 | REPEAT_COUNT="3" 12 | CONCURRENCES="1 8 64" 13 | IODEPTHS="16" 14 | 15 | 16 | SYNC_FACTOR="x500" 17 | DIRECT_FACTOR="x500" 18 | ASYNC_FACTOR="r2" 19 | 20 | 21 | function get_file_size_opts() { 22 | SYNC_TYPE="$1" 23 | if [ "$SYNC_TYPE" = "s" ] ; then 24 | echo "--iosize $SYNC_FACTOR -s" 25 | elif [ "$SYNC_TYPE" = "d" ] ; then 26 | echo "--iosize $DIRECT_FACTOR -d" 27 | else 28 | echo "--iosize $ASYNC_FACTOR" 29 | fi 30 | } 31 | 32 | function echo_combinations() { 33 | for IODEPTH in $IODEPTHS ; do 34 | for CONCURRENCE in $CONCURRENCES ; do 35 | for BSIZE in $BLOCK_SIZES ; do 36 | for OPERATION in $OPERATIONS ; do 37 | for SYNC_TYPE in $SYNC_TYPES ; do 38 | 39 | # filter out too slow options 40 | if [ "$BSIZE" = "1k" -o "$BSIZE" = "4k" ] ; then 41 | if [ "$SYNC_TYPE" = "a" ] ; then 42 | continue 43 | fi 44 | fi 45 | 46 | # filter out sync reads 47 | if [ "$OPERATION" = "read" -o "$OPERATION" = "randread" ] ; then 48 | if [ "$SYNC_TYPE" = "s" ] ; then 49 | continue 50 | fi 51 | fi 52 | 53 | FILE_SIZE_AND_SYNC=$(get_file_size_opts "$SYNC_TYPE") 54 | 55 | 56 | IO_OPTS="--type $TESTER_TYPE " 57 | IO_OPTS="$IO_OPTS -a $OPERATION " 58 | IO_OPTS="$IO_OPTS --iodepth $IODEPTH " 59 | IO_OPTS="$IO_OPTS --blocksize $BSIZE " 60 | IO_OPTS="$IO_OPTS $FILE_SIZE_AND_SYNC " 61 | IO_OPTS="$IO_OPTS --concurrency $CONCURRENCE" 62 | 63 | for COUNTER in $(seq 1 $REPEAT_COUNT) ; do 64 | echo $IO_OPTS 65 | done 66 | done 67 | done 68 | done 69 | done 70 | done 71 | } 
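# Illustration (not part of the original script): with the defaults above, and assuming
# TESTER_TYPE=fio is provided by config.sh (config.sh is not shown in this dump), one line
# emitted by echo_combinations looks like:
#   --type fio -a randwrite --iodepth 16 --blocksize 4k --iosize x500 -d --concurrency 1
# Each such line is one io.py option set; run_test() below consumes a file of these lines.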
72 | 73 | 74 | function run_test() { 75 | OPTION_FILE="$1" 76 | 77 | if [ ! -f "$OPTION_FILE" ] ; then 78 | echo "Path to file with io.py options list should be passed" 79 | exit 1 80 | fi 81 | 82 | if [ "$RUNNER" = "ssh" ] ; then 83 | GROUP_ID=$(nova server-group-list | grep " $SERV_GROUP " | awk '{print $2}' ) 84 | EXTRA_OPTS="user=$IMAGE_USER" 85 | EXTRA_OPTS="${EXTRA_OPTS},keypair_name=$KEYPAIR_NAME" 86 | EXTRA_OPTS="${EXTRA_OPTS},img_name=$IMAGE_NAME" 87 | EXTRA_OPTS="${EXTRA_OPTS},flavor_name=$FLAVOR_NAME" 88 | EXTRA_OPTS="${EXTRA_OPTS},network_zone_name=$NETWORK_ZONE_NAME" 89 | EXTRA_OPTS="${EXTRA_OPTS},flt_ip_pool=$FL_NETWORK_ZONE_NAME" 90 | EXTRA_OPTS="${EXTRA_OPTS},key_file=$KEY_FILE_NAME" 91 | EXTRA_OPTS="${EXTRA_OPTS},aff_group=$GROUP_ID" 92 | EXTRA_OPTS="${EXTRA_OPTS},count=$VM_COUNT" 93 | else 94 | echo "Unsupported runner $RUNNER" 95 | exit 1 96 | fi 97 | 98 | RUN_TEST_OPTS="-t io -l --runner $RUNNER" 99 | set -x 100 | python run_test.py $RUN_TEST_OPTS --create-vms-opts="$EXTRA_OPTS" -f "$OPTION_FILE" $TESTER_TYPE 101 | set +x 102 | } 103 | 104 | if [ "$1" = '--prepare-opts' ] ; then 105 | echo_combinations 106 | else 107 | run_test $1 108 | fi 109 | 110 | -------------------------------------------------------------------------------- /scripts/run_all_tests.sh: -------------------------------------------------------------------------------- 1 | SSH_PASS=$(sshpass) 2 | export SSH_OPTS="-o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no" 3 | 4 | if [ -z SSH_PASS ]; then 5 | sudo apt-get install sshpass 6 | echo 'All dependencies has been installed' 7 | fi 8 | 9 | DEBS=`download_debs` 10 | echo "Debs has been downloaded" 11 | 12 | bash run_test.sh 172.16.52.108 172.16.55.2 disk_io_perf.pem file_to_test.dat result.txt 13 | bash run_test.sh 172.16.52.108 172.16.55.2 disk_io_perf.pem file_to_test.dat result.txt 14 | bash run_test.sh 172.16.52.108 172.16.55.2 disk_io_perf.pem file_to_test.dat result.txt 15 | bash run_test.sh 172.16.52.108 172.16.55.2 disk_io_perf.pem file_to_test.dat result.txt -------------------------------------------------------------------------------- /scripts/run_test.sh: -------------------------------------------------------------------------------- 1 | function get_arguments() { 2 | 3 | export FUEL_MASTER_IP=$1 4 | 5 | if [ -z "${FUEL_MASTER_IP}" ]; then echo "Fuel master node ip is not provided"; fi 6 | 7 | export EXTERNAL_IP=$2 8 | 9 | if [ -z "${EXTERNAL_IP}" ]; then echo "Fuel external ip is not provided"; fi 10 | 11 | export KEY_FILE_NAME=$3 12 | 13 | if [ -z "${KEY_FILE_NAME}" ]; then echo "Key file name is not provided"; fi 14 | 15 | export FILE_TO_TEST=$4 16 | 17 | if [ -z "${KEY_FILE_NAME}" ]; then echo "Key file name is not provided"; fi 18 | 19 | if [ ! -f $KEY_FILE_NAME ]; 20 | then 21 | echo "File $KEY_FILE_NAME does not exist." 
22 | fi 23 | 24 | export RESULT_FILE=$5 25 | 26 | if [ -z "${RESULT_FILE}" ]; then echo "Result file name is not provided"; fi 27 | 28 | export FUEL_MASTER_PASSWD=${6:-test37} 29 | export TIMEOUT=${7:-360} 30 | 31 | 32 | echo "Fuel master IP: $FUEL_MASTER_IP" 33 | echo "Fuel master password: $FUEL_MASTER_PASSWD" 34 | echo "External IP: $EXTERNAL_IP" 35 | echo "Key file name: $KEY_FILE_NAME" 36 | echo "Timeout: $TIMEOUT" 37 | } 38 | 39 | # note : function will works properly only when image dame is single string without spaces that can brake awk 40 | function wait_image_active() { 41 | image_state="none" 42 | image_name="$IMAGE_NAME" 43 | counter=0 44 | 45 | while [ ["$image_state" == "active"] ] ; do 46 | sleep 1 47 | image_state=$(glance image-list | grep "$image_name" | awk '{print $12}') 48 | echo $image_state 49 | counter=$((counter + 1)) 50 | 51 | if [ "$counter" -eq "$TIMEOUT" ] 52 | then 53 | echo "Time limit exceed" 54 | break 55 | fi 56 | done 57 | } 58 | 59 | 60 | function wait_floating_ip() { 61 | floating_ip="|" 62 | vm_name=$VM_NAME 63 | counter=0 64 | 65 | while [ "$floating_ip" != "|" ] ; do 66 | sleep 1 67 | floating_ip=$(nova floating-ip-list | grep "$vm_name" | awk '{print $13}' | head -1) 68 | counter=$((counter + 1)) 69 | 70 | if [ $counter -eq $TIMEOUT ] 71 | then 72 | echo "Time limit exceed" 73 | break 74 | fi 75 | done 76 | } 77 | 78 | 79 | function wait_vm_deleted() { 80 | vm_name=$(nova list| grep "$VM_NAME"| awk '{print $4}'| head -1) 81 | counter=0 82 | 83 | while [ ! -z $vm_name ] ; do 84 | sleep 1 85 | vm_name=$(nova list| grep "$VM_NAME"| awk '{print $4}'| head -1) 86 | counter=$((counter + 1)) 87 | 88 | if [ "$counter" -eq $TIMEOUT ] 89 | then 90 | echo "Time limit exceed" 91 | break 92 | fi 93 | done 94 | } 95 | 96 | 97 | function get_floating_ip() { 98 | IP=$(nova floating-ip-list | grep "$FLOATING_NET" | awk '{if ($5 == "-") print $2}' | head -n1) 99 | 100 | if [ -z "$IP" ]; then # fix net name 101 | IP=$(nova floating-ip-create "$FLOATING_NET"| awk '{print $2}') 102 | 103 | if [ -z "$list" ]; then 104 | echo "Cannot allocate new floating ip" 105 | # exit 106 | fi 107 | fi 108 | 109 | echo $FLOATING_NET 110 | export VM_IP=$IP 111 | echo "VM_IP: $VM_IP" 112 | } 113 | 114 | function run_openrc() { 115 | source run_vm.sh "$FUEL_MASTER_IP" "$FUEL_MASTER_PASSWD" "$EXTERNAL_IP" novanetwork nova 116 | source `get_openrc` 117 | 118 | list=$(nova list) 119 | if [ "$list" == "" ]; then 120 | echo "openrc variables are unset or set to the empty string" 121 | fi 122 | 123 | echo "AUTH_URL: $OS_AUTH_URL" 124 | } 125 | 126 | get_arguments $@ 127 | 128 | echo "getting openrc from controller node" 129 | run_openrc 130 | 131 | echo "openrc has been activated on your machine" 132 | get_floating_ip 133 | 134 | echo "floating ip has been found" 135 | bash prepare.sh 136 | echo "Image has been sended to glance" 137 | wait_image_active 138 | echo "Image has been saved" 139 | 140 | BOOT_LOG_FILE=`tempfile` 141 | boot_vm | tee "$BOOT_LOG_FILE" 142 | VOL_ID=$(cat "$BOOT_LOG_FILE" | grep "VOL_ID=" | sed 's/VOL_ID=//') 143 | rm "$BOOT_LOG_FILE" 144 | 145 | echo "VM has been booted" 146 | wait_floating_ip 147 | echo "Floating IP has been obtained" 148 | source `prepare_vm` 149 | echo "VM has been prepared" 150 | 151 | # sudo bash ../single_node_test_short.sh $FILE_TO_TEST $RESULT_FILE 152 | 153 | ssh $SSH_OPTS -i $KEY_FILE_NAME ubuntu@$VM_IP \ 154 | "cd /tmp/io_scenario;" 155 | 156 | # echo 'results' > $RESULT_FILE; \ 157 | # curl -X POST -d @$RESULT_FILE 
http://http://172.16.52.80/api/test --header 'Content-Type:application/json' 158 | 159 | # nova delete $VM_NAME 160 | # wait_vm_deleted 161 | # echo "$VM_NAME has been deleted successfully" 162 | # cinder delete $VOL_ID 163 | # echo "Volume has been deleted $VOL_ID" 164 | -------------------------------------------------------------------------------- /scripts/run_vm.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | MASTER_IP=$1 3 | FUEL_PASSWD=$2 4 | NEW_IP=$3 5 | FIXED_NET_NAME=$4 6 | FLOATING_NET=$5 7 | VM_NAME=disk-io-test 8 | 9 | # VM_IP=$(nova floating-ip-create "$FLOATIN_NET" | grep "$FLOATIN_NET" | awk '{print $2}') 10 | VM_IP=172.16.53.23 11 | OS_ORIGIN_IP=10.20.0.129 12 | OS_EXT_IP=172.16.53.66 13 | 14 | 15 | 16 | FIXED_NET_NAME="novanetwork" 17 | FLOATING_NET="nova" 18 | 19 | my_dir="$(dirname -- "$0")" 20 | source "$my_dir/config.sh" 21 | SSH_OVER_MASTER="sshpass -p${FUEL_PASSWD} ssh root@${MASTER_IP}" 22 | VOLUME_NAME="test-volume" 23 | VOLUME_SIZE=20 24 | VOLUME_DEVICE="/dev/vdb" 25 | 26 | 27 | function get_openrc() { 28 | echo "get openrc" 29 | OPENRC=`tempfile` 30 | CONTROLLER_NODE=$($SSH_OVER_MASTER fuel node | grep controller | awk '-F|' '{gsub(" ", "", $5); print $5}') 31 | $SSH_OVER_MASTER ssh $CONTROLLER_NODE cat openrc 2>/dev/null | \ 32 | sed -r 's/(\b[0-9]{1,3}\.){3}[0-9]{1,3}\b'/$NEW_IP/ > $OPENRC 33 | echo $OPENRC 34 | } 35 | 36 | function wait_vm_active() { 37 | vm_state="none" 38 | vm_name=$VM_NAME 39 | counter=0 40 | 41 | while [ $vm_state != "ACTIVE" ] ; do 42 | sleep 1 43 | vm_state=$(nova list | grep $vm_name | awk '{print $6}') 44 | counter=$((counter + 1)) 45 | 46 | if [ $counter -eq $TIMEOUT ] 47 | then 48 | echo "Time limit exceed" 49 | break 50 | fi 51 | done 52 | } 53 | 54 | function boot_vm() { 55 | FIXED_NET_ID=$(nova net-list | grep "\b${FIXED_NET_NAME}\b" | awk '{print $2}') 56 | echo "FIXED NET id : $FIXED_NET_ID" 57 | sleep 10 58 | 59 | VOL_ID=$(cinder create --display-name $VOLUME_NAME $VOLUME_SIZE | grep '\bid\b' | grep available | awk '{print $4}') 60 | 61 | if [ -z $VOL_ID ]; then 62 | VOL_ID=$(cinder list | grep test-volume | grep available| awk '{print $2}'| head -1) 63 | fi 64 | 65 | nova boot --flavor "$FLAVOR_NAME" --image "$IMAGE_NAME" --key-name "$KEYPAIR_NAME" --security-groups default --nic net-id=$FIXED_NET_ID $VM_NAME >/dev/null 66 | wait_vm_active $VM_NAME 67 | 68 | nova floating-ip-associate $VM_NAME $VM_IP 69 | 70 | nova volume-attach $VM_NAME $VOL_ID $VOLUME_DEVICE >/dev/null 71 | echo "VOL_ID=$VOL_ID" 72 | } 73 | 74 | function prepare_vm() { 75 | echo "Copy io scenario folded" 76 | scp -i "$KEY_FILE_NAME" -r ../tests ubuntu@${VM_IP}:/tmp >/dev/null 77 | 78 | echo "Copy DEBS packages" 79 | scp -i "$KEY_FILE_NAME" $DEBS ubuntu@${VM_IP}:/tmp >/dev/null 80 | 81 | echo "Copy single_node_test_short" 82 | scp -i "$KEY_FILE_NAME" single_node_test_short.sh ubuntu@${VM_IP}:/tmp >/dev/null 83 | 84 | echo "dpkg on vm" 85 | ssh $SSH_OPTS -i "$KEY_FILE_NAME" ubuntu@${VM_IP} sudo dpkg -i $DEBS >/dev/null 86 | } 87 | 88 | function prepare_node() { 89 | # set -e 90 | # set -o pipefail 91 | echo "Preparing node" 92 | COMPUTE_NODE=$($SSH_OVER_MASTER fuel node | grep compute | awk '-F|' '{gsub(" ", "", $5); print $5}') 93 | 94 | echo "Copying io_scenario to compute node" 95 | sshpass -p${FUEL_MASTER_PASSWD} scp -r ../io_scenario root@${FUEL_MASTER_IP}:/tmp 96 | $SSH_OVER_MASTER scp -r /tmp/io_scenario $COMPUTE_NODE:/tmp >/dev/null 97 | 98 | echo "Copying debs to compute node" 99 | sshpass 
-p${FUEL_MASTER_PASSWD} scp $DEBS root@${FUEL_MASTER_IP}:/tmp 100 | 101 | $SSH_OVER_MASTER scp $DEBS $COMPUTE_NODE:/tmp 102 | $SSH_OVER_MASTER ssh $COMPUTE_NODE dpkg -i $DEBS 103 | 104 | echo "Copying single_node_test.sh to compute node" 105 | sshpass -p${FUEL_MASTER_PASSWD} scp single_node_test_short.sh root@${FUEL_MASTER_IP}:/tmp 106 | $SSH_OVER_MASTER scp /tmp/single_node_test_short.sh $COMPUTE_NODE:/tmp 107 | } 108 | 109 | function download_debs() { 110 | pushd /tmp >/dev/null 111 | rm -f *.deb >/dev/null 112 | aptitude download libibverbs1 librdmacm1 libaio1 fio >/dev/null 113 | popd >/dev/null 114 | echo /tmp/*.deb 115 | } 116 | 117 | # OPENRC=`get_openrc` 118 | # source $OPENRC 119 | # rm $OPENRC 120 | 121 | # boot_vm 122 | # prepare_vm 123 | 124 | 125 | -------------------------------------------------------------------------------- /scripts/sensors_webui.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 7 | 8 | 9 |
10 | 11 | 58 | -------------------------------------------------------------------------------- /scripts/show_disk_delta.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import time 4 | import pprint 5 | import threading 6 | 7 | 8 | mapping = [ 9 | "major number", 10 | "minor mumber", 11 | "device name", 12 | "reads completed successfully", 13 | "reads merged", 14 | "sectors read", 15 | "time spent reading (ms)", 16 | "writes complete", 17 | "writes merged", 18 | "sectors written", 19 | "time spent writing (ms)", 20 | "I/Os currently in progress", 21 | "time spent doing I/Os (ms)", 22 | "weighted time spent doing I/Os (ms)" 23 | ] 24 | 25 | 26 | def read_dstats(): 27 | res = {} 28 | for line in open("/proc/diskstats"): 29 | stats = dict(zip(mapping, line.split())) 30 | name = stats.pop('device name') 31 | res[name] = {k: int(v) for k, v in stats.items()} 32 | return res 33 | 34 | 35 | def diff_stats(obj1, obj2): 36 | return {key: (val - obj2[key]) for key, val in obj1.items()} 37 | 38 | 39 | def run_tool(cmd, suppress_console=False): 40 | s_cmd = " ".join(cmd) 41 | if suppress_console: 42 | s_cmd += " >/dev/null 2>&1 " 43 | os.system(s_cmd) 44 | 45 | devices = sys.argv[1].split(',') 46 | cmd = sys.argv[2:] 47 | 48 | th = threading.Thread(None, run_tool, None, (cmd,)) 49 | th.daemon = True 50 | 51 | rstats = read_dstats() 52 | prev_stats = {device: rstats[device] for device in devices} 53 | begin_stats = prev_stats 54 | 55 | th.start() 56 | 57 | wr_compl = "writes complete" 58 | 59 | while True: 60 | time.sleep(1) 61 | 62 | rstats = read_dstats() 63 | new_stats = {device: rstats[device] for device in devices} 64 | 65 | # print "Delta writes complete =", 66 | for device in devices: 67 | delta = new_stats[device][wr_compl] - prev_stats[device][wr_compl] 68 | # print device, delta, 69 | # print 70 | 71 | prev_stats = new_stats 72 | 73 | if not th.is_alive(): 74 | break 75 | 76 | pprint.pprint(diff_stats(new_stats[device], begin_stats[device])) 77 | -------------------------------------------------------------------------------- /scripts/single_node_test_complete.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | 4 | TEST_FILE=$1 5 | OUT_FILE=$2 6 | NUM_CYCLES=7 7 | # TESTS_PER_CYCLE=9 8 | 9 | # COUNTER=0 10 | # (( NUM_TESTS=$NUM_CYCLES * $TESTS_PER_CYCLE)) 11 | 12 | # function next() { 13 | # echo "Done $COUNTER tests from $NUM_TESTS" 14 | # (( COUNTER=$COUNTER + 1 )) 15 | # } 16 | 17 | function super_sync() { 18 | sync 19 | echo 3 > /proc/sys/vm/drop_caches 20 | } 21 | 22 | function run_tests(){ 23 | 24 | super_sync ; dd if=/dev/zero of=$TEST_FILE bs=1048576 count=10240 25 | 26 | OPTS="--test-file $TEST_FILE --type fio --iodepth 1 --iosize 10G --timeout 15" 27 | for cycle in $(seq 50) ; do 28 | super_sync ; python io.py $OPTS -a randwrite --blocksize 4k -d --concurrency 1 29 | done 30 | 31 | echo "--------------------------------------------------------------------------------" 32 | 33 | OPTS="--test-file $TEST_FILE --type fio --iodepth 1 --iosize 10G --timeout 30" 34 | OPERS="read write randread randwrite" 35 | CONCS="1 4 8 64" 36 | SIZES="4k 16k 64k 256k 1m 2m" 37 | 38 | 39 | for cycle in $(seq $NUM_CYCLES) ; do 40 | for conc in $CONCS ; do 41 | for bsize in $SIZES ; do 42 | for operation in $OPERS ; do 43 | super_sync ; python io.py $OPTS -a $operation --blocksize $bsize -d --concurrency $conc 44 | done 45 | done 46 | done 47 | done 48 | 49 | for cycle in 
$(seq $NUM_CYCLES) ; do 50 | for conc in $CONCS ; do 51 | for operation in $OPERS ; do 52 | super_sync ; python io.py $OPTS -a $operation --blocksize 4k -s --concurrency $conc 53 | done 54 | done 55 | done 56 | 57 | super_sync ; python io.py $OPTS -a write --blocksize 2m --concurrency 1 58 | super_sync ; python io.py $OPTS -a write --blocksize 2m --concurrency 1 59 | super_sync ; python io.py $OPTS -a write --blocksize 2m --concurrency 1 60 | 61 | OPTS="--test-file $TEST_FILE --type fio --iodepth 1 --iosize 1G" 62 | for cycle in $(seq $NUM_CYCLES) ; do 63 | super_sync ; python io.py $OPTS -a randwrite --blocksize 4k -d --concurrency 1 64 | done 65 | 66 | OPTS="--test-file $TEST_FILE --type fio --iodepth 1 --iosize 10G" 67 | # need to test different file sizes 68 | # need to test different timeouts - maybe we can decrease test time 69 | } 70 | 71 | run_tests "$FILE_1" 2>&1 | tee "$OUT_FILE" 72 | 73 | 74 | -------------------------------------------------------------------------------- /scripts/single_node_test_short.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -x 3 | 4 | TEST_FILE=$1 5 | OUT_FILE=$2 6 | NUM_CYCLES=7 7 | # TESTS_PER_CYCLE=9 8 | 9 | # COUNTER=0 10 | # (( NUM_TESTS=$NUM_CYCLES * $TESTS_PER_CYCLE)) 11 | 12 | # function next() { 13 | # echo "Done $COUNTER tests from $NUM_TESTS" 14 | # (( COUNTER=$COUNTER + 1 )) 15 | # } 16 | 17 | function run_tests(){ 18 | OPTS="--type=fio" 19 | sync ; echo 3 > /proc/sys/vm/drop_caches ; python tests/io.py tasks/io_task_randwrite_4kb_1с.cfg --type=fio 20 | 21 | sync ; echo 3 > /proc/sys/vm/drop_caches ; dd if=/dev/zero of=$TEST_FILE bs=1048576 count=10240 22 | sync ; echo 3 > /proc/sys/vm/drop_caches ; dd if=/dev/zero of=$TEST_FILE bs=1048576 count=10240 23 | 24 | for cycle in $(seq $NUM_CYCLES) ; do 25 | sync ; echo 3 > /proc/sys/vm/drop_caches ; python tests/io.py tasks/io_task_randwrite_4kb_1с.cfg --type=fio 26 | sync ; echo 3 > /proc/sys/vm/drop_caches ; python tests/io.py tasks/io_task_randwrite_4kb_4с.cfg --type=fio 27 | sync ; echo 3 > /proc/sys/vm/drop_caches ; python tests/io.py tasks/io_task_randwrite_4kb_8с.cfg --type=fio 28 | 29 | sync ; echo 3 > /proc/sys/vm/drop_caches ; python tests/io.py tasks/io_task_randread_4kb_1с.cfg --type=fio 30 | sync ; echo 3 > /proc/sys/vm/drop_caches ; python tests/io.py tasks/io_task_randread_4kb_1с.cfg --type=fio 31 | sync ; echo 3 > /proc/sys/vm/drop_caches ; python tests/io.py tasks/io_task_randread_4kb_1с.cfg --type=fio 32 | 33 | sync ; echo 3 > /proc/sys/vm/drop_caches ; python tests/io.py tasks/io_task_randwrite_4kb_1с.cfg --type=fio 34 | 35 | sync ; echo 3 > /proc/sys/vm/drop_caches ; python tests/io.py tasks/io_task_reade_2mb.cfg --type=fio 36 | sync ; echo 3 > /proc/sys/vm/drop_caches ; python tests/io.py tasks/io_task_write_2mb.cfg --type=fio 37 | done 38 | } 39 | 40 | run_tests "$FILE_1" 2>&1 | tee "$OUT_FILE" 41 | 42 | # sudo bash scripts/single_node_test_short.sh file_to_test result.txt 43 | -------------------------------------------------------------------------------- /scripts/tests.yaml: -------------------------------------------------------------------------------- 1 | - with_test_nodes: 2 | openstack: 3 | creds: ENV 4 | # creds: FUEL://USER:PASSDW@172.16.52.112:8000/ENV_NAME 5 | vm_params: 6 | count: x1 7 | img_name: disk_io_perf 8 | flavor_name: disk_io_perf.256 9 | keypair_name: disk_io_perf 10 | network_zone_name: novanetwork 11 | flt_ip_pool: nova 12 | creds: "ssh://ubuntu@{0}::disk_io_perf.pem" 13 | tests: 14 | - 
pgbench: 15 | opts: 16 | num_clients: [4, 8, 12] 17 | transactions: [1, 2, 3] 18 | - io: 19 | tool: fio 20 | config_file: tests/io_task_test.cfg 21 | 22 | - vm_count: 23 | max_lat_ms: 20 24 | min_bw_mbps: 60 25 | min_4k_direct_w_iops: 100 26 | min_4k_direct_r_iops: 100 27 | -------------------------------------------------------------------------------- /scripts/wally: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | python3.6 -m wally "$@" 3 | -------------------------------------------------------------------------------- /scripts/webui.py: -------------------------------------------------------------------------------- 1 | import time 2 | import random 3 | import os.path 4 | import logging 5 | import calendar 6 | import datetime 7 | import threading 8 | 9 | import cherrypy 10 | from cherrypy import tools 11 | 12 | import wally 13 | 14 | logger = logging.getLogger("wally.webui") 15 | 16 | 17 | def to_timestamp(str_datetime): 18 | dt, str_gmt_offset = str_datetime.split("GMT", 1) 19 | dt = dt.strip().split(" ", 1)[1] 20 | dto = datetime.datetime.strptime(dt, "%b %d %Y %H:%M:%S") 21 | timestamp = calendar.timegm(dto.timetuple()) 22 | str_gmt_offset = str_gmt_offset.strip().split(" ", 1)[0] 23 | gmt_offset = int(str_gmt_offset) 24 | gmt_offset_sec = gmt_offset // 100 * 3600 + (gmt_offset % 100) * 60 25 | return timestamp - gmt_offset_sec 26 | 27 | 28 | def backfill_thread(dstore): 29 | with dstore.lock: 30 | for i in range(600): 31 | dstore.data['disk_io'].append(int(random.random() * 100)) 32 | dstore.data['net_io'].append(int(random.random() * 100)) 33 | 34 | while True: 35 | time.sleep(1) 36 | with dstore.lock: 37 | dstore.data['disk_io'].append(int(random.random() * 100)) 38 | dstore.data['net_io'].append(int(random.random() * 100)) 39 | 40 | 41 | class WebWally(object): 42 | 43 | def __init__(self, sensors_data_storage): 44 | self.storage = sensors_data_storage 45 | 46 | @cherrypy.expose 47 | @tools.json_out() 48 | def sensors(self, start, stop, step, name): 49 | try: 50 | start = to_timestamp(start) 51 | stop = to_timestamp(stop) 52 | 53 | with self.storage.lock: 54 | data = self.storage.data[name] 55 | except Exception: 56 | logger.exception("During parse input data") 57 | raise cherrypy.HTTPError("Wrong date format") 58 | 59 | if step != 1000: 60 | raise cherrypy.HTTPError("Step must be equals to 1s") 61 | 62 | num = stop - start 63 | 64 | if len(data) > num: 65 | data = data[-num:] 66 | else: 67 | data = [0] * (num - len(data)) + data 68 | 69 | return data 70 | 71 | @cherrypy.expose 72 | def index(self): 73 | idx = os.path.dirname(wally.__file__) 74 | idx = os.path.join(idx, "sensors.html") 75 | return open(idx).read() 76 | 77 | 78 | def web_main_thread(sensors_data_storage): 79 | 80 | cherrypy.config.update({'environment': 'embedded', 81 | 'server.socket_port': 8089, 82 | 'engine.autoreload_on': False}) 83 | 84 | th = threading.Thread(None, backfill_thread, "backfill_thread", 85 | (sensors_data_storage,)) 86 | th.daemon = True 87 | th.start() 88 | 89 | cherrypy.quickstart(WebWally(sensors_data_storage), '/') 90 | 91 | 92 | def web_main_stop(): 93 | cherrypy.engine.stop() 94 | -------------------------------------------------------------------------------- /stubs/paramiko.pyi: -------------------------------------------------------------------------------- 1 | from io import BytesIO 2 | from typing import Any, Tuple 3 | 4 | 5 | __version_info__ = None # type: Tuple[int, int, int] 6 | 7 | 8 | class 
PasswordRequiredException(Exception): 9 | pass 10 | 11 | 12 | class SSHException(Exception): 13 | pass 14 | 15 | 16 | class RSAKey: 17 | @classmethod 18 | def from_private_key(cls, data: BytesIO, password: str = None) -> 'RSAKey': ... 19 | 20 | @classmethod 21 | def from_private_key_file(cls, fname: str, password: str = None) -> 'RSAKey': ... 22 | 23 | 24 | 25 | class AutoAddPolicy: 26 | pass 27 | 28 | 29 | class SSHClient: 30 | def __init__(self) -> None: 31 | self.known_hosts = None # type: Any 32 | 33 | def load_host_keys(self, path: str) -> None: ... 34 | def set_missing_host_key_policy(self, policy: AutoAddPolicy) -> None: ... 35 | def connect(self, *args: Any, **kwargs: Any): ... 36 | def get_transport(self) -> Any: ... 37 | def open_sftp(self) -> Any: ... 38 | -------------------------------------------------------------------------------- /stubs/psutil.pyi: -------------------------------------------------------------------------------- 1 | from typing import Iterable 2 | 3 | class Process: 4 | def __init__(self, pid: int) -> None: ... 5 | def children(self, recursive: bool = True) -> Iterable['Process']: ... 6 | def kill(self) -> None: ... 7 | -------------------------------------------------------------------------------- /stubs/yaml.pyi: -------------------------------------------------------------------------------- 1 | from typing import Union, List, Dict, Any 2 | 3 | 4 | Basic = Union[List, Dict[str, Any]] 5 | 6 | 7 | class Loader: ... 8 | class Dumper: ... 9 | class CLoader: ... 10 | class CDumper: ... 11 | 12 | def load(data: bytes, Loader: Any = None, encoding: str = 'utf8') -> Any: ... 13 | def dump(data: Any, Dumper: Any = None, encoding: str = 'utf8') -> bytes: ... 14 | def safe_load(data: bytes, encoding: str = 'utf8') -> Any: ... 15 | def safe_dump(data: Any, encoding: str = 'utf8') -> bytes: ... 
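# Note (added for clarity, not part of the original stubs): the .pyi files in stubs/ give a
# type checker such as mypy minimal signatures for paramiko, psutil and yaml when checking
# wally. An assumed way to pick them up is: MYPYPATH=stubs mypy wally -- the repo's actual
# mypy/CI configuration is not included in this dump.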
16 | -------------------------------------------------------------------------------- /tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mirantis/disk_perf_test_tool/aaad8b81218d0907e9aa1425e18b1a044f06960d/tests/__init__.py -------------------------------------------------------------------------------- /tests/test_executors.py: -------------------------------------------------------------------------------- 1 | import os 2 | import time 3 | import unittest 4 | 5 | 6 | from oktest import ok, main, test 7 | 8 | 9 | from wally import utils, ssh_utils 10 | 11 | 12 | class AgentTest(unittest.TestCase): 13 | @test("test_local_executor_ls") 14 | def test_ls(self): 15 | expected = sorted(os.listdir('/')) 16 | ok(sorted(utils.run_locally('ls /').split())) == expected 17 | 18 | @test("test_local_executor_sleep1") 19 | def test_sleep1(self): 20 | t = time.time() 21 | with self.assertRaises(RuntimeError): 22 | utils.run_locally(['sleep', '20'], timeout=1) 23 | ok(time.time() - t) < 1.2 24 | 25 | @test("test_local_executor_sleep2") 26 | def test_sleep2(self): 27 | t = time.time() 28 | with self.assertRaises(RuntimeError): 29 | utils.run_locally('sleep 20', timeout=1) 30 | ok(time.time() - t) < 1.2 31 | 32 | @test("test_ssh_executor1") 33 | def test_ssh_executor1(self): 34 | id_rsa_path = os.path.expanduser('~/.ssh/id_rsa') 35 | ssh_url = "ssh://localhost::" + id_rsa_path 36 | expected = sorted(os.listdir('/')) 37 | 38 | conn = ssh_utils.connect(ssh_url) 39 | out = ssh_utils.run_over_ssh(conn, "ls /") 40 | ok(sorted(out.split())) == expected 41 | 42 | if __name__ == '__main__': 43 | main() 44 | -------------------------------------------------------------------------------- /tests/test_hlstorage.py: -------------------------------------------------------------------------------- 1 | import os 2 | import shutil 3 | import tempfile 4 | import contextlib 5 | from typing import Tuple, Union, Dict, Any 6 | 7 | import numpy 8 | 9 | from wally.result_classes import DataSource, TimeSeries, SuiteConfig 10 | from wally.suits.job import JobConfig, JobParams 11 | from wally.hlstorage import ResultStorage 12 | 13 | from cephlib.storage import make_storage 14 | 15 | @contextlib.contextmanager 16 | def in_temp_dir(): 17 | dname = tempfile.mkdtemp() 18 | try: 19 | yield dname 20 | finally: 21 | shutil.rmtree(dname) 22 | 23 | 24 | SUITE_ID = "suite_1" 25 | JOB_ID = "job_11" 26 | NODE_ID = "11.22.33.44:223" 27 | SENSOR = "sensor" 28 | DEV = "dev" 29 | METRIC = "metric" 30 | TAG = "csv" 31 | DATA_UNITS = "x" 32 | TIME_UNITS = "us" 33 | 34 | 35 | class TJobParams(JobParams): 36 | def __init__(self) -> None: 37 | JobParams.__init__(self) 38 | 39 | @property 40 | def summary(self) -> str: 41 | return "UT_Job_CFG" 42 | 43 | @property 44 | def long_summary(self) -> str: 45 | return "UT_Job_Config" 46 | 47 | def copy(self, **updated) -> 'TJobParams': 48 | return self.__class__() 49 | 50 | @property 51 | def char_tpl(self) -> Tuple[Union[str, int, float, bool], ...]: 52 | return (1, 2, 3) 53 | 54 | 55 | class TJobConfig(JobConfig): 56 | @property 57 | def storage_id(self) -> str: 58 | return JOB_ID 59 | 60 | @property 61 | def params(self) -> JobParams: 62 | return TJobParams() 63 | 64 | def raw(self) -> Dict[str, Any]: 65 | return {} 66 | 67 | @classmethod 68 | def fromraw(cls, data: Dict[str, Any]) -> 'TJobConfig': 69 | return cls() 70 | 71 | 72 | class TSuiteConfig(SuiteConfig): 73 | def __init__(self): 74 | SuiteConfig.__init__(self, "UT", 
{}, "run_uuid", [], "/tmp", 0, False) 75 | self.storage_id = SUITE_ID 76 | 77 | 78 | def test_sensor_ts(): 79 | with in_temp_dir() as root: 80 | size = 5 81 | sensor_data = numpy.arange(size) 82 | collected_at = numpy.arange(size * 2) + 100 83 | 84 | ds = DataSource(node_id=NODE_ID, sensor=SENSOR, dev=DEV, metric=METRIC, tag='csv') 85 | cds = DataSource(node_id=NODE_ID, metric='collected_at', tag='csv') 86 | 87 | with make_storage(root, existing=False) as storage: 88 | rstorage = ResultStorage(storage) 89 | 90 | rstorage.append_sensor(sensor_data, ds, units=DATA_UNITS) 91 | rstorage.append_sensor(sensor_data, ds, units=DATA_UNITS) 92 | 93 | rstorage.append_sensor(collected_at, cds, units=TIME_UNITS) 94 | rstorage.append_sensor(collected_at + size * 2, cds, units=TIME_UNITS) 95 | 96 | with make_storage(root, existing=True) as storage2: 97 | rstorage2 = ResultStorage(storage2) 98 | ts = rstorage2.get_sensor(ds) 99 | assert numpy.array_equal(ts.data, numpy.concatenate((sensor_data, sensor_data))) 100 | assert numpy.array_equal(ts.times, numpy.concatenate((collected_at, collected_at + size * 2))[::2]) 101 | 102 | 103 | def test_result_ts(): 104 | with in_temp_dir() as root: 105 | sensor_data = numpy.arange(5, dtype=numpy.uint32) 106 | collected_at = numpy.arange(5, dtype=numpy.uint32) + 100 107 | ds = DataSource(suite_id=SUITE_ID, job_id=JOB_ID, 108 | node_id=NODE_ID, sensor=SENSOR, dev=DEV, metric=METRIC, tag=TAG) 109 | ds.verify() 110 | 111 | ts = TimeSeries(sensor_data, times=collected_at, units=DATA_UNITS, source=ds, time_units=TIME_UNITS) 112 | 113 | suite = TSuiteConfig() 114 | job = TJobConfig(1) 115 | 116 | with make_storage(root, existing=False) as storage: 117 | rstorage = ResultStorage(storage) 118 | rstorage.put_or_check_suite(suite) 119 | rstorage.put_job(suite, job) 120 | rstorage.put_ts(ts) 121 | 122 | with make_storage(root, existing=True) as storage2: 123 | rstorage2 = ResultStorage(storage2) 124 | suits = list(rstorage2.iter_suite('UT')) 125 | suits2 = list(rstorage2.iter_suite()) 126 | assert len(suits) == 1 127 | assert len(suits2) == 1 128 | -------------------------------------------------------------------------------- /tests/test_rpc.py: -------------------------------------------------------------------------------- 1 | import contextlib 2 | 3 | from wally import ssh_utils, node, node_interfaces 4 | 5 | 6 | CONNECT_URI = "localhost" 7 | 8 | 9 | @contextlib.contextmanager 10 | def rpc_conn_ctx(uri, log_level=None): 11 | creds = ssh_utils.parse_ssh_uri(uri) 12 | rpc_code, modules = node.get_rpc_server_code() 13 | 14 | ssh_conn = node.connect(node_interfaces.NodeInfo(creds, set())) 15 | try: 16 | rpc_conn = node.setup_rpc(ssh_conn, rpc_code, plugins=modules, log_level=log_level) 17 | try: 18 | yield rpc_conn 19 | finally: 20 | rpc_conn.conn.server.stop() 21 | rpc_conn.disconnect() 22 | finally: 23 | ssh_conn.disconnect() 24 | 25 | 26 | def test_rpc_simple(): 27 | with rpc_conn_ctx(CONNECT_URI) as conn: 28 | names = conn.conn.server.rpc_info() 29 | assert 'server.list_modules' in names 30 | assert 'server.load_module' in names 31 | assert 'server.rpc_info' in names 32 | assert 'server.stop' in names 33 | 34 | 35 | def test_rpc_plugins(): 36 | with rpc_conn_ctx(CONNECT_URI) as conn: 37 | print(conn.conn.server.rpc_info()) 38 | assert conn.conn.fs.file_exists("/") 39 | -------------------------------------------------------------------------------- /tests/test_ssh.py: -------------------------------------------------------------------------------- 1 | import os 2 | import 
contextlib 3 | from unittest.mock import patch 4 | from typing import Iterator 5 | 6 | 7 | from wally import ssh_utils, ssh, node, node_interfaces 8 | 9 | 10 | creds = "root@osd-0" 11 | 12 | 13 | def test_ssh_url_parser(): 14 | default_user = "default_user" 15 | 16 | creds = [ 17 | ("test", ssh_utils.ConnCreds("test", default_user, port=22)), 18 | ("test:13", ssh_utils.ConnCreds("test", default_user, port=13)), 19 | ("test::xxx.key", ssh_utils.ConnCreds("test", default_user, port=22, key_file="xxx.key")), 20 | ("test:123:xxx.key", ssh_utils.ConnCreds("test", default_user, port=123, key_file="xxx.key")), 21 | ("user@test", ssh_utils.ConnCreds("test", "user", port=22)), 22 | ("user@test:13", ssh_utils.ConnCreds("test", "user", port=13)), 23 | ("user@test::xxx.key", ssh_utils.ConnCreds("test", "user", port=22, key_file="xxx.key")), 24 | ("user@test:123:xxx.key", ssh_utils.ConnCreds("test", "user", port=123, key_file="xxx.key")), 25 | ("user:passwd@test", ssh_utils.ConnCreds("test", "user", port=22, passwd="passwd")), 26 | ("user:passwd:@test", ssh_utils.ConnCreds("test", "user", port=22, passwd="passwd:")), 27 | ("user:passwd:@test:123", ssh_utils.ConnCreds("test", "user", port=123, passwd="passwd:")) 28 | ] 29 | 30 | for uri, expected in creds: 31 | with patch('getpass.getuser', lambda : default_user): 32 | parsed = ssh_utils.parse_ssh_uri(uri) 33 | 34 | assert parsed.user == expected.user, uri 35 | assert parsed.addr.port == expected.addr.port, uri 36 | assert parsed.addr.host == expected.addr.host, uri 37 | assert parsed.key_file == expected.key_file, uri 38 | assert parsed.passwd == expected.passwd, uri 39 | 40 | 41 | CONNECT_URI = "localhost" 42 | 43 | 44 | @contextlib.contextmanager 45 | def conn_ctx(uri, *args): 46 | creds = ssh_utils.parse_ssh_uri(CONNECT_URI) 47 | node_info = node_interfaces.NodeInfo(creds, set()) 48 | conn = node.connect(node_info, *args) 49 | try: 50 | yield conn 51 | finally: 52 | conn.disconnect() 53 | 54 | 55 | def test_ssh_connect(): 56 | with conn_ctx(CONNECT_URI) as conn: 57 | assert set(conn.run("ls -1 /").split()) == set(fname for fname in os.listdir("/") if not fname.startswith('.')) 58 | 59 | 60 | def test_ssh_complex(): 61 | pass 62 | 63 | 64 | def test_file_copy(): 65 | data1 = b"-" * 1024 66 | data2 = b"+" * 1024 67 | 68 | with conn_ctx(CONNECT_URI) as conn: 69 | path = conn.put_to_file(None, data1) 70 | assert data1 == open(path, 'rb').read() 71 | 72 | assert path == conn.put_to_file(path, data2) 73 | assert data2 == open(path, 'rb').read() 74 | 75 | assert len(data2) > 10 76 | assert path == conn.put_to_file(path, data2[10:]) 77 | assert data2[10:] == open(path, 'rb').read() 78 | -------------------------------------------------------------------------------- /wally/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mirantis/disk_perf_test_tool/aaad8b81218d0907e9aa1425e18b1a044f06960d/wally/__init__.py -------------------------------------------------------------------------------- /wally/__main__.py: -------------------------------------------------------------------------------- 1 | import sys 2 | from .main import main 3 | 4 | 5 | if __name__ == '__main__': 6 | exit(main(sys.argv)) 7 | -------------------------------------------------------------------------------- /wally/ceph.py: -------------------------------------------------------------------------------- 1 | """ Collect data about ceph nodes""" 2 | import enum 3 | import logging 4 | from typing import Dict, cast, 
List, Set 5 | from cephlib import discover 6 | from cephlib.discover import OSDInfo 7 | from cephlib.common import to_ip 8 | from cephlib.node import NodeInfo, IRPCNode 9 | from cephlib.ssh import ConnCreds, IP, parse_ssh_uri 10 | from cephlib.node_impl import connect, setup_rpc 11 | 12 | from .stage import Stage, StepOrder 13 | from .test_run_class import TestRun 14 | from .utils import StopTestError 15 | 16 | 17 | logger = logging.getLogger("wally") 18 | 19 | 20 | def get_osds_info(node: IRPCNode, ceph_extra_args: str = "", thcount: int = 8) -> Dict[IP, List[OSDInfo]]: 21 | """Get set of osd's ip""" 22 | return {IP(ip): osd_info_list 23 | for ip, osd_info_list in discover.get_osds_nodes(node.run, ceph_extra_args, thcount=thcount).items()} 24 | 25 | 26 | def get_mons_ips(node: IRPCNode, ceph_extra_args: str = "") -> Set[IP]: 27 | """Return mon ip set""" 28 | return {IP(ip) for ip, _ in discover.get_mons_nodes(node.run, ceph_extra_args).values()} 29 | 30 | 31 | class DiscoverCephStage(Stage): 32 | config_block = 'ceph' 33 | priority = StepOrder.DISCOVER 34 | 35 | def run(self, ctx: TestRun) -> None: 36 | """Return list of ceph's nodes NodeInfo""" 37 | if 'all_nodes' in ctx.storage: 38 | logger.debug("Skip ceph discovery, use previously discovered nodes") 39 | return 40 | 41 | if 'metadata' in ctx.config.discover: 42 | logger.exception("Ceph metadata discovery is not implemented") 43 | raise StopTestError() 44 | 45 | ignore_errors = 'ignore_errors' in ctx.config.discover 46 | ceph = ctx.config.ceph 47 | try: 48 | root_node_uri = cast(str, ceph.root_node) 49 | except AttributeError: 50 | logger.error("'root_node' option must be provided in 'ceph' config section. " + 51 | "It must be the name of the node, which has access to ceph") 52 | raise StopTestError() 53 | cluster = ceph.get("cluster", "ceph") 54 | ip_remap = ctx.config.ceph.get('ip_remap', {}) 55 | 56 | conf = ceph.get("conf") 57 | key = ceph.get("key") 58 | 59 | if conf is None: 60 | conf = f"/etc/ceph/{cluster}.conf" 61 | 62 | if key is None: 63 | key = f"/etc/ceph/{cluster}.client.admin.keyring" 64 | 65 | ctx.ceph_extra_args = f" -c '{conf}' -k '{key}'" 66 | 67 | logger.debug(f"Start discovering ceph nodes from root {root_node_uri}") 68 | logger.debug(f"cluster={cluster} key={conf} conf={key}") 69 | 70 | info = NodeInfo(parse_ssh_uri(root_node_uri), set()) 71 | 72 | ceph_params = {"cluster": cluster, "conf": conf, "key": key} 73 | 74 | ssh_user = ctx.config.ssh_opts.get("user") 75 | ssh_key = ctx.config.ssh_opts.get("key") 76 | 77 | node = ctx.ceph_master_node = setup_rpc(connect(info), ctx.rpc_code, ctx.default_rpc_plugins, 78 | log_level=ctx.config.rpc_log_level, 79 | sudo=ctx.config.ssh_opts.get("sudo", False)) 80 | 81 | try: 82 | ips = set() 83 | for ip, osds_info in get_osds_info(node, ctx.ceph_extra_args, thcount=16).items(): 84 | ip = ip_remap.get(ip, ip) 85 | ips.add(ip) 86 | creds = ConnCreds(to_ip(cast(str, ip)), user=ssh_user, key_file=ssh_key) 87 | info = ctx.merge_node(creds, {'ceph-osd'}) 88 | info.params.setdefault('ceph-osds', []).extend(info.__dict__.copy() for info in osds_info) 89 | assert 'ceph' not in info.params or info.params['ceph'] == ceph_params 90 | info.params['ceph'] = ceph_params 91 | logger.debug(f"Found {len(ips)} nodes with ceph-osd role") 92 | except Exception as exc: 93 | if not ignore_errors: 94 | logger.exception("OSD discovery failed") 95 | raise StopTestError() 96 | else: 97 | logger.warning(f"OSD discovery failed {exc}") 98 | 99 | try: 100 | counter = 0 101 | for counter, ip in 
enumerate(get_mons_ips(node, ctx.ceph_extra_args)): 102 | ip = ip_remap.get(ip, ip) 103 | creds = ConnCreds(to_ip(cast(str, ip)), user=ssh_user, key_file=ssh_key) 104 | info = ctx.merge_node(creds, {'ceph-mon'}) 105 | assert 'ceph' not in info.params or info.params['ceph'] == ceph_params 106 | info.params['ceph'] = ceph_params 107 | logger.debug(f"Found {counter + 1} nodes with ceph-mon role") 108 | except Exception as exc: 109 | if not ignore_errors: 110 | logger.exception("MON discovery failed") 111 | raise StopTestError() 112 | else: 113 | logger.warning(f"MON discovery failed {exc}") 114 | 115 | 116 | def raw_dev_name(path: str) -> str: 117 | if path.startswith("/dev/"): 118 | path = path[5:] 119 | while path and path[-1].isdigit(): 120 | path = path[:-1] 121 | return path 122 | 123 | 124 | class CollectCephInfoStage(Stage): 125 | config_block = 'ceph' 126 | priority = StepOrder.UPDATE_NODES_INFO 127 | 128 | def run(self, ctx: TestRun) -> None: 129 | for node in ctx.nodes: 130 | if 'ceph_storage_devs' not in node.info.params: 131 | if 'ceph-osd' in node.info.roles: 132 | jdevs: Set[str] = set() 133 | sdevs: Set[str] = set() 134 | for osd_info in node.info.params['ceph-osds']: 135 | 136 | if osd_info['bluestore'] is None: 137 | osd_stor_type_b = node.conn.fs.get_file(osd_info['storage'] + "/type", compress=False) 138 | osd_stor_type = osd_stor_type_b.decode('utf8').strip() 139 | osd_info['bluestore'] = osd_stor_type == 'bluestore' 140 | 141 | if osd_info['bluestore']: 142 | for name, sset in [('block.db', jdevs), ('block.wal', jdevs), ('block', sdevs)]: 143 | path = f"{osd_info['storage']}/{name}" 144 | dpath = node.conn.fs.get_dev_for_file(path) 145 | if isinstance(dpath, bytes): 146 | dpath = dpath.decode('utf8') 147 | sset.add(raw_dev_name(dpath)) 148 | else: 149 | for key, sset in [('journal', jdevs), ('storage', sdevs)]: 150 | path = osd_info.get(key) 151 | if path: 152 | dpath = node.conn.fs.get_dev_for_file(path) 153 | if isinstance(dpath, bytes): 154 | dpath = dpath.decode('utf8') 155 | sset.add(raw_dev_name(dpath)) 156 | 157 | node.info.params['ceph_storage_devs'] = list(sdevs) 158 | node.info.params['ceph_journal_devs'] = list(jdevs) 159 | -------------------------------------------------------------------------------- /wally/config.py: -------------------------------------------------------------------------------- 1 | from typing import Any, Dict, Optional, Set 2 | 3 | from cephlib.storage import IStorable 4 | 5 | 6 | ConfigBlock = Dict[str, Any] 7 | 8 | 9 | class Config(IStorable): 10 | def __init__(self, dct: ConfigBlock) -> None: 11 | # make mypy happy, set fake dict 12 | self.__dict__['_dct'] = {} 13 | self.run_uuid: str = None # type: ignore 14 | self.storage_url: str = None # type: ignore 15 | self.comment: str = None # type: ignore 16 | self.keep_vm: bool = None # type: ignore 17 | self.dont_discover_nodes: bool = None # type: ignore 18 | self.build_id: str = None # type: ignore 19 | self.build_description: str = None # type: ignore 20 | self.build_type: str = None # type: ignore 21 | self.default_test_local_folder: str = None # type: ignore 22 | self.settings_dir: str = None # type: ignore 23 | self.connect_timeout: int = None # type: ignore 24 | self.no_tests: bool = False 25 | self.debug_agents: bool = False 26 | 27 | self.logging: 'Config' = None # type: ignore 28 | self.ceph: 'Config' = None # type: ignore 29 | self.openstack: 'Config' = None # type: ignore 30 | self.test: 'Config' = None # type: ignore 31 | self.sensors: 'Config' = None # type: ignore 32 | 33 | 
# None, disabled, enabled, metadata, ignore_errors 34 | self.discover: Set[str] = None # type: ignore 35 | 36 | self._dct.clear() 37 | self._dct.update(dct) 38 | 39 | @classmethod 40 | def fromraw(cls, data: Dict[str, Any]) -> 'Config': 41 | return cls(data) 42 | 43 | def raw(self) -> Dict[str, Any]: 44 | return self._dct 45 | 46 | def get(self, path: str, default: Any = None) -> Any: 47 | curr = self 48 | while path: 49 | if '/' in path: 50 | name, path = path.split('/', 1) 51 | else: 52 | name = path 53 | path = "" 54 | 55 | try: 56 | curr = getattr(curr, name) 57 | except AttributeError: 58 | return default 59 | 60 | return curr 61 | 62 | def __getattr__(self, name: str) -> Any: 63 | try: 64 | val = self._dct[name] 65 | except KeyError: 66 | raise AttributeError(name) 67 | 68 | if isinstance(val, dict): 69 | val = self.__class__(val) 70 | 71 | return val 72 | 73 | def __setattr__(self, name: str, val: Any): 74 | self._dct[name] = val 75 | 76 | def __contains__(self, name: str) -> bool: 77 | return self.get(name) is not None 78 | -------------------------------------------------------------------------------- /wally/console_report.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from typing import cast, List, Union 3 | 4 | import numpy 5 | 6 | from cephlib.common import float2str 7 | from cephlib.texttable import Texttable 8 | from cephlib.statistic import calc_norm_stat_props, calc_histo_stat_props 9 | 10 | from .stage import Stage, StepOrder 11 | from .test_run_class import TestRun 12 | from .result_classes import SuiteConfig 13 | from .suits.io.fio import FioTest 14 | from .suits.io.fio_job import FioJobParams 15 | from .suits.io.fio_hist import get_lat_vals 16 | from .data_selectors import get_aggregated 17 | from .result_storage import IWallyStorage 18 | 19 | 20 | logger = logging.getLogger("wally") 21 | 22 | 23 | console_report_headers = ["Description", "IOPS ~ Dev", "BW, MiBps", 'Skew/Kurt', 'lat med, ms', 'lat 95, ms'] 24 | console_report_align = ['l', 'r', 'r', 'r', 'r', 'r'] 25 | 26 | def get_console_report_table(suite: SuiteConfig, rstorage: IWallyStorage) -> List[Union[List[str], Texttable.HLINE]]: 27 | table: List[Union[List[str], Texttable.HLINE]] = [] 28 | prev_params = None 29 | for job in sorted(rstorage.iter_job(suite), key=lambda job: job.params): 30 | fparams = cast(FioJobParams, job.params) 31 | fparams['qd'] = None 32 | 33 | if prev_params is not None and fparams.char_tpl != prev_params: 34 | table.append(Texttable.HLINE) 35 | 36 | prev_params = fparams.char_tpl 37 | 38 | bw_ts = get_aggregated(rstorage, suite.storage_id, job.storage_id, metric='bw', 39 | trange=job.reliable_info_range_s) 40 | props = calc_norm_stat_props(bw_ts) 41 | avg_iops = props.average // job.params.params['bsize'] 42 | iops_dev = props.deviation // job.params.params['bsize'] 43 | 44 | lat_ts = get_aggregated(rstorage, suite.storage_id, job.storage_id, metric='lat', 45 | trange=job.reliable_info_range_s) 46 | bins_edges = numpy.array(get_lat_vals(lat_ts.data.shape[1]), dtype='float32') / 1000 # convert us to ms 47 | lat_props = calc_histo_stat_props(lat_ts, bins_edges) 48 | table.append([job.params.summary, 49 | f"{float2str(avg_iops):>6s} ~ {float2str(iops_dev):>6s}", 50 | float2str(props.average / 1024), # Ki -> Mi 51 | f"{props.skew:>5.1f}/{props.kurt:>5.1f}", 52 | float2str(lat_props.perc_50), float2str(lat_props.perc_95)]) 53 | return table 54 | 55 | 56 | class ConsoleReportStage(Stage): 57 | 58 | priority = StepOrder.REPORT 59 | 
60 | def run(self, ctx: TestRun) -> None: 61 | for suite in ctx.rstorage.iter_suite(FioTest.name): 62 | table = Texttable(max_width=200) 63 | table.set_deco(Texttable.VLINES | Texttable.BORDER | Texttable.HEADER) 64 | tbl = ctx.rstorage.get_txt_report(suite) 65 | if tbl is None: 66 | table.header(console_report_headers) 67 | table.set_cols_align(console_report_align) 68 | for line in get_console_report_table(suite, ctx.rstorage): 69 | table.add_row(line) 70 | tbl = table.draw() 71 | ctx.rstorage.put_txt_report(suite, tbl) 72 | print(tbl) 73 | -------------------------------------------------------------------------------- /wally/data_selectors.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from typing import Tuple, Iterator, List, Iterable, Dict, Union, Callable, Set 3 | 4 | import numpy 5 | 6 | from cephlib.numeric_types import DataSource, TimeSeries 7 | from cephlib.storage_selectors import c_interpolate_ts_on_seconds_border 8 | from cephlib.node import NodeInfo 9 | 10 | from .result_classes import IWallyStorage 11 | 12 | 13 | logger = logging.getLogger("wally") 14 | 15 | # Separately for each test heatmaps & agg acroos whole time histos: 16 | # * fio latency heatmap for all instances 17 | # * data dev iops across all osd 18 | # * data dev bw across all osd 19 | # * date dev qd across all osd 20 | # * journal dev iops across all osd 21 | # * journal dev bw across all osd 22 | # * journal dev qd across all osd 23 | # * net dev pps across all hosts 24 | # * net dev bps across all hosts 25 | 26 | # Main API's 27 | # get sensors by pattern 28 | # allign values to seconds 29 | # cut ranges for particular test 30 | # transform into 2d histos (either make histos or rebin them) and clip outliers same time 31 | 32 | 33 | AGG_TAG = 'ALL' 34 | 35 | 36 | def find_all_series(rstorage: IWallyStorage, suite_id: str, job_id: str, metric: str) -> Iterator[TimeSeries]: 37 | "Iterated over selected metric for all nodes for given Suite/job" 38 | return (rstorage.get_ts(ds) for ds in rstorage.iter_ts(suite_id=suite_id, job_id=job_id, metric=metric)) 39 | 40 | 41 | def get_aggregated(rstorage: IWallyStorage, suite_id: str, job_id: str, metric: str, 42 | trange: Tuple[int, int]) -> TimeSeries: 43 | "Sum selected fio metric for all nodes for given Suite/job" 44 | 45 | key = (id(rstorage), suite_id, job_id, metric, trange) 46 | aggregated_cache = rstorage.storage.other_caches['aggregated'] 47 | if key in aggregated_cache: 48 | return aggregated_cache[key].copy() 49 | 50 | tss = list(find_all_series(rstorage, suite_id, job_id, metric)) 51 | 52 | if len(tss) == 0: 53 | raise NameError(f"Can't found any TS for {suite_id},{job_id},{metric}") 54 | 55 | c_intp = c_interpolate_ts_on_seconds_border 56 | tss_inp = [c_intp(ts.select(trange), tp='fio', allow_broken_step=(metric == 'lat')) for ts in tss] 57 | 58 | res = None 59 | res_times = None 60 | 61 | for ts, ts_orig in zip(tss_inp, tss): 62 | if ts.time_units != 's': 63 | msg = "time_units must be 's' for fio sensor" 64 | logger.error(msg) 65 | raise ValueError(msg) 66 | 67 | # if metric == 'lat' and (len(ts.data.shape) != 2 or ts.data.shape[1] != expected_lat_bins): 68 | # msg = f"Sensor {ts.source.dev}.{ts.source.sensor} on node {ts.source.node_id} " + \ 69 | # f"has shape={ts.data.shape}. Can only process sensors with shape=[X, {expected_lat_bins}]." 
70 | # logger.error(msg) 71 | # raise ValueError(msg) 72 | 73 | if metric != 'lat' and len(ts.data.shape) != 1: 74 | msg = f"Sensor {ts.source.dev}.{ts.source.sensor} on node {ts.source.node_id} " + \ 75 | f"has shape={ts.data.shape}. Can only process 1D sensors." 76 | logger.error(msg) 77 | raise ValueError(msg) 78 | 79 | assert trange[0] >= ts.times[0] and trange[1] <= ts.times[-1], \ 80 | f"[{ts.times[0]}, {ts.times[-1]}] not in [{trange[0]}, {trange[-1]}]" 81 | 82 | idx1, idx2 = numpy.searchsorted(ts.times, trange) 83 | idx2 += 1 84 | 85 | assert (idx2 - idx1) == (trange[1] - trange[0] + 1), \ 86 | "Broken time array at {} for {}".format(trange, ts.source) 87 | 88 | dt = ts.data[idx1: idx2] 89 | if res is None: 90 | res = dt.copy() 91 | res_times = ts.times[idx1: idx2].copy() 92 | else: 93 | assert res.shape == dt.shape, f"res.shape(={res.shape}) != dt.shape(={dt.shape})" 94 | res += dt 95 | 96 | ds = DataSource(suite_id=suite_id, job_id=job_id, node_id=AGG_TAG, sensor='fio', 97 | dev=AGG_TAG, metric=metric, tag='csv') 98 | agg_ts = TimeSeries(res, source=ds, 99 | times=res_times, 100 | units=tss_inp[0].units, 101 | histo_bins=tss_inp[0].histo_bins, 102 | time_units=tss_inp[0].time_units) 103 | aggregated_cache[key] = agg_ts 104 | return agg_ts.copy() 105 | 106 | 107 | def get_nodes(storage: IWallyStorage, roles: Iterable[str]) -> List[NodeInfo]: 108 | return [node for node in storage.load_nodes() if node.roles.intersection(roles)] 109 | 110 | -------------------------------------------------------------------------------- /wally/logger.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import logging.config 3 | from typing import Callable 4 | 5 | 6 | def color_me(color: int) -> Callable[[str], str]: 7 | RESET_SEQ = "\033[0m" 8 | COLOR_SEQ = "\033[1;%dm" 9 | 10 | color_seq = COLOR_SEQ % (30 + color) 11 | 12 | def closure(msg): 13 | return color_seq + msg + RESET_SEQ 14 | return closure 15 | 16 | 17 | class ColoredFormatter(logging.Formatter): 18 | BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(8) 19 | 20 | colors = { 21 | 'WARNING': color_me(YELLOW), 22 | 'DEBUG': color_me(BLUE), 23 | 'CRITICAL': color_me(YELLOW), 24 | 'ERROR': color_me(RED) 25 | } 26 | 27 | def __init__(self, msg: str, use_color: bool=True, datefmt: str=None) -> None: 28 | logging.Formatter.__init__(self, msg, datefmt=datefmt) 29 | self.use_color = use_color 30 | 31 | def format(self, record: logging.LogRecord) -> str: 32 | orig = record.__dict__ 33 | record.__dict__ = record.__dict__.copy() 34 | levelname = record.levelname 35 | 36 | prn_name = levelname + ' ' * (8 - len(levelname)) 37 | if levelname in self.colors: 38 | record.levelname = self.colors[levelname](prn_name) 39 | else: 40 | record.levelname = prn_name 41 | 42 | # super doesn't work here in 2.6 O_o 43 | res = logging.Formatter.format(self, record) 44 | 45 | # res = super(ColoredFormatter, self).format(record) 46 | 47 | # restore record, as it will be used by other formatters 48 | record.__dict__ = orig 49 | return res 50 | -------------------------------------------------------------------------------- /wally/plot.py: -------------------------------------------------------------------------------- 1 | import logging 2 | from typing import List 3 | 4 | import numpy 5 | 6 | from cephlib.units import unit_conversion_coef_f 7 | from cephlib.plot import PlotParams, provide_plot 8 | 9 | from .resources import IOSummary 10 | 11 | 12 | logger = logging.getLogger("wally") 13 | 14 | 15 | 
@provide_plot(eng=False, no_legend=True, grid='y', style_name='ioqd', noadjust=True) 16 | def io_chart(pp: PlotParams, 17 | legend: str, 18 | iosums: List[IOSummary], 19 | iops_log_spine: bool = False, 20 | lat_log_spine: bool = False) -> None: 21 | 22 | # -------------- MAGIC VALUES --------------------- 23 | # IOPS bar width 24 | width = 0.2 25 | 26 | # offset from center of bar to deviation/confidence range indicator 27 | err_x_offset = 0.03 28 | 29 | # extra space on top and bottom, comparing to maximal tight layout 30 | extra_y_space = 0.05 31 | 32 | # additional spine for BW/IOPS on left side of plot 33 | extra_io_spine_x_offset = -0.1 34 | 35 | # extra space on left and right sides 36 | extra_x_space = 0.5 37 | 38 | # legend location settings 39 | legend_location = "center left" 40 | legend_bbox_to_anchor = (1.1, 0.81) 41 | 42 | # -------------- END OF MAGIC VALUES --------------------- 43 | 44 | block_size = iosums[0].block_size 45 | xpos = numpy.arange(1, len(iosums) + 1, dtype='uint') 46 | 47 | coef_mb = unit_conversion_coef_f(iosums[0].bw.units, "MiBps") 48 | coef_iops = unit_conversion_coef_f(iosums[0].bw.units, "KiBps") / block_size 49 | 50 | iops_primary = block_size < pp.style.large_blocks 51 | 52 | coef = coef_iops if iops_primary else coef_mb 53 | pp.ax.set_ylabel("IOPS" if iops_primary else "BW (MiBps)") 54 | 55 | vals = [iosum.bw.average * coef for iosum in iosums] 56 | 57 | # set correct x limits for primary IO spine 58 | min_io = min(iosum.bw.average - iosum.bw.deviation * pp.style.dev_range_x for iosum in iosums) 59 | max_io = max(iosum.bw.average + iosum.bw.deviation * pp.style.dev_range_x for iosum in iosums) 60 | border = (max_io - min_io) * extra_y_space 61 | io_lims = (min_io - border, max_io + border) 62 | 63 | pp.ax.set_ylim(io_lims[0] * coef, io_lims[-1] * coef) 64 | pp.ax.bar(xpos - width / 2, vals, width=width, color=pp.colors.box_color, label=legend) 65 | 66 | # plot deviation and confidence error ranges 67 | err1_legend = err2_legend = None 68 | for pos, iosum in zip(xpos, iosums): 69 | dev_bar_pos = pos - err_x_offset 70 | err1_legend = pp.ax.errorbar(dev_bar_pos, 71 | iosum.bw.average * coef, 72 | iosum.bw.deviation * pp.style.dev_range_x * coef, 73 | alpha=pp.colors.subinfo_alpha, 74 | color=pp.colors.suppl_color1) # 'magenta' 75 | 76 | conf_bar_pos = pos + err_x_offset 77 | err2_legend = pp.ax.errorbar(conf_bar_pos, 78 | iosum.bw.average * coef, 79 | iosum.bw.confidence * coef, 80 | alpha=pp.colors.subinfo_alpha, 81 | color=pp.colors.suppl_color2) # 'teal' 82 | 83 | handles1, labels1 = pp.ax.get_legend_handles_labels() 84 | 85 | handles1 += [err1_legend, err2_legend] 86 | labels1 += ["{}% dev".format(pp.style.dev_perc), 87 | "{}% conf".format(int(100 * iosums[0].bw.confidence_level))] 88 | 89 | # extra y spine for latency on right side 90 | ax2 = pp.ax.twinx() 91 | 92 | # plot median and 95 perc latency 93 | lat_coef_ms = unit_conversion_coef_f(iosums[0].lat.units, "ms") 94 | ax2.plot(xpos, [iosum.lat.perc_50 * lat_coef_ms for iosum in iosums], label="lat med") 95 | ax2.plot(xpos, [iosum.lat.perc_95 * lat_coef_ms for iosum in iosums], label="lat 95%") 96 | 97 | for grid_line in ax2.get_ygridlines(): 98 | grid_line.set_linestyle(":") 99 | 100 | # extra y spine for BW/IOPS on left side 101 | if pp.style.extra_io_spine: 102 | ax3 = pp.ax.twinx() 103 | if iops_log_spine: 104 | ax3.set_yscale('log') 105 | 106 | ax3.set_ylabel("BW (MiBps)" if iops_primary else "IOPS") 107 | secondary_coef = coef_mb if iops_primary else coef_iops 108 | ax3.set_ylim(io_lims[0] 
* secondary_coef, io_lims[1] * secondary_coef) 109 | ax3.spines["left"].set_position(("axes", extra_io_spine_x_offset)) 110 | ax3.spines["left"].set_visible(True) 111 | ax3.yaxis.set_label_position('left') 112 | ax3.yaxis.set_ticks_position('left') 113 | else: 114 | ax3 = None 115 | 116 | ax2.set_ylabel("Latency (ms)") 117 | 118 | # legend box 119 | handles2, labels2 = ax2.get_legend_handles_labels() 120 | pp.ax.legend(handles1 + handles2, labels1 + labels2, loc=legend_location, bbox_to_anchor=legend_bbox_to_anchor) 121 | 122 | # limit and label x spine 123 | pp.ax.set_xlim(extra_x_space, len(iosums) + extra_x_space) 124 | pp.ax.set_ylim(bottom=0) 125 | pp.ax.set_xticks(xpos) 126 | pp.ax.set_xticklabels(["{0}*{1}={2}".format(iosum.qd, iosum.nodes_count, iosum.qd * iosum.nodes_count) 127 | for iosum in iosums], 128 | rotation=30 if len(iosums) > 9 else 0) 129 | pp.ax.set_xlabel("IO queue depth * test node count = total parallel requests") 130 | 131 | # apply log scales for X spines, if set 132 | if iops_log_spine: 133 | pp.ax.set_yscale('log') 134 | 135 | if lat_log_spine: 136 | ax2.set_yscale('log') 137 | 138 | # override some styles 139 | pp.fig.set_size_inches(*pp.style.qd_chart_inches) 140 | pp.fig.subplots_adjust(right=pp.style.subplot_adjust_r) 141 | 142 | if pp.style.extra_io_spine: 143 | ax3.grid(False) 144 | 145 | -------------------------------------------------------------------------------- /wally/pretty_yaml.py: -------------------------------------------------------------------------------- 1 | __doc__ = "functions for make pretty yaml files" 2 | __all__ = ['dumps'] 3 | 4 | from typing import Any, Iterable, List, Optional 5 | 6 | 7 | def dumps_simple(val: Any) -> str: 8 | bad_symbols = set(" \r\t\n,':{}[]><;") 9 | 10 | if isinstance(val, str): 11 | val = val.encode('utf8') 12 | 13 | try: 14 | float(val) 15 | val = repr(val) 16 | except ValueError: 17 | if len(bad_symbols & set(val)) != 0: 18 | val = repr(val) 19 | 20 | return val 21 | elif val is True: 22 | return 'true' 23 | elif val is False: 24 | return 'false' 25 | elif val is None: 26 | return 'null' 27 | 28 | return str(val) 29 | 30 | 31 | def is_simple(val: Any) -> bool: 32 | simple_type = isinstance(val, (str, int, bool, float)) 33 | return simple_type or val is None 34 | 35 | 36 | def all_nums(vals: Iterable[Any]) -> bool: 37 | return all(isinstance(val, (int, float)) for val in vals) 38 | 39 | 40 | def dumpv(data: Any, tab_sz: int = 4, width: int = 160, min_width: int = 40) -> List[str]: 41 | tab = ' ' * tab_sz 42 | 43 | if width < min_width: 44 | width = min_width 45 | 46 | res = [] # type: List[str] 47 | if is_simple(data): 48 | return [dumps_simple(data)] 49 | 50 | if isinstance(data, (list, tuple)): 51 | if all(map(is_simple, data)): 52 | join_str = ", " if all_nums(data) else "," 53 | one_line: Optional[str] = "[" + join_str.join(map(dumps_simple, data)) + "]" 54 | elif len(data) == 0: 55 | one_line = "[]" 56 | else: 57 | one_line = None 58 | 59 | if one_line is None or len(one_line) > width: 60 | pref = "-" + ' ' * (tab_sz - 1) 61 | 62 | for val in data: 63 | items = dumpv(val, tab_sz, width - tab_sz, min_width) 64 | items = [pref + items[0]] + \ 65 | [tab + item for item in items[1:]] 66 | res.extend(items) 67 | else: 68 | res.append(one_line) 69 | elif isinstance(data, dict): 70 | if len(data) == 0: 71 | res.append("{}") 72 | else: 73 | assert all(map(is_simple, data.keys())) 74 | 75 | one_line = None 76 | if all(map(is_simple, data.values())): 77 | one_line = ", ".join(f"{dumps_simple(k)}: {dumps_simple(v)}" 
for k, v in sorted(data.items())) 78 | one_line = "{" + one_line + "}" 79 | if len(one_line) > width: 80 | one_line = None 81 | 82 | if one_line is None: 83 | for k, v in data.items(): 84 | key_str = dumps_simple(k) + ": " 85 | val_res = dumpv(v, tab_sz, width - tab_sz, min_width) 86 | 87 | if len(val_res) == 1 and \ 88 | len(key_str + val_res[0]) < width and \ 89 | not isinstance(v, dict) and \ 90 | not val_res[0].strip().startswith('-'): 91 | res.append(key_str + val_res[0]) 92 | else: 93 | res.append(key_str) 94 | res.extend(tab + i for i in val_res) 95 | else: 96 | res.append(one_line) 97 | else: 98 | try: 99 | get_yamable = data.get_yamable 100 | except AttributeError: 101 | raise ValueError("Can't pack {0!r}".format(data)) 102 | res = dumpv(get_yamable(), tab_sz, width, min_width) 103 | 104 | return res 105 | 106 | 107 | def dumps(data: Any, tab_sz: int = 4, width: int = 120, min_width: int = 40) -> str: 108 | return "\n".join(dumpv(data, tab_sz, width, min_width)) 109 | -------------------------------------------------------------------------------- /wally/report_profiles.py: -------------------------------------------------------------------------------- 1 | # ---------------- PROFILES ------------------------------------------------------------------------------------------ 2 | 3 | 4 | # this is default values, real values is loaded from config 5 | class ColorProfile: 6 | primary_color = 'b' 7 | suppl_color1 = 'teal' 8 | suppl_color2 = 'magenta' 9 | suppl_color3 = 'orange' 10 | box_color = 'y' 11 | err_color = 'red' 12 | super_outlier_color = 'orange' 13 | 14 | noise_alpha = 0.3 15 | subinfo_alpha = 0.7 16 | 17 | imshow_colormap = None # type: str 18 | hmap_cmap = "Blues" 19 | 20 | 21 | default_format = 'svg' 22 | io_chart_format = 'svg' 23 | 24 | 25 | class StyleProfile: 26 | default_style = 'seaborn-white' 27 | io_chart_style = 'classic' 28 | 29 | dpi = 80 30 | 31 | lat_samples = 5 32 | 33 | tide_layout = False 34 | hist_boxes = 10 35 | hist_lat_boxes = 25 36 | hm_hist_bins_count = 25 37 | hm_x_slots = 25 38 | min_points_for_dev = 5 39 | 40 | x_label_rotation = 35 41 | 42 | dev_range_x = 2.0 43 | dev_perc = 95 44 | 45 | point_shape = 'o' 46 | err_point_shape = '*' 47 | max_hidden_outliers_fraction = 0.05 48 | super_outlier_point_shape_up = '^' 49 | super_outlier_point_shape_down = 'v' 50 | 51 | avg_range = 20 52 | approx_average = True 53 | approx_average_no_points = False 54 | 55 | curve_approx_level = 6 56 | curve_approx_points = 100 57 | assert avg_range >= min_points_for_dev 58 | 59 | # figure size in inches 60 | figsize = (8, 4) 61 | figsize_long = (8, 4) 62 | qd_chart_inches = (16, 9) 63 | 64 | subplot_adjust_r = 0.75 65 | subplot_adjust_r_no_legend = 0.9 66 | title_font_size = 12 67 | 68 | extra_io_spine = True 69 | 70 | legend_for_eng = True 71 | 72 | # heatmap interpolation is deprecated 73 | # heatmap_interpolation = '1d' 74 | # heatmap_interpolation = None 75 | # heatmap_interpolation_points = 300 76 | 77 | heatmap_colorbar = False 78 | outliers_q_nd = 3 79 | outliers_hide_q_nd = 4 80 | outliers_lat = (0.01, 0.95) 81 | 82 | violin_instead_of_box = True 83 | violin_point_count = 30000 84 | 85 | min_iops_vs_qd_jobs = 3 86 | 87 | qd_bins = [0, 1, 2, 4, 6, 8, 12, 16, 20, 26, 32, 40, 48, 56, 64, 96, 128] 88 | iotime_bins = list(range(0, 1030, 50)) 89 | block_size_bins = [0, 2, 4, 8, 16, 32, 48, 64, 96, 128, 192, 256, 384, 512, 1024, 2048] 90 | large_blocks = 256 91 | 92 | min_load_diff = 0.05 93 | 94 | histo_grid = 'x' 95 | 96 | 97 | DefColorProfile = ColorProfile() 98 | 
DefStyleProfile = StyleProfile() 99 | -------------------------------------------------------------------------------- /wally/result_classes.py: -------------------------------------------------------------------------------- 1 | import abc 2 | from typing import Dict, List, Any, Tuple, cast, Type, Iterator, Union 3 | 4 | from cephlib.numeric_types import TimeSeries, DataSource 5 | from cephlib.statistic import StatProps 6 | from cephlib.istorage import IImagesStorage, Storable, ISensorStorage 7 | from cephlib.node import NodeInfo 8 | from cephlib.node_impl import IRPCNode 9 | 10 | from .suits.job import JobConfig 11 | 12 | 13 | class SuiteConfig(Storable): 14 | """ 15 | Test suite input configuration. 16 | 17 | test_type - test type name 18 | params - parameters from yaml file for this test 19 | run_uuid - UUID to be used to create file names & Co 20 | nodes - nodes to run tests on 21 | remote_dir - directory on nodes to be used for local files 22 | """ 23 | __ignore_fields__ = ['nodes', 'run_uuid', 'remote_dir'] 24 | 25 | def __init__(self, 26 | test_type: str, 27 | params: Dict[str, Any], 28 | run_uuid: str, 29 | nodes: List[IRPCNode], 30 | remote_dir: str, 31 | idx: int, 32 | keep_raw_files: bool) -> None: 33 | self.test_type = test_type 34 | self.params = params 35 | self.run_uuid = run_uuid 36 | self.nodes = nodes 37 | self.nodes_ids = [node.node_id for node in nodes] 38 | self.remote_dir = remote_dir 39 | self.keep_raw_files = keep_raw_files 40 | 41 | if 'load' in self.params: 42 | self.storage_id = "{}_{}_{}".format(self.test_type, self.params['load'], idx) 43 | else: 44 | self.storage_id = "{}_{}".format(self.test_type, idx) 45 | 46 | def __eq__(self, o: object) -> bool: 47 | if type(o) is not self.__class__: 48 | return False 49 | 50 | other = cast(SuiteConfig, o) 51 | 52 | return (self.test_type == other.test_type and 53 | self.params == other.params and 54 | set(self.nodes_ids) == set(other.nodes_ids)) 55 | 56 | 57 | # (node_name, source_dev, metric_name) => metric_results 58 | JobMetrics = Dict[Tuple[str, str, str], TimeSeries] 59 | JobStatMetrics = Dict[Tuple[str, str, str], StatProps] 60 | 61 | 62 | class IWallyStorage(ISensorStorage, IImagesStorage, metaclass=abc.ABCMeta): 63 | 64 | @abc.abstractmethod 65 | def flush(self) -> None: 66 | pass 67 | 68 | @abc.abstractmethod 69 | def put_or_check_suite(self, suite: SuiteConfig) -> None: 70 | pass 71 | 72 | @abc.abstractmethod 73 | def put_job(self, suite: SuiteConfig, job: JobConfig) -> None: 74 | pass 75 | 76 | @abc.abstractmethod 77 | def put_extra(self, data: bytes, source: DataSource) -> None: 78 | pass 79 | 80 | @abc.abstractmethod 81 | def put_stat(self, data: StatProps, source: DataSource) -> None: 82 | pass 83 | 84 | @abc.abstractmethod 85 | def get_stat(self, stat_cls: Type[StatProps], source: DataSource) -> StatProps: 86 | pass 87 | 88 | @abc.abstractmethod 89 | def iter_suite(self, suite_type: str = None) -> Iterator[SuiteConfig]: 90 | pass 91 | 92 | @abc.abstractmethod 93 | def iter_job(self, suite: SuiteConfig) -> Iterator[JobConfig]: 94 | pass 95 | 96 | # return path to file to be inserted into report 97 | @abc.abstractmethod 98 | def put_plot_file(self, data: bytes, source: DataSource) -> str: 99 | pass 100 | 101 | @abc.abstractmethod 102 | def get_job_info(self, suite: SuiteConfig, job: JobConfig, key: str) -> Any: 103 | pass 104 | 105 | @abc.abstractmethod 106 | def get_ts(self, ds: DataSource) -> TimeSeries: 107 | pass 108 | 109 | @abc.abstractmethod 110 | def put_ts(self, ts: TimeSeries) -> None: 111 | pass 
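    # Usage sketch (illustrative only; every name below is taken from console_report.py and
    # data_selectors.py in this repo, nothing here is new API):
    #
    #     for suite in rstorage.iter_suite(FioTest.name):
    #         for job in rstorage.iter_job(suite):
    #             for ds in rstorage.iter_ts(suite_id=suite.storage_id, job_id=job.storage_id, metric='bw'):
    #                 ts = rstorage.get_ts(ds)   # TimeSeries with .data, .times and .units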
112 | 113 | @abc.abstractmethod 114 | def iter_ts(self, **ds_parts) -> Iterator[DataSource]: 115 | pass 116 | 117 | @abc.abstractmethod 118 | def put_job_info(self, suite: SuiteConfig, job: JobConfig, key: str, data: Any) -> None: 119 | pass 120 | 121 | @abc.abstractmethod 122 | def load_nodes(self) -> List[NodeInfo]: 123 | pass -------------------------------------------------------------------------------- /wally/stage.py: -------------------------------------------------------------------------------- 1 | import abc 2 | from typing import Optional 3 | 4 | from .test_run_class import TestRun 5 | from .config import ConfigBlock 6 | 7 | 8 | class StepOrder: 9 | DISCOVER = 0 10 | SPAWN = 10 11 | CONNECT = 20 12 | UPDATE_NODES_INFO = 30 13 | START_SENSORS = 40 14 | TEST = 50 15 | COLLECT_SENSORS = 60 16 | STOP_SENSORS = 70 17 | REPORT = 80 18 | 19 | 20 | class Stage(metaclass=abc.ABCMeta): 21 | priority: int = None # type: ignore 22 | config_block: Optional[str] = None 23 | 24 | @classmethod 25 | def name(cls) -> str: 26 | return cls.__name__ 27 | 28 | @classmethod 29 | def validate_config(cls, cfg: ConfigBlock) -> None: 30 | pass 31 | 32 | @abc.abstractmethod 33 | def run(self, ctx: TestRun) -> None: 34 | pass 35 | 36 | def cleanup(self, ctx: TestRun) -> None: 37 | pass 38 | 39 | -------------------------------------------------------------------------------- /wally/storage_structure.yaml: -------------------------------------------------------------------------------- 1 | # {node} - node id in format '\d+.\d+.\d+.\d+:\d+' 2 | # {descr} - test short description '[-a-zA-Z0-9]+' 3 | # {metric_name} - metrics name '[a-z_]+' 4 | # {id} - test/suite run id '\d+' 5 | # {dev} - device name '[^.]+' 6 | # {suite} - suite name '[a-z]+' 7 | # {profile} - profile name '[a-z_]+' 8 | # {sensor} - sensor name '[-a-z]+' 9 | 10 | 11 | config: Config # test input configuration 12 | all_nodes: List[NodeInfo] # all discovered nodes 13 | cli: List[str] # cli options 14 | spawned_nodes_ids: List[int] # list of openstack VM ids, spawned for test 15 | fuel_version: List[int] # FUEL master node version 16 | fuel_os_creds: OSCreds # openstack creds, discovered from fuel (or None) 17 | openstack_openrc: OSCreds # openrc used for openstack cluster 18 | 'results/{suite}.info.yaml': SuiteConfig # test job(iteration) input config, {id} is id of first job in suite 19 | 'results/{suite}.{job}/{node}.{loader}.{metric}.{tag}': 20 | 'sensors/{node}_{sensor}.{dev}.{metric_name}.{tag}': # sensor values 21 | 'sensors/{node}_collected_at.csv': 22 | 'rpc_logs/{node}.txt' : bytes # rpc server log from node 23 | -------------------------------------------------------------------------------- /wally/suits/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mirantis/disk_perf_test_tool/aaad8b81218d0907e9aa1425e18b1a044f06960d/wally/suits/__init__.py -------------------------------------------------------------------------------- /wally/suits/all_suits.py: -------------------------------------------------------------------------------- 1 | from .io.fio import FioTest 2 | # from .suits.itest import TestSuiteConfig 3 | # from .suits.mysql import MysqlTest 4 | # from .suits.omgbench import OmgTest 5 | # from .suits.postgres import PgBenchTest 6 | 7 | 8 | all_suits = {suite.name: suite for suite in [FioTest]} 9 | -------------------------------------------------------------------------------- /wally/suits/io/__init__.py: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/Mirantis/disk_perf_test_tool/aaad8b81218d0907e9aa1425e18b1a044f06960d/wally/suits/io/__init__.py -------------------------------------------------------------------------------- /wally/suits/io/ceph.cfg: -------------------------------------------------------------------------------- 1 | [global] 2 | include defaults_qd.cfg 3 | 4 | # QD_R={% 1, 5, 10, 15, 25, 40, 80, 120 %} 5 | QD_R={% 1, 5, 10, 20, 40, 60, 100, 150, 200, 500 %} 6 | QD_W={% 1, 5, 10, 20, 40, 60, 100, 150, 200 %} 7 | QD_SEQ_R=1 8 | QD_SEQ_W=1 9 | 10 | ramp_time=15 11 | runtime=180 12 | 13 | # --------------------------------------------------------------------- 14 | # check different QD, direct mode. (latency, iops) = func(th_count) 15 | # --------------------------------------------------------------------- 16 | [ceph_{TEST_SUMM}] 17 | blocksize=4k 18 | rw=randwrite 19 | iodepth={QD_W} 20 | 21 | # --------------------------------------------------------------------- 22 | # check different QD, direct read mode. (latency, iops) = func(th_count) 23 | # also check iops for randread 24 | # --------------------------------------------------------------------- 25 | [ceph_{TEST_SUMM}] 26 | blocksize=4k 27 | rw=randread 28 | iodepth={QD_R} 29 | 30 | # --------------------------------------------------------------------- 31 | # sync write - disabled for now 32 | # --------------------------------------------------------------------- 33 | #[ceph_{TEST_SUMM}] 34 | #blocksize=4k 35 | #rw=randwrite 36 | #direct=1 37 | #sync=1 38 | #numjobs=1 39 | 40 | # --------------------------------------------------------------------- 41 | # this is essentially sequential write operations 42 | # we can't use sequential with numjobs > 1 due to caching and block merging 43 | # --------------------------------------------------------------------- 44 | [ceph_{TEST_SUMM}] 45 | blocksize=1m 46 | rw=write 47 | iodepth=1 48 | # offset_increment={OFFSET_INC} 49 | 50 | #[ceph_{TEST_SUMM}] 51 | #blocksize=16m 52 | #rw=randwrite 53 | #iodepth={QD_SEQ_W} 54 | 55 | # --------------------------------------------------------------------- 56 | # this is essentially sequential read operations 57 | # we can't use sequential with numjobs > 1 due to caching and block merging 58 | # --------------------------------------------------------------------- 59 | [ceph_{TEST_SUMM}] 60 | blocksize=16m 61 | rw=randread 62 | iodepth={QD_SEQ_R} 63 | -------------------------------------------------------------------------------- /wally/suits/io/check_distribution.cfg: -------------------------------------------------------------------------------- 1 | [global] 2 | include defaults_qd.cfg 3 | 4 | [distrubution_test_{TEST_SUMM}] 5 | blocksize=4k 6 | rw=randwrite 7 | direct=1 8 | sync=0 9 | ramp_time=30 10 | runtime=1800 11 | iodepth=4 12 | -------------------------------------------------------------------------------- /wally/suits/io/check_linearity.cfg: -------------------------------------------------------------------------------- 1 | [global] 2 | include defaults_qd.cfg 3 | direct=1 4 | ramp_time=5 5 | runtime=30 6 | BLOCK_SIZES={% 512,1k,2k,4k,8k,16k,32k,128k,256k,512k,1m %} 7 | 8 | # --------------------------------------------------------------------- 9 | # check read and write linearity. 
oper_time = func(size) 10 | # --------------------------------------------------------------------- 11 | [linearity_test_{TEST_SUMM}] 12 | blocksize={BLOCK_SIZES} 13 | rw=randread 14 | iodepth=4 15 | 16 | # --------------------------------------------------------------------- 17 | # check sync write linearity. oper_time = func(size) 18 | # check sync BW as well 19 | # --------------------------------------------------------------------- 20 | [linearity_test_{TEST_SUMM}] 21 | blocksize={BLOCK_SIZES} 22 | rw=randwrite 23 | sync=1 24 | -------------------------------------------------------------------------------- /wally/suits/io/cinder_iscsi.cfg: -------------------------------------------------------------------------------- 1 | [global] 2 | include defaults_qd.cfg 3 | ramp_time=30 4 | runtime=180 5 | direct=1 6 | sync=0 7 | QD={% 1, 5, 10, 15, 25, 40 %} 8 | 9 | # --------------------------------------------------------------------- 10 | # check different thread count, sync mode. (latency, iops) = func(th_count) 11 | # --------------------------------------------------------------------- 12 | [cinder_iscsi_{TEST_SUMM}] 13 | blocksize=4k 14 | rw=randwrite 15 | iodepth={QD} 16 | 17 | # --------------------------------------------------------------------- 18 | # check different thread count, direct read mode. (latency, iops) = func(th_count) 19 | # also check iops for randread 20 | # --------------------------------------------------------------------- 21 | [cinder_iscsi_{TEST_SUMM}] 22 | blocksize=4k 23 | rw=randread 24 | iodepth={QD} 25 | 26 | # --------------------------------------------------------------------- 27 | # Read always sync, with large request latency linear write 28 | # ...... 29 | # --------------------------------------------------------------------- 30 | [cinder_iscsi_{TEST_SUMM}] 31 | blocksize=8m 32 | rw=read 33 | iodepth=1 34 | 35 | # --------------------------------------------------------------------- 36 | # No reason for th count > 1 in case of sequential operations 37 | # or they become random 38 | # --------------------------------------------------------------------- 39 | [cinder_iscsi_{TEST_SUMM}] 40 | blocksize=8m 41 | rw=write 42 | iodepth=1 43 | -------------------------------------------------------------------------------- /wally/suits/io/defaults.cfg: -------------------------------------------------------------------------------- 1 | buffered=0 2 | group_reporting=1 3 | iodepth=1 4 | unified_rw_reporting=1 5 | 6 | norandommap=1 7 | 8 | thread=1 9 | time_based=1 10 | wait_for_previous=1 11 | 12 | # this is critical for correct results in multi-node run 13 | randrepeat=0 14 | 15 | filename={FILENAME} 16 | 17 | size={TEST_FILE_SIZE} 18 | 19 | write_lat_log=fio_log 20 | write_iops_log=fio_log 21 | write_bw_log=fio_log 22 | log_avg_msec=500 23 | 24 | 25 | -------------------------------------------------------------------------------- /wally/suits/io/defaults_qd.cfg: -------------------------------------------------------------------------------- 1 | buffered=0 2 | direct=1 3 | sync=0 4 | ioengine=libaio 5 | 6 | group_reporting=1 7 | unified_rw_reporting=1 8 | norandommap=1 9 | numjobs=1 10 | thread=1 11 | time_based=1 12 | wait_for_previous=1 13 | per_job_logs=0 14 | 15 | # this is critical for correct results in multi-node run 16 | randrepeat=0 17 | 18 | filename={FILENAME} 19 | size={FILESIZE} 20 | 21 | write_bw_log=fio_bw_log 22 | log_avg_msec=1000 23 | write_hist_log=fio_lat_hist_log 24 | log_hist_coarseness=0 25 | log_hist_msec=1000 26 | log_unix_epoch=1
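The logging options above make fio emit one averaged bandwidth sample per second; a minimal sketch of loading such a log (the "fio_bw_log_bw.log" file name and the time/value/direction/blocksize column order are assumptions about fio's log format, not something defined in this repo):

from typing import List, Tuple

def load_bw_log(path: str = "fio_bw_log_bw.log") -> List[Tuple[int, int]]:
    # one CSV row per sample: time in ms (unix epoch ms here, because log_unix_epoch=1),
    # bandwidth in KiB/s averaged over log_avg_msec, data direction, block size
    samples = []
    with open(path) as fd:
        for line in fd:
            if line.strip():
                t_ms, bw_kibps = [int(v) for v in line.split(",")[:2]]
                samples.append((t_ms, bw_kibps))
    return samples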
27 | -------------------------------------------------------------------------------- /wally/suits/io/fio_hist.py: -------------------------------------------------------------------------------- 1 | from typing import List 2 | 3 | 4 | #---------------------------- FIO HIST LOG PARSE CODE ----------------------------------------------------------------- 5 | 6 | # Copy-paste from fio/tools/hist/fiologparser_hist.py. 7 | # Because that's impossible to understand or improve, 8 | # you can only copy such a pearl. 9 | 10 | def _plat_idx_to_val(idx: int , edge: float = 0.5, FIO_IO_U_PLAT_BITS: int = 6, FIO_IO_U_PLAT_VAL: int = 64) -> float: 11 | """ Taken from fio's stat.c for calculating the latency value of a bin 12 | from that bin's index. 13 | 14 | idx : the value of the index into the histogram bins 15 | edge : fractional value in the range [0,1]** indicating how far into 16 | the bin we wish to compute the latency value of. 17 | 18 | ** edge = 0.0 and 1.0 computes the lower and upper latency bounds 19 | respectively of the given bin index. """ 20 | 21 | # MSB <= (FIO_IO_U_PLAT_BITS-1), cannot be rounded off. Use 22 | # all bits of the sample as index 23 | if (idx < (FIO_IO_U_PLAT_VAL << 1)): 24 | return idx 25 | 26 | # Find the group and compute the minimum value of that group 27 | error_bits = (idx >> FIO_IO_U_PLAT_BITS) - 1 28 | base = 1 << (error_bits + FIO_IO_U_PLAT_BITS) 29 | 30 | # Find its bucket number of the group 31 | k = idx % FIO_IO_U_PLAT_VAL 32 | 33 | # Return the mean (if edge=0.5) of the range of the bucket 34 | return base + ((k + edge) * (1 << error_bits)) 35 | 36 | 37 | def plat_idx_to_val_coarse(idx: int, coarseness: int, edge: float = 0.5) -> float: 38 | """ Converts the given *coarse* index into a non-coarse index as used by fio 39 | in stat.h:plat_idx_to_val(), subsequently computing the appropriate 40 | latency value for that bin. 41 | """ 42 | 43 | # Multiply the index by the power of 2 coarseness to get the bin 44 | # bin index with a max of 1536 bins (FIO_IO_U_PLAT_GROUP_NR = 24 in stat.h) 45 | stride = 1 << coarseness 46 | idx = idx * stride 47 | lower = _plat_idx_to_val(idx, edge=0.0) 48 | upper = _plat_idx_to_val(idx + stride, edge=1.0) 49 | return lower + (upper - lower) * edge 50 | 51 | 52 | def get_lat_vals(columns: int, coarseness: int = 0) -> List[float]: 53 | # convert ns to ms 54 | if columns == 1216: 55 | coef = 1 56 | elif columns == 1856: 57 | coef = 1000 58 | 59 | return [plat_idx_to_val_coarse(val, coarseness) / coef for val in range(columns)] 60 | 61 | -------------------------------------------------------------------------------- /wally/suits/io/hdd.cfg: -------------------------------------------------------------------------------- 1 | [global] 2 | include defaults_qd.cfg 3 | 4 | QD={% 1, 2, 4, 8, 16, 32, 64 %} 5 | runtime=300 6 | direct=1 7 | 8 | # --------------------------------------------------------------------- 9 | # check different thread count, direct read mode. 
(latency, iops) = func(QD) 10 | # --------------------------------------------------------------------- 11 | [hdd_{TEST_SUMM}] 12 | blocksize=4k 13 | rw={% randread, randwrite %} 14 | iodepth={QD} 15 | 16 | # --------------------------------------------------------------------- 17 | # No reason for QD > 1 in case of sequential operations 18 | # ot they became random 19 | # --------------------------------------------------------------------- 20 | [hdd_{TEST_SUMM}] 21 | blocksize=1m 22 | rw={% read, write %} 23 | iodepth=1 24 | -------------------------------------------------------------------------------- /wally/suits/io/lat_vs_iops.cfg: -------------------------------------------------------------------------------- 1 | [global] 2 | include defaults_qd.cfg 3 | 4 | ramp_time=5 5 | runtime=30 6 | 7 | blocksize=4k 8 | rw=randwrite 9 | sync=0 10 | direct=1 11 | 12 | # --------------------------------------------------------------------- 13 | # latency as function from IOPS 14 | # --------------------------------------------------------------------- 15 | [latVSiops{rate_iops}_{TEST_SUMM}] 16 | iodepth=1 17 | rate_iops={% 20, 40, 60, 80, 100, 120, 160, 200, 250, 300 %} 18 | 19 | # --------------------------------------------------------------------- 20 | # latency as function from IOPS 21 | # --------------------------------------------------------------------- 22 | [latVSiops{rate_iops}_{TEST_SUMM}] 23 | iodepth=3 24 | rate_iops={% 10, 20, 40, 60, 80, 100, 120, 160 %} 25 | 26 | # --------------------------------------------------------------------- 27 | # latency as function from IOPS 28 | # --------------------------------------------------------------------- 29 | [latVSiops{rate_iops}_{TEST_SUMM}] 30 | iodepth=7 31 | rate_iops={% 5, 10, 20, 40, 50, 60, 70 %} 32 | 33 | # --------------------------------------------------------------------- 34 | # latency as function from IOPS 35 | # --------------------------------------------------------------------- 36 | [latVSiops{rate_iops}_{TEST_SUMM}] 37 | iodepth=10 38 | rate_iops={% 5, 10, 20, 40, 50 %} 39 | -------------------------------------------------------------------------------- /wally/suits/io/mixed_hdd.cfg: -------------------------------------------------------------------------------- 1 | [global] 2 | include defaults_qd.cfg 3 | ramp_time=5 4 | runtime=30 5 | blocksize=4k 6 | rw=randrw 7 | sync=0 8 | direct=1 9 | 10 | [mixed-hdd-r{rwmixread}_{TEST_SUMM}] 11 | rwmixread={% 0,20,40,60,80,100 %} 12 | iodepth={% 1,8,16 %} 13 | -------------------------------------------------------------------------------- /wally/suits/io/mixed_ssd.cfg: -------------------------------------------------------------------------------- 1 | [global] 2 | include defaults_qd.cfg 3 | ramp_time=5 4 | runtime=30 5 | blocksize=4k 6 | rw=randrw 7 | sync=0 8 | direct=1 9 | 10 | [mixed-ssd-r{rwmixread}_{TEST_SUMM}] 11 | rwmixread={% 0,20,40,60,80,85,90,95,100 %} 12 | iodepth={% 1,16,64,128 %} 13 | -------------------------------------------------------------------------------- /wally/suits/io/one_step.cfg: -------------------------------------------------------------------------------- 1 | [global] 2 | include defaults_qd.cfg 3 | ramp_time=0 4 | runtime={RUNTIME} 5 | 6 | [test_{TEST_SUMM}] 7 | blocksize=60k 8 | rw=randwrite 9 | iodepth=1 -------------------------------------------------------------------------------- /wally/suits/io/rpc_plugin.py: -------------------------------------------------------------------------------- 1 | import os 2 | import time 3 | import 
stat 4 | import random 5 | import logging 6 | import subprocess 7 | 8 | 9 | mod_name = "fio" 10 | __version__ = (0, 1) 11 | 12 | 13 | logger = logging.getLogger("agent.fio") 14 | 15 | 16 | # TODO: fix this in case if file is block device 17 | def check_file_prefilled(path, used_size_mb, blocks_to_check=16): 18 | used_size = used_size_mb * 1024 ** 2 19 | 20 | try: 21 | fstats = os.stat(path) 22 | if stat.S_ISREG(fstats.st_mode) and fstats.st_size < used_size: 23 | return False 24 | except EnvironmentError: 25 | return False 26 | 27 | offsets = [0, used_size - 1024] + [random.randrange(used_size - 1024) for _ in range(blocks_to_check)] 28 | logger.debug(str(offsets)) 29 | with open(path, 'rb') as fd: 30 | for offset in offsets: 31 | fd.seek(offset) 32 | if b"\x00" * 1024 == fd.read(1024): 33 | return False 34 | 35 | return True 36 | 37 | 38 | def rpc_fill_file(fname, size, force=False, fio_path='fio'): 39 | if not force: 40 | if check_file_prefilled(fname, size): 41 | return False, None 42 | 43 | assert size % 4 == 0, "File size must be proportional to 4M" 44 | 45 | cmd_templ = "{0} --name=xxx --filename={1} --direct=1 --bs=4m --size={2}m --rw=write" 46 | 47 | run_time = time.time() 48 | try: 49 | subprocess.check_output(cmd_templ.format(fio_path, fname, size), shell=True) 50 | except subprocess.CalledProcessError as exc: 51 | raise RuntimeError("{0!s}.\nOutput: {1}".format(exc, exc.output)) 52 | run_time = time.time() - run_time 53 | 54 | prefill_bw = None if run_time < 1.0 else int(size / run_time) 55 | 56 | return True, prefill_bw 57 | 58 | 59 | def rpc_install(name, binary): 60 | try: 61 | subprocess.check_output("which {0}".format(binary), shell=True) 62 | except: 63 | subprocess.check_output("apt-get install -y {0}".format(name), shell=True) 64 | -------------------------------------------------------------------------------- /wally/suits/io/rpc_plugin.pyi: -------------------------------------------------------------------------------- 1 | from typing import Any, Optional, Dict, List 2 | 3 | def rpc_run_fio(cfg: Dict[str, str]) -> Any: ... 4 | def rpc_check_file_prefilled(path: str, used_size_mb: int) -> bool: ... 5 | def rpc_prefill_test_files(files: Dict[str, int], force: bool = False, fio_path: str = 'fio') -> Optional[int]: ... 6 | 7 | 8 | def load_fio_log_file(fname: str) -> List[float]: ... 
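A minimal local sketch of exercising the prefill helpers from rpc_plugin.py above (the target path and size are illustrative; in a real run these functions are invoked on test nodes through wally's RPC layer rather than imported directly):

import rpc_plugin  # the module shown above, loaded on nodes as the "fio" agent plugin

path = "/tmp/wally_prefill_test"   # illustrative target file
size_mb = 64                       # must be a multiple of 4, as asserted by rpc_fill_file

if not rpc_plugin.check_file_prefilled(path, size_mb):
    written, prefill_bw = rpc_plugin.rpc_fill_file(path, size_mb, force=False, fio_path="fio")
    # prefill_bw is an approximate fill bandwidth (MiB/s) or None when the run took under a second
    print("prefilled:", written, "bandwidth:", prefill_bw)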
9 | -------------------------------------------------------------------------------- /wally/suits/io/rrd.cfg: -------------------------------------------------------------------------------- 1 | [global] 2 | include defaults_qd.cfg 3 | ramp_time=15 4 | runtime=180 5 | 6 | [test_swd_4] 7 | blocksize=16m 8 | rw=write 9 | iodepth=4 10 | 11 | [test_swd_8] 12 | blocksize=16m 13 | rw=write 14 | iodepth=8 15 | 16 | [test_srd_4] 17 | blocksize=16m 18 | rw=read 19 | iodepth=4 20 | 21 | [test_srd_8] 22 | blocksize=16m 23 | rw=read 24 | iodepth=8 25 | 26 | #[test_{TEST_SUMM}] 27 | #iodepth=16 28 | #blocksize=60k 29 | #rw=randread 30 | 31 | #[test_{TEST_SUMM}] 32 | #blocksize=60k 33 | #rw=randwrite 34 | #iodepth=1 35 | 36 | #[test_{TEST_SUMM}] 37 | #iodepth=16 38 | #blocksize=60k 39 | #rw=randwrite 40 | 41 | #[test_{TEST_SUMM}] 42 | #iodepth=1 43 | #blocksize=1m 44 | #rw=write 45 | 46 | #[test_{TEST_SUMM}] 47 | #iodepth=1 48 | #blocksize=1m 49 | #rw=read 50 | -------------------------------------------------------------------------------- /wally/suits/io/rrd_qd_scan.cfg: -------------------------------------------------------------------------------- 1 | [global] 2 | include defaults_qd.cfg 3 | ramp_time=0 4 | runtime={RUNTIME} 5 | 6 | [test_{TEST_SUMM}] 7 | blocksize=4k 8 | rw=randread 9 | iodepth={QDS} 10 | -------------------------------------------------------------------------------- /wally/suits/io/rrd_raw.cfg: -------------------------------------------------------------------------------- 1 | [test] 2 | blocksize=4k 3 | rw=randwrite 4 | iodepth=1 5 | ramp_time=0 6 | runtime=120 7 | buffered=0 8 | direct=1 9 | sync=0 10 | ioengine=libaio 11 | group_reporting=1 12 | unified_rw_reporting=1 13 | norandommap=1 14 | numjobs=1 15 | thread=1 16 | time_based=1 17 | wait_for_previous=1 18 | per_job_logs=0 19 | randrepeat=0 20 | filename=/dev/rbd0 21 | size=1G 22 | ;verify_pattern=0x00 23 | buffer_compress_percentage=99 24 | write_bw_log=/tmp/bw.non-compress.log 25 | -------------------------------------------------------------------------------- /wally/suits/io/verify.cfg: -------------------------------------------------------------------------------- 1 | [global] 2 | include defaults_qd.cfg 3 | QDW={% 4, 16, 32, 64, 128, 256 %} 4 | QDR={% 16, 32, 64, 128, 256, 512 %} 5 | 6 | LQDW={% 1, 4, 16, 64 %} 7 | LQDR={% 1, 4, 16, 64 %} 8 | 9 | runtime={RUNTIME} 10 | direct=1 11 | ramp_time={RAMPTIME} 12 | 13 | # --------------------------------------------------------------------- 14 | 15 | [verify_{TEST_SUMM}] 16 | blocksize=1m 17 | rw=write 18 | iodepth={LQDW} 19 | 20 | [verify_{TEST_SUMM}] 21 | blocksize=1m 22 | rw=randread:16 23 | iodepth={LQDR} 24 | 25 | [verify_{TEST_SUMM}] 26 | blocksize=4k 27 | rw=randwrite 28 | iodepth={QDW} 29 | 30 | [verify_{TEST_SUMM}] 31 | blocksize=4k 32 | rw=randread 33 | iodepth={QDR} 34 | 35 | [verify_{TEST_SUMM}] 36 | blocksize=4k 37 | rw=randwrite 38 | sync=1 39 | iodepth=1 40 | -------------------------------------------------------------------------------- /wally/suits/job.py: -------------------------------------------------------------------------------- 1 | import abc 2 | from typing import Dict, Any, Tuple, cast, Union, NamedTuple 3 | from collections import OrderedDict 4 | 5 | from cephlib.istorage import Storable 6 | 7 | 8 | Var = NamedTuple('Var', [('name', str)]) 9 | 10 | 11 | class JobParams(metaclass=abc.ABCMeta): 12 | """Class contains all job parameters, which significantly affects job results. 
13 | Like block size or operation type, but not file name or file size. 14 | Can be used as key in dictionary 15 | """ 16 | 17 | def __init__(self, **params: Dict[str, Any]) -> None: 18 | self.params = params 19 | 20 | @property 21 | @abc.abstractmethod 22 | def summary(self) -> str: 23 | """Test short summary, used mostly for file names and short image description""" 24 | pass 25 | 26 | @property 27 | @abc.abstractmethod 28 | def long_summary(self) -> str: 29 | """Readable long summary for management and deployment engineers""" 30 | pass 31 | 32 | @abc.abstractmethod 33 | def copy(self, **updated) -> 'JobParams': 34 | pass 35 | 36 | def __getitem__(self, name: str) -> Any: 37 | return self.params[name] 38 | 39 | def __setitem__(self, name: str, val: Any) -> None: 40 | self.params[name] = val 41 | 42 | def __hash__(self) -> int: 43 | return hash(self.char_tpl) 44 | 45 | def __eq__(self, o: object) -> bool: 46 | if not isinstance(o, self.__class__): 47 | raise TypeError(f"Can't compare {self.__class__.__qualname__!r} to {type(o).__qualname__!r}") 48 | return sorted(self.params.items()) == sorted(cast(JobParams, o).params.items()) 49 | 50 | def __lt__(self, o: object) -> bool: 51 | if not isinstance(o, self.__class__): 52 | raise TypeError(f"Can't compare {self.__class__.__qualname__!r} to {type(o).__qualname__!r}") 53 | return self.char_tpl < cast(JobParams, o).char_tpl 54 | 55 | @property 56 | @abc.abstractmethod 57 | def char_tpl(self) -> Tuple[Union[str, int, float, bool], ...]: 58 | pass 59 | 60 | 61 | class JobConfig(Storable, metaclass=abc.ABCMeta): 62 | """Job config class""" 63 | 64 | def __init__(self, idx: int) -> None: 65 | # job id, used in storage to distinct jobs with same summary 66 | self.idx = idx 67 | 68 | # time interval, in seconds, when test was running on all nodes 69 | self.reliable_info_range: Tuple[int, int] = None # type: ignore 70 | 71 | # all job parameters, both from suite file and config file 72 | self.vals: Dict[str, Any] = OrderedDict() 73 | 74 | @property 75 | def reliable_info_range_s(self) -> Tuple[int, int]: 76 | return (self.reliable_info_range[0] // 1000, self.reliable_info_range[1] // 1000) 77 | 78 | @property 79 | def storage_id(self) -> str: 80 | """unique string, used as key in storage""" 81 | return f"{self.summary}_{self.idx}" 82 | 83 | @property 84 | @abc.abstractmethod 85 | def params(self) -> JobParams: 86 | """Should return a copy""" 87 | pass 88 | 89 | @property 90 | def summary(self) -> str: 91 | return self.params.summary 92 | -------------------------------------------------------------------------------- /wally/suits/mysql/__init__.py: -------------------------------------------------------------------------------- 1 | import os.path 2 | 3 | import texttable 4 | 5 | from ..itest import TwoScriptTest 6 | 7 | 8 | class MysqlTest(TwoScriptTest): 9 | root = os.path.dirname(__file__) 10 | pre_run_script = os.path.join(root, "prepare.sh") 11 | run_script = os.path.join(root, "run.sh") 12 | 13 | @classmethod 14 | def format_for_console(cls, data): 15 | tab = texttable.Texttable(max_width=120) 16 | tab.set_deco(tab.HEADER | tab.VLINES | tab.BORDER) 17 | tab.header(["TpmC"]) 18 | tab.add_row([data['res']['TpmC']]) 19 | return tab.draw() 20 | -------------------------------------------------------------------------------- /wally/suits/mysql/prepare.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | while [[ $# > 1 ]] 4 | do 5 | key="$1" 6 | 7 | case $key in 8 | warehouses) 9 | 
WAREHOUSES="$2" 10 | shift 11 | ;; 12 | *) 13 | echo "Unknown option $key" 14 | exit 1 15 | ;; 16 | esac 17 | shift 18 | done 19 | 20 | # install and configure mysql 21 | 22 | DATABASE_PASSWORD=wally 23 | DATABASE_USER=root 24 | DB_NAME=tpcc 25 | 26 | # not prompting db password 27 | debconf-set-selections <$HOME/.my.cnf 43 | [client] 44 | user=$DATABASE_USER 45 | password=$DATABASE_PASSWORD 46 | host=$DATABASE_HOST 47 | EOF 48 | 49 | cd ~ 50 | apt-get -y install bzr 51 | bzr branch lp:~percona-dev/perconatools/tpcc-mysql 52 | cd tpcc-mysql/src 53 | make 54 | 55 | cd .. 56 | mysql -e "CREATE DATABASE $DB_NAME;" 57 | mysql "$DB_NAME" < create_table.sql 58 | mysql "$DB_NAME" < add_fkey_idx.sql 59 | 60 | ./tpcc_load localhost "$DB_NAME" "$DATABASE_USER" "$DATABASE_PASSWORD" "$WAREHOUSES" -------------------------------------------------------------------------------- /wally/suits/mysql/run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | set -x 4 | 5 | while [[ $# > 1 ]] 6 | do 7 | key="$1" 8 | 9 | case $key in 10 | warehouses) 11 | WAREHOUSES="$2" 12 | shift 13 | ;; 14 | *) 15 | echo "Unknown option $key" 16 | exit 1 17 | ;; 18 | esac 19 | shift 20 | done 21 | 22 | DATABASE_PASSWORD=wally 23 | DATABASE_USER=root 24 | DB_NAME=tpcc 25 | 26 | cd ~/tpcc-mysql 27 | ./tpcc_start -h127.0.0.1 "-d$DB_NAME" "-u$DATABASE_USER" "-p$DATABASE_PASSWORD" -w"$WAREHOUSES" -c16 -r10 -l1200 > ~/tpcc-output.log 28 | echo "TpmC:" `cat ~/tpcc-output.log | grep TpmC | grep -o '[0-9,.]\+'` 29 | -------------------------------------------------------------------------------- /wally/suits/omgbench/__init__.py: -------------------------------------------------------------------------------- 1 | import os.path 2 | 3 | 4 | import texttable 5 | 6 | 7 | from ..itest import TwoScriptTest 8 | 9 | 10 | class OmgTest(TwoScriptTest): 11 | root = os.path.dirname(__file__) 12 | pre_run_script = os.path.join(root, "prepare.sh") 13 | run_script = os.path.join(root, "run.sh") 14 | 15 | @classmethod 16 | def format_for_console(cls, data): 17 | success_vals = [] 18 | duration_vals = [] 19 | count = 0 20 | for res in data[0]: 21 | msgs, success, duration = res.raw_result.strip().split('\n') 22 | count += int(msgs) 23 | success_vals.append(float(success)) 24 | duration_vals.append(float(duration)) 25 | 26 | totalt = max(duration_vals) 27 | totalms = int(count / totalt) 28 | sucesst = int(sum(success_vals) / len(success_vals)) 29 | tab = texttable.Texttable(max_width=120) 30 | tab.set_deco(tab.HEADER | tab.VLINES | tab.BORDER) 31 | tab.header(["Bandwidth msg/s", "Success %"]) 32 | tab.add_row([totalms, sucesst]) 33 | return tab.draw() 34 | -------------------------------------------------------------------------------- /wally/suits/omgbench/prepare.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | set -x 5 | 6 | OMGPATN=/tmp 7 | 8 | mkdir -p "$OMGPATN" 9 | cd "$OMGPATN" 10 | 11 | apt-get update 12 | apt-get -y install git python-pip 13 | 14 | git clone https://github.com/openstack/rally 15 | git clone https://github.com/Yulya/omgbenchmark 16 | 17 | mkdir venv 18 | cd rally 19 | ./install_rally.sh -d "$OMGPATN"/venv -y 20 | 21 | cd "$OMGPATN" 22 | source venv/bin/activate 23 | apt-get -y install python-scipy libblas-dev liblapack-dev libatlas-base-dev gfortran 24 | pip install oslo.messaging petname scipy 25 | --------------------------------------------------------------------------------
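Before the omgbench run script below, a brief illustration of how OmgTest.format_for_console above aggregates results: run.sh prints three numbers per run (message count, success percentage, load duration), and the summary is total messages divided by the longest run duration plus the mean success rate. The FakeResult class and sample values in this sketch are invented for illustration and are not part of the repo:

# Illustrative sketch only: FakeResult and the numbers below are invented.
class FakeResult:
    def __init__(self, raw):
        self.raw_result = raw

# two runs, each reporting "messages\nsuccess%\nduration"
data = [[FakeResult("1000\n99.5\n12.5"), FakeResult("1200\n98.0\n13.0")]]

count, success_vals, duration_vals = 0, [], []
for res in data[0]:
    msgs, success, duration = res.raw_result.strip().split('\n')
    count += int(msgs)
    success_vals.append(float(success))
    duration_vals.append(float(duration))

print(int(count / max(duration_vals)))             # 169 messages per second
print(int(sum(success_vals) / len(success_vals)))  # 98 % average success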
/wally/suits/omgbench/run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -e 4 | 5 | 6 | while [[ $# > 1 ]] 7 | do 8 | key="$1" 9 | 10 | case $key in 11 | url) 12 | URL="$2" 13 | shift 14 | ;; 15 | timeout) 16 | TIMEOUT="$2" 17 | shift 18 | ;; 19 | concurrency) 20 | CONC="$2" 21 | shift 22 | ;; 23 | times) 24 | TIMES="$2" 25 | shift 26 | ;; 27 | *) 28 | echo "Unknown option $key" 29 | exit 1 30 | ;; 31 | esac 32 | shift 33 | done 34 | 35 | OMGPATN=/tmp 36 | 37 | cd "$OMGPATN" 38 | source venv/bin/activate 39 | 40 | cd omgbenchmark/rally_plugin 41 | 42 | sed -i -e "s+rabbit:\/\/guest:guest@localhost\/+$URL+g" deployment.json 43 | sed -i -e "s,timeout\": 100,timeout\": $TIMEOUT,g" task_timeout.json 44 | sed -i -e "s,concurrency\": 40,concurrency\": $CONC,g" task_timeout.json 45 | sed -i -e "s,times\": 40,times\": $TIMES,g" task_timeout.json 46 | 47 | rally --plugin-paths . deployment create --file=deployment.json --name=test &> /dev/null 48 | rally --plugin-paths . task start task_timeout.json &> ~/omg.log 49 | 50 | cat ~/omg.log | grep "Messages count" | grep -o '[0-9,.]\+' | tail -1 51 | cat ~/omg.log | grep "total" | grep -o '[0-9,.]\+%' | grep -o '[0-9,.]\+' 52 | cat ~/omg.log | grep "Load duration" | grep -o '[0-9,.]\+' 53 | -------------------------------------------------------------------------------- /wally/suits/postgres/__init__.py: -------------------------------------------------------------------------------- 1 | import os.path 2 | 3 | 4 | import texttable 5 | 6 | 7 | from ..itest import TwoScriptTest 8 | 9 | 10 | class PgBenchTest(TwoScriptTest): 11 | root = os.path.dirname(__file__) 12 | pre_run_script = os.path.join(root, "prepare.sh") 13 | run_script = os.path.join(root, "run.sh") 14 | 15 | @classmethod 16 | def format_for_console(cls, data): 17 | tab = texttable.Texttable(max_width=120) 18 | tab.set_deco(tab.HEADER | tab.VLINES | tab.BORDER) 19 | tab.header(["TpmC"]) 20 | tab.add_row([data['res']['TpmC']]) 21 | return tab.draw() 22 | -------------------------------------------------------------------------------- /wally/suits/postgres/prepare.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | if [ ! -d /etc/postgresql ]; then 5 | apt-get update 6 | apt-get install -y postgresql postgresql-contrib 7 | err=$(pg_createcluster 9.3 main --start 2>&1 /dev/null ) 8 | if [ $? 
-ne 0 ]; then 9 | echo "There was an error while creating cluster" 10 | exit 1 11 | fi 12 | fi 13 | 14 | sed -i 's/^local\s\+all\s\+all\s\+peer/local all all trust/g' /etc/postgresql/9.3/main/pg_hba.conf 15 | sudo sed -i "s/#listen_addresses = 'localhost'/listen_addresses = '*'/g" /etc/postgresql/9.3/main/postgresql.conf 16 | 17 | service postgresql restart 18 | 19 | exit 0 20 | -------------------------------------------------------------------------------- /wally/suits/postgres/run.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | set -e 3 | 4 | while [[ $# > 1 ]] 5 | do 6 | key="$1" 7 | 8 | case $key in 9 | num_clients) 10 | CLIENTS="$2" 11 | shift 12 | ;; 13 | transactions_per_client) 14 | TRANSACTINOS_PER_CLIENT="$2" 15 | shift 16 | ;; 17 | *) 18 | echo "Unknown option $key" 19 | exit 1 20 | ;; 21 | esac 22 | shift 23 | done 24 | 25 | CLIENTS=$(echo $CLIENTS | tr ',' '\n') 26 | TRANSACTINOS_PER_CLIENT=$(echo $TRANSACTINOS_PER_CLIENT | tr ',' '\n') 27 | 28 | 29 | sudo -u postgres createdb -O postgres pgbench &> /dev/null 30 | sudo -u postgres pgbench -i -U postgres pgbench &> /dev/null 31 | 32 | 33 | for num_clients in $CLIENTS; do 34 | for trans_per_cl in $TRANSACTINOS_PER_CLIENT; do 35 | tps_all='' 36 | for i in 1 2 3 4 5 6 7 8 9 10; do 37 | echo -n "$num_clients $trans_per_cl:" 38 | sudo -u postgres pgbench -c $num_clients -n -t $trans_per_cl -j 4 -r -U postgres pgbench | 39 | grep "(excluding connections establishing)" | awk {'print $3'} 40 | done 41 | done 42 | done 43 | 44 | sudo -u postgres dropdb pgbench &> /dev/null 45 | 46 | exit 0 47 | 48 | -------------------------------------------------------------------------------- /wally/test_run_class.py: -------------------------------------------------------------------------------- 1 | from typing import List, Callable, Any, Dict, Optional, Set 2 | from concurrent.futures import ThreadPoolExecutor 3 | 4 | from cephlib.istorage import IStorage 5 | from cephlib.node import NodeInfo, IRPCNode 6 | from cephlib.ssh import ConnCreds 7 | from cephlib.storage_selectors import DevRolesConfig 8 | 9 | from .openstack_api import OSCreds, OSConnection 10 | from .config import Config 11 | from .result_classes import IWallyStorage 12 | 13 | 14 | class TestRun: 15 | """Test run information""" 16 | def __init__(self, config: Config, storage: IStorage, rstorage: IWallyStorage) -> None: 17 | # NodesInfo list 18 | self.nodes_info: Dict[str, NodeInfo] = {} 19 | 20 | self.ceph_master_node: Optional[IRPCNode] = None 21 | self.ceph_extra_args: Optional[str] = None 22 | 23 | # Nodes list 24 | self.nodes: List[IRPCNode] = [] 25 | 26 | self.build_meta: Dict[str,Any] = {} 27 | self.clear_calls_stack: List[Callable[['TestRun'], None]] = [] 28 | 29 | # openstack credentials 30 | self.os_creds: Optional[OSCreds] = None # type: ignore 31 | self.os_connection: Optional[OSConnection] = None # type: ignore 32 | self.rpc_code: bytes = None # type: ignore 33 | self.default_rpc_plugins: Dict[str, bytes] = None # type: ignore 34 | 35 | self.storage = storage 36 | self.rstorage = rstorage 37 | self.config = config 38 | self.sensors_run_on: Set[str] = set() 39 | self.os_spawned_nodes_ids: List[int] = None # type: ignore 40 | self.devs_locator: DevRolesConfig = [] 41 | 42 | def get_pool(self): 43 | return ThreadPoolExecutor(self.config.get('worker_pool_sz', 32)) 44 | 45 | def merge_node(self, creds: ConnCreds, roles: Set[str], **params) -> NodeInfo: 46 | info = NodeInfo(creds, roles, params) 47 | nid = info.node_id 48 
| 49 | if nid in self.nodes_info: 50 | self.nodes_info[nid].roles.update(info.roles) 51 | self.nodes_info[nid].params.update(info.params) 52 | return self.nodes_info[nid] 53 | else: 54 | self.nodes_info[nid] = info 55 | return info 56 | -------------------------------------------------------------------------------- /wally/utils.py: -------------------------------------------------------------------------------- 1 | import os 2 | import sys 3 | import uuid 4 | import logging 5 | import datetime 6 | import contextlib 7 | 8 | from typing import Any, Tuple, Iterator, Iterable 9 | 10 | try: 11 | from petname import Generate as pet_generate 12 | except ImportError: 13 | def pet_generate(_1: str, _2: str) -> str: 14 | return str(uuid.uuid4()) 15 | 16 | from cephlib.common import run_locally, sec_to_str 17 | 18 | 19 | logger = logging.getLogger("wally") 20 | 21 | 22 | STORAGE_ROLES = ['ceph-osd'] 23 | 24 | 25 | class StopTestError(RuntimeError): 26 | pass 27 | 28 | 29 | class LogError: 30 | def __init__(self, message: str, exc_logger: logging.Logger = None) -> None: 31 | self.message = message 32 | self.exc_logger = exc_logger 33 | 34 | def __enter__(self) -> 'LogError': 35 | return self 36 | 37 | def __exit__(self, tp: type, value: Exception, traceback: Any) -> bool: 38 | if value is None or isinstance(value, StopTestError): 39 | return False 40 | 41 | if self.exc_logger is None: 42 | exc_logger = sys._getframe(1).f_globals.get('logger', logger) 43 | else: 44 | exc_logger = self.exc_logger 45 | 46 | exc_logger.exception(self.message, exc_info=(tp, value, traceback)) 47 | raise StopTestError(self.message) from value 48 | 49 | 50 | class TaskFinished(Exception): 51 | pass 52 | 53 | 54 | def log_block(message: str, exc_logger:logging.Logger = None) -> LogError: 55 | logger.debug("Starts : " + message) 56 | return LogError(message, exc_logger) 57 | 58 | 59 | def check_input_param(is_ok: bool, message: str) -> None: 60 | if not is_ok: 61 | logger.error(message) 62 | raise StopTestError(message) 63 | 64 | 65 | def yamable(data: Any) -> Any: 66 | if isinstance(data, (tuple, list)): 67 | return map(yamable, data) 68 | 69 | if isinstance(data, dict): 70 | res = {} 71 | for k, v in data.items(): 72 | res[yamable(k)] = yamable(v) 73 | return res 74 | 75 | return data 76 | 77 | 78 | def get_creds_openrc(path: str) -> Tuple[str, str, str, str, bool]: 79 | fc = open(path).read() 80 | 81 | echo = 'echo "$OS_INSECURE:$OS_TENANT_NAME:$OS_USERNAME:$OS_PASSWORD@$OS_AUTH_URL"' 82 | 83 | msg = "Failed to get creads from openrc file" 84 | with LogError(msg): 85 | data = run_locally(['/bin/bash'], input_data=(fc + "\n" + echo).encode('utf8')).decode("utf8") 86 | 87 | msg = "Failed to get creads from openrc file: " + data 88 | with LogError(msg): 89 | data = data.strip() 90 | insecure_str, user, tenant, passwd_auth_url = data.split(':', 3) 91 | insecure = (insecure_str in ('1', 'True', 'true')) 92 | passwd, auth_url = passwd_auth_url.rsplit("@", 1) 93 | assert (auth_url.startswith("https://") or 94 | auth_url.startswith("http://")) 95 | 96 | return user, passwd, tenant, auth_url, insecure 97 | 98 | 99 | @contextlib.contextmanager 100 | def empty_ctx(val: Any = None) -> Iterator[Any]: 101 | yield val 102 | 103 | 104 | def get_uniq_path_uuid(path: str, max_iter: int = 10) -> Tuple[str, str]: 105 | for i in range(max_iter): 106 | run_uuid = pet_generate(2, "_") 107 | results_dir = os.path.join(path, run_uuid) 108 | if not os.path.exists(results_dir): 109 | break 110 | else: 111 | run_uuid = str(uuid.uuid4()) 112 | 
results_dir = os.path.join(path, run_uuid) 113 | 114 | return results_dir, run_uuid 115 | 116 | 117 | def get_time_interval_printable_info(seconds: int) -> Tuple[str, str]: 118 | exec_time_s = sec_to_str(seconds) 119 | now_dt = datetime.datetime.now() 120 | end_dt = now_dt + datetime.timedelta(0, seconds) 121 | return exec_time_s, "{:%H:%M:%S}".format(end_dt) 122 | 123 | 124 | -------------------------------------------------------------------------------- /web_app/__init__.py: -------------------------------------------------------------------------------- 1 | # : order imports in usual way 2 | import json 3 | import os.path 4 | 5 | from logging import getLogger, INFO 6 | from flask import render_template, url_for, make_response, request 7 | from report import build_vertical_bar, build_lines_chart 8 | from web_app import app 9 | from persistance.storage_api import builds_list, prepare_build_data, \ 10 | get_data_for_table, add_data, get_builds_data, \ 11 | get_build_info, get_build_detailed_info 12 | from web_app.app import app 13 | from werkzeug.routing import Rule 14 | 15 | 16 | def merge_builds(b1, b2): 17 | d = {} 18 | 19 | for pair in b2.items(): 20 | if pair[0] in b1 and type(pair[1]) is list: 21 | b1[pair[0]].extend(pair[1]) 22 | else: 23 | b1[pair[0]] = pair[1] 24 | 25 | 26 | app.url_map.add(Rule('/', endpoint='index')) 27 | app.url_map.add(Rule('/images/', endpoint='get_image')) 28 | app.url_map.add(Rule('/tests/', endpoint='render_test')) 29 | app.url_map.add(Rule('/tests/table//', endpoint='render_table')) 30 | app.url_map.add(Rule('/api/tests/', 31 | endpoint='add_test', methods=['POST'])) 32 | app.url_map.add(Rule('/api/tests', endpoint='get_all_tests')) 33 | app.url_map.add(Rule('/api/tests/', endpoint='get_test')) 34 | 35 | 36 | @app.endpoint('index') 37 | def index(): 38 | data = builds_list() 39 | 40 | for elem in data: 41 | elem['url'] = url_for('render_test', test_name=elem['url']) 42 | 43 | return render_template("index.html", tests=data) 44 | 45 | 46 | @app.endpoint('get_image') 47 | def get_image(image_name): 48 | with open("static/images/" + image_name, 'rb') as f: 49 | image_binary = f.read() 50 | 51 | response = make_response(image_binary) 52 | response.headers['Content-Type'] = 'image/png' 53 | response.headers['Content-Disposition'] = 'attachment; filename=img.png' 54 | 55 | return response 56 | 57 | 58 | @app.endpoint('render_test') 59 | def render_test(test_name): 60 | results = prepare_build_data(test_name) 61 | lab_meta = get_build_detailed_info(test_name) 62 | 63 | bars = build_vertical_bar(results) 64 | lines = build_lines_chart(results) 65 | urls = bars + lines 66 | 67 | urls = [url_for("get_image", image_name=os.path.basename(url)) 68 | if not url.startswith('http') else url for url in urls] 69 | 70 | return render_template("test.html", urls=urls, 71 | table_url=url_for('render_table', 72 | test_name=test_name), 73 | index_url=url_for('index'), lab_meta=lab_meta) 74 | 75 | 76 | @app.endpoint('render_table') 77 | def render_table(test_name): 78 | builds = get_data_for_table(test_name) 79 | data = get_build_info(test_name) 80 | 81 | header_keys = ['build_id', 'iso_md5', 'type', 'date'] 82 | table = [[]] 83 | if len(builds) > 0: 84 | sorted_keys = sorted(builds[0].keys()) 85 | 86 | for key in sorted_keys: 87 | if key not in header_keys: 88 | header_keys.append(key) 89 | 90 | for test in builds: 91 | row = [] 92 | 93 | for header in header_keys: 94 | if isinstance(test[header], list): 95 | row.append(str(test[header][0]) + unichr(0x00B1) 96 | + 
str(test[header][1])) 97 | else: 98 | row.append(test[header]) 99 | 100 | table.append(row) 101 | 102 | return render_template("table.html", headers=header_keys, table=table, 103 | back_url=url_for('render_test', 104 | test_name=test_name), lab=data) 105 | 106 | 107 | @app.endpoint('add_test') 108 | def add_test(test_name): 109 | add_data(request.data) 110 | return "Created", 201 111 | 112 | 113 | @app.endpoint('get_all_tests') 114 | def get_all_tests(): 115 | return json.dumps(get_builds_data()) 116 | 117 | 118 | @app.endpoint('get_test') 119 | def get_test(test_name): 120 | builds = get_builds_data(test_name) 121 | 122 | return json.dumps(builds) 123 | 124 | 125 | if __name__ == "__main__": 126 | logger = getLogger("logger") 127 | app.logger.setLevel(INFO) 128 | app.logger.addHandler(logger) 129 | app.run(host='0.0.0.0', debug=True) 130 | -------------------------------------------------------------------------------- /web_app/app.py: -------------------------------------------------------------------------------- 1 | from config import DATABASE_URI 2 | from flask import Flask 3 | from flask_bootstrap import Bootstrap 4 | from flask_sqlalchemy import SQLAlchemy 5 | 6 | app = Flask(__name__) 7 | app.config["SQLALCHEMY_DATABASE_URI"] = DATABASE_URI 8 | db = SQLAlchemy(app) 9 | Bootstrap(app) 10 | -------------------------------------------------------------------------------- /web_app/rest_api.py: -------------------------------------------------------------------------------- 1 | import json 2 | import requests 3 | 4 | 5 | def add_test(test_name, test_data, url): 6 | # normalize the base url, then append the API path 7 | url = url.rstrip('/') + '/api/tests/' + test_name 8 | requests.post(url=url, data=json.dumps(test_data)) 9 | 10 | 11 | def get_test(test_name, url): 12 | # normalize the base url, then append the API path 13 | url = url.rstrip('/') + '/api/tests/' + test_name 14 | 15 | result = requests.get(url=url) 16 | 17 | return json.loads(result.content) 18 | 19 | 20 | def get_all_tests(url): 21 | # normalize the base url, then append the API path 22 | url = url.rstrip('/') + '/api/tests' 23 | 24 | result = requests.get(url=url) 25 | return json.loads(result.content) 26 | -------------------------------------------------------------------------------- /web_app/static/script.js: -------------------------------------------------------------------------------- 1 | $(document).ready(function(){ 2 | 3 | $("#toggler").click(function(){ 4 | $(this).toggleClass('active inactive'); 5 | }) 6 | 7 | }) -------------------------------------------------------------------------------- /web_app/static/style.css: -------------------------------------------------------------------------------- 1 | .active i.icon-folder-open{ display:inline-block; } 2 | .active i.icon-folder-close { display:none;} 3 | 4 | .inactive i.icon-folder-close{ display:inline-block; } 5 | .inactive i.icon-folder-open { display:none;} -------------------------------------------------------------------------------- /web_app/templates/base.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | {% block head %} 5 | 6 | 7 | 8 | 9 | 10 | 11 | {% endblock %} 12 | 13 | 14 | {% block body %} 15 | {% endblock %} 16 | 17 | -------------------------------------------------------------------------------- /web_app/templates/index.html: -------------------------------------------------------------------------------- 1 | {% extends "base.html" %} 2 | 3 | 4 | {% block head %} 5 | {{ super() }} 6 | Test report 7 | {% endblock %} 8 | 9 | 10 | {% block body %} 11 |

Tests report :

12 |
{iops_vs_size}{iotime_vs_size}
13 | 14 | 15 | 16 | {% for test in tests %} 17 | 18 | 21 | 24 | 27 | 28 | {% endfor %} 29 |
Build type Build name Date
19 | {{ test.type }} 20 | 22 | {{ test.name }} 23 | 25 | {{ test.date }} 26 |
30 | {% endblock %} 31 | 32 | -------------------------------------------------------------------------------- /web_app/templates/lab_header.html: -------------------------------------------------------------------------------- 1 |
2 |
3 | Lab Name : {{ lab.name }} 4 |
5 | 6 | {% for node in lab.nodes %} 7 | {% for p in node.processors %} 8 |
    9 |
  • 10 | Processor model : {{ p.model }} 11 | Processor frequency : {{ p.frequency }} 12 |
  • 13 |
14 | {% endfor %} 15 | 16 | {% for i in node.interfaces %} 17 |
    18 |
  • 19 |
    20 | 21 | name : {{ i.name }} 22 | MAC address : {{ i.mac }} 23 | max speed : {{ i.max_speed }} 24 | current speed : {{ i.current_speed }} 25 | state : {{ i.state }} 26 |
    27 |
  • 28 |
29 | {% endfor %} 30 | 31 | {% for disk in node.disks %} 32 |
    33 |
  • 34 |
    35 | Disk name : {{ disk.name }} 36 | Size: {{ disk.size }} 37 |
    38 |
  • 39 |
40 | {% endfor %} 41 | 42 |
    43 |
  • 44 |
    45 | Memory total : {{ node.memory.total }} 46 | Memory maximum capacity : {{ node.memory.maximum_capacity }} 47 |
    48 |
  • 49 |
50 | 51 | {% endfor %} 52 | 53 |
-------------------------------------------------------------------------------- /web_app/templates/lab_main.html: -------------------------------------------------------------------------------- 1 |
2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | 16 | 17 | 18 | 19 | 20 | 21 | 22 | 23 | 24 | 25 | 26 | 27 | 28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 |
Openstack Release : Juno on Ubuntu 12.04.4
Fuel Release : 6.1
Operating system : Ubuntu
Ceph version : 0.80.7
Nodes count : {{ lab_meta.nodes_count }}
Cores count : {{ lab_meta.processor_count }}
Total memory : {{ lab_meta.total_memory }} Gb
Total disk : {{ lab_meta.total_disk }} Gb
36 |
-------------------------------------------------------------------------------- /web_app/templates/table.html: -------------------------------------------------------------------------------- 1 | {% extends "base.html" %} 2 | 3 | 4 | {% block head %} 5 | {{ super() }} 6 | Table of results 7 | {% endblock %} 8 | 9 | 10 |
11 | {% include 'lab_header.html' %} 12 |
13 | {% block body %} 14 |

Perf-1-Env

15 | 16 | {% for header in headers %} 17 | 20 | {% endfor %} 21 | {% for row in table %} 22 | 23 | {% for data in row %} 24 | 27 | {% endfor %} 28 | 29 | {% endfor %} 30 |
18 | {{ header }} 19 |
25 | {{ data }} 26 |
31 |
32 |

33 | Back 34 |

35 | {% endblock %} 36 | 37 | -------------------------------------------------------------------------------- /web_app/templates/test.html: -------------------------------------------------------------------------------- 1 | {% extends "base.html" %} 2 | 3 | 4 | {% block head %} 5 | {{ super() }} 6 | Table of results 7 | {% endblock %} 8 | 9 | 10 | {% block body %} 11 |

Perf-1 Env

12 | {% include 'lab_main.html'%} 13 |
14 |
15 |
16 | 17 | {% for url in urls %} 18 | {% if loop.index is divisibleby 2 %} 19 | 22 | 23 | {% else %} 24 | 25 | 28 | {% endif %} 29 | {% endfor %} 30 |
20 | 21 |
26 | 27 |
31 |
32 | 33 | {% for header in headers %} 34 | 37 | {% endfor %} 38 | {% for row in table %} 39 | 40 | {% for data in row %} 41 | 44 | {% endfor %} 45 | 46 | {% endfor %} 47 |
35 |

{{ header }}

36 |
42 | {{ data }} 43 |
48 |
49 |
50 |

51 | Index page 52 |

53 |

54 | Details 55 |

56 |
57 | {% endblock %} 58 | 59 | --------------------------------------------------------------------------------