├── .dockerignore ├── .gitignore ├── .gitlab-ci.yml ├── .gitreview ├── INFO ├── INFO.yaml ├── LICENSE ├── README.rst ├── behave_tests ├── __init__.py ├── behavedriver.py └── features │ ├── __init__.py │ ├── characterization-full.feature │ ├── characterization-samples.feature │ ├── environment.py │ ├── non-regression.feature │ ├── quick-test-10kpps.feature │ └── steps │ ├── __init__.py │ ├── steps.py │ └── testapi.py ├── client ├── __init__.py ├── client.py ├── nfvbench_client.py └── requirements.txt ├── docker ├── Dockerfile ├── cleanup_generators.py └── nfvbench-entrypoint.sh ├── docs ├── Makefile ├── conf.py ├── developer │ ├── building.rst │ ├── design │ │ ├── design.rst │ │ ├── index.rst │ │ ├── ndrpdr.rst │ │ ├── overview.rst │ │ ├── traffic_desc.rst │ │ └── versioning.rst │ ├── index.rst │ ├── nfvbenchvm.rst │ └── testing-nfvbench.rst ├── index.rst ├── make.bat ├── release-notes │ ├── index.rst │ ├── nfvbench-release-notes.rst │ └── nfvbenchvm-release-notes.rst ├── requirements.txt └── user │ ├── advanced.rst │ ├── examples.rst │ ├── extchains.rst │ ├── faq.rst │ ├── fluentd.rst │ ├── hw_requirements.rst │ ├── images │ ├── extchain-config.png │ ├── nfvbench-all-sriov-pvvp.png │ ├── nfvbench-all-sriov-pvvp2.png │ ├── nfvbench-ext-multi-vlans.png │ ├── nfvbench-ext-shared.png │ ├── nfvbench-kibana-filter-kql.png │ ├── nfvbench-kibana-filter.png │ ├── nfvbench-kibana-gbps-line.png │ ├── nfvbench-kibana-pps-scatter.png │ ├── nfvbench-kibana-pps-theoretical.png │ ├── nfvbench-kibana-zoom-selection.png │ ├── nfvbench-kibana.png │ ├── nfvbench-npvp.png │ ├── nfvbench-pvp.png │ ├── nfvbench-pvpl3.png │ ├── nfvbench-pvvp.png │ ├── nfvbench-pvvp2.png │ ├── nfvbench-sriov-pvp.png │ ├── nfvbench-sriov-pvvp.png │ ├── nfvbench-sriov-pvvp2.png │ ├── nfvbench-trex-setup.png │ └── nfvbench-xtesting.png │ ├── index.rst │ ├── installation.rst │ ├── kibana.rst │ ├── mpls.rst │ ├── pvpl3.rst │ ├── quickstart_docker.rst │ ├── readme.rst │ ├── server.rst │ ├── sriov.rst │ └── xtesting.rst ├── kibana └── visualizations │ ├── export.ndjson │ ├── ndr_capacity_gbps_line_chart.json │ ├── ndr_capacity_gbps_scatter_plot.json │ ├── ndr_capacity_gbps_theoretical_line_chart.json │ ├── ndr_capacity_gbps_theoretical_scatter_plot.json │ ├── ndr_capacity_pps_line_chart.json │ ├── ndr_capacity_pps_scatter_plot.json │ ├── ndr_capacity_pps_theoretical_line_chart.json │ └── ndr_capacity_pps_theoretical_scatter_plot.json ├── nfvbench ├── __init__.py ├── cfg.default.yaml ├── chain_router.py ├── chain_runner.py ├── chain_workers.py ├── chaining.py ├── cleanup.py ├── compute.py ├── config.py ├── config_plugin.py ├── credentials.py ├── factory.py ├── fluentd.py ├── log.py ├── nfvbench.py ├── nfvbenchd.py ├── nfvbenchvm │ └── nfvbenchvm.conf ├── packet_stats.py ├── specs.py ├── stats_collector.py ├── stats_manager.py ├── summarizer.py ├── traffic_client.py ├── traffic_gen │ ├── __init__.py │ ├── dummy.py │ ├── traffic_base.py │ ├── traffic_utils.py │ └── trex_gen.py ├── traffic_server.py └── utils.py ├── nfvbenchvm └── dib │ ├── build-image.sh │ ├── elements │ └── nfvbenchvm │ │ ├── element-deps │ │ ├── fdio-release.repo │ │ ├── finalise.d │ │ ├── 51-add-cpu-isolation │ │ ├── 52-change-resolution │ │ └── 53-boot-from-new-kernel │ │ ├── package-installs.yaml │ │ ├── post-install.d │ │ ├── 01-update-kernel │ │ ├── 02-pip-package │ │ ├── 03-copy-rc-local │ │ ├── 04-add-execute-attribute │ │ ├── 51-cloudcfg-edit │ │ ├── 52-nfvbench-script │ │ ├── 53-sshd-script │ │ └── 99-cleanup │ │ └── static │ │ ├── etc │ │ ├── cloud │ │ │ └── 
cloud.cfg.d │ │ │ │ └── 99-disable-network-config.cfg │ │ ├── modprobe.d │ │ │ └── vfio.conf │ │ ├── modules-load.d │ │ │ └── vfio-pci.conf │ │ ├── openstack │ │ │ └── clouds.yaml │ │ ├── profile.d │ │ │ └── nfvbench.sh │ │ ├── rc.d │ │ │ ├── rc.local.generator │ │ │ └── rc.local.loopvm │ │ ├── sysconfig │ │ │ └── network-scripts │ │ │ │ ├── ifcfg-eth0 │ │ │ │ └── ifcfg-eth1 │ │ └── systemd │ │ │ └── system │ │ │ └── nfvbench.service │ │ ├── nfvbench │ │ ├── configure-nfvbench.sh │ │ ├── nfvbench.conf │ │ └── start-nfvbench.sh │ │ └── vpp │ │ ├── startup.conf │ │ └── vm.conf │ └── verify-image.sh ├── pylint.rc ├── requirements-dev.txt ├── requirements.txt ├── setup.cfg ├── setup.py ├── test-requirements.txt ├── test ├── __init__.py ├── mock_trex.py ├── test_chains.py ├── test_nfvbench.py └── ut_behave_tests │ ├── __init__.py │ ├── test_data │ ├── project=nfvbench&case=characterization&criteria=PASS&page=1.json │ ├── project=nfvbench&case=characterization&criteria=PASS&page=2.json │ └── project=nfvbench&case=non-regression&criteria=PASS&page=1.json │ ├── test_steps.py │ ├── test_testapi.py │ └── test_utils.py ├── tox.ini └── xtesting ├── ansible ├── host_vars │ └── 127.0.0.1 └── site.yml └── testcases.yaml /.dockerignore: -------------------------------------------------------------------------------- 1 | requirements-dev.txt 2 | .gitignore 3 | .gitreview 4 | nfvbenchvm/ 5 | test/ 6 | .tox/ 7 | .cache/ -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .DS_store 2 | *.pyc 3 | *~ 4 | .idea 5 | .tox 6 | .cache 7 | .eggs 8 | .vscode 9 | .pytest_cache/ 10 | venv 11 | nfvbench.egg-info 12 | nfvbenchvm/dib/dib-venv 13 | nfvbenchvm/dib/nfvbenchvm_centos-*.d/ 14 | *.qcow2 15 | docs/_static 16 | build/ 17 | AUTHORS 18 | ChangeLog 19 | docs/_build/* 20 | -------------------------------------------------------------------------------- /.gitlab-ci.yml: -------------------------------------------------------------------------------- 1 | # NFVBench Gitlab-CI Jobs 2 | --- 3 | include: 4 | - project: anuket/releng 5 | file: '/gitlab-templates/RTD.gitlab-ci.yml' 6 | - project: anuket/releng 7 | file: '/gitlab-templates/Docker.gitlab-ci.yml' 8 | - project: anuket/releng 9 | file: '/gitlab-templates/GoogleStorage.gitlab-ci.yml' 10 | 11 | variables: 12 | DOCKER_REGISTRY: docker.io 13 | 14 | .tox-defaults: &tox-defaults 15 | stage: test 16 | image: python:3.8 17 | before_script: 18 | - pip install tox==3.21.4 19 | cache: 20 | paths: 21 | - .cache/pip 22 | - venv/ 23 | rules: 24 | - if: $CI_PIPELINE_SOURCE == "merge_request_event" || $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH 25 | 26 | pep8: 27 | <<: *tox-defaults 28 | stage: build 29 | script: 30 | tox -e pep8 31 | 32 | tox-py38: 33 | <<: *tox-defaults 34 | script: 35 | tox -e py38 36 | 37 | verify-image: 38 | stage: build 39 | image: centos:7 40 | before_script: 41 | - yum -y install python3 qemu-img kpartx sudo e2fsprogs 42 | - python3 -m venv venv 43 | script: 44 | - source venv/bin/activate 45 | - !reference [.gsutil-install, script] 46 | - cd nfvbenchvm/dib 47 | - | 48 | echo -e "\e[0Ksection_start:`date +%s`:build_image\r\e[0KBuild Image" 49 | bash verify-image.sh -v 50 | echo -e "\e[0Ksection_end:`date +%s`:build_image\r\e[0K" 51 | rules: 52 | - if: $CI_PIPELINE_SOURCE == "merge_request_event" || $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH 53 | 54 | build-image: 55 | stage: deploy 56 | image: centos:7 57 | before_script: 58 | - yum -y 
install python3 qemu-img kpartx sudo e2fsprogs 59 | - python3 -m venv venv 60 | script: 61 | - source venv/bin/activate 62 | - !reference [.gsutil-install, script] 63 | - cd nfvbenchvm/dib 64 | - | 65 | echo -e "\e[0Ksection_start:`date +%s`:build_image\r\e[0KBuild Image" 66 | bash build-image.sh 67 | echo -e "\e[0Ksection_end:`date +%s`:build_image\r\e[0K" 68 | rules: 69 | - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH 70 | 71 | docker-build-nfvbench: 72 | extends: .docker-build-and-push 73 | variables: 74 | DOCKER_IMAGE: "$DOCKER_ORGANIZATION/nfvbench" 75 | DOCKER_BUILDCONTEXT: "docker" 76 | DOCKER_FILEPATH: "docker/Dockerfile" 77 | -------------------------------------------------------------------------------- /.gitreview: -------------------------------------------------------------------------------- 1 | [gerrit] 2 | host=gerrit.opnfv.org 3 | port=29418 4 | project=nfvbench.git 5 | -------------------------------------------------------------------------------- /INFO: -------------------------------------------------------------------------------- 1 | Project: L2/L3 forwarding performance toolkit for NFVi (NFVbench) 2 | Project Creation Date: May 2nd 2017 3 | Project Category: 4 | Lifecycle State: 5 | Primary Contact: Alec Hothan (ahothan@cisco.com) 6 | Project Lead: Alec Hothan (ahothan@cisco.com) 7 | Jira Project Name: L2 L3 forwarding performance toolkit for NFVi 8 | Jira Project Prefix: NFVBENCH 9 | Mailing list tag: [nfvbench] 10 | IRC: Server: 11 | Repository: nfvbench 12 | 13 | Committers: 14 | Alec Hothan (ahothan@cisco.com) 15 | Carsten Rossenhövel (cross@eantc.com) 16 | Frank Brockners (fbrockne@cisco.com) 17 | Yichen Wang (yicwang@cisco.com) 18 | Al Morton (acmorton@att.com) 19 | 20 | Link to TSC approval of the project: 21 | 22 | Acknowledgements 23 | The development of NFVbench started in Summer 2016 at Cisco by this small team of dedicated people 24 | before being open sourced in Spring 2017 to OPNFV following more than 500 commits: 25 | Jan Balaz (aka Johnny) 26 | Stefano Chiesa Suryanto 27 | Yichen Wang 28 | Alec Hothan 29 | 30 | 31 | 32 | -------------------------------------------------------------------------------- /INFO.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | project: 'L2/L3 forwarding performance toolkit for NFVi (NFVbench)' 3 | project_creation_date: 'May 2nd 2017' 4 | project_category: '' 5 | lifecycle_state: '' 6 | project_lead: &opnfv_nfvbench_ptl 7 | name: 'Alec Hothan' 8 | email: 'ahothan@cisco.com' 9 | id: 'ahothan' 10 | company: 'cisco.com' 11 | timezone: 'PST' 12 | primary_contact: *opnfv_nfvbench_ptl 13 | issue_tracking: 14 | type: 'jira' 15 | url: 'https://jira-old.opnfv.org/projects/NFVBENCH' 16 | key: 'NFVBENCH' 17 | mailing_list: 18 | type: 'mailman2' 19 | url: 'anuket-tech-discuss@lists.anuket.io' 20 | tag: '#nfvbench' 21 | realtime_discussion: 22 | type: irc 23 | server: 'freenode.net' 24 | channel: '#opnfv-nfvbench' 25 | meetings: 26 | - type: 'gotomeeting+irc' 27 | agenda: # eg: 'https://wiki.opnfv.org/display/' 28 | url: # eg: 'https://global.gotomeeting.com/join/819733085' 29 | server: 'freenode.net' 30 | channel: '#opnfv-meeting' 31 | repeats: 'weekly' 32 | time: # eg: '16:00 UTC' 33 | repositories: 34 | - 'nfvbench' 35 | committers: 36 | - <<: *opnfv_nfvbench_ptl 37 | - name: 'Yichen Wang' 38 | email: 'yicwang@cisco.com' 39 | company: 'cisco.com' 40 | id: 'yicwang' 41 | - name: 'Francois-Regis Menguy' 42 | email: 'francoisregis.menguy@orange.com' 43 | company: 'orange.com' 44 | id: 
'fmenguy' 45 | - name: 'Gwenael Lambrouin' 46 | email: 'gwenael.lambrouin@orange.com' 47 | company: 'orange.com' 48 | id: 'glambrouin' 49 | tsc: 50 | # yamllint disable rule:line-length 51 | approval: '' 52 | # yamllint enable rule:line-length 53 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | Copyright 2015 Open Platform for NFV Project, Inc. and its contributors 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | -------------------------------------------------------------------------------- /README.rst: -------------------------------------------------------------------------------- 1 | NFVbench: A Network Performance Benchmarking Tool for NFVi Full Stacks 2 | ********************************************************************** 3 | 4 | The NFVbench tool provides an automated way to measure the network performance for the most common data plane packet flows 5 | on any NFVi system viewed as a black box (NFVi Full Stack). 6 | An NFVi full stack exposes the following interfaces: 7 | - an OpenStack API for those NFVi platforms based on OpenStack 8 | - an interface to send and receive packets on the data plane (typically through top of rack switches 9 | while simpler direct wiring to a looping device would also work) 10 | 11 | The NFVi full stack does not have to be supported by the OPNFV ecosystem and can be any functional OpenStack system that provides 12 | the above interfaces. 13 | NFVbench can also be used without OpenStack on any networking device that can handle L2 forwarding or L3 routing. 14 | 15 | NFVbench can be installed standalone (in the form of a single Docker container) and is fully functional without 16 | the need to install any other OPNFV framework. 17 | 18 | It is designed to be easy to install and easy to use by non experts (no need to be an expert in traffic generators and data plane 19 | performance benchmarking). 20 | 21 | Online Documentation 22 | -------------------- 23 | The latest version of the NFVbench documentation is available online at: 24 | 25 | https://docs.anuket.io/projects/nfvbench/en/latest/index.html 26 | 27 | Contact Information 28 | ------------------- 29 | Inquiries and questions: send an email to anuket-tech-discuss@lists.anuket.io with a Subject line starting with "#nfvbench" 30 | 31 | Open issues or submit an issue or enhancement request: https://jira-old.opnfv.org/projects/NFVBENCH/issues (this requires an OPNFV Linux Foundation login). 
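For scripted runs, NFVbench can also be driven through the small Python REST client bundled in client/client.py (shown later in this listing). A minimal sketch, assuming a server already started with --server and reachable at http://127.0.0.1:7555 (the endpoint used elsewhere in this repository); the import path and the config keys are illustrative assumptions, not a validated configuration::

    from client.client import NfvbenchClient, NfvbenchException, TimeOutException

    # Illustrative configuration overrides only; any valid NFVbench config keys can be used.
    config = {'duration_sec': 10, 'rate': 'ndr_pdr'}

    nfvbench = NfvbenchClient('http://127.0.0.1:7555')
    try:
        result = nfvbench.run_config(config, timeout=900)
        print('NFVbench run result:', result)
    except TimeOutException:
        print('request timed out; the server may still be running the benchmark')
    except NfvbenchException as err:
        print('NFVbench run failed:', err)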
32 | -------------------------------------------------------------------------------- /behave_tests/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/opnfv/nfvbench/8ecfd4c886507fe602398a8623e6044d40ea8090/behave_tests/__init__.py -------------------------------------------------------------------------------- /behave_tests/behavedriver.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # Copyright 2021 Orange 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); you may 5 | # not use this file except in compliance with the License. You may obtain 6 | # a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 12 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 13 | # License for the specific language governing permissions and limitations 14 | # under the License. 15 | # 16 | 17 | """Define classes required to run any Behave test suites.""" 18 | 19 | from __future__ import division 20 | 21 | import json 22 | import logging 23 | import os 24 | 25 | from xtesting.core.behaveframework import BehaveFramework 26 | 27 | __author__ = "François-Régis Menguy " 28 | 29 | 30 | class BehaveDriver(BehaveFramework): 31 | """NFVbench custom BehaveDriver for Xtesting.""" 32 | # pylint: disable=too-many-instance-attributes 33 | 34 | __logger = logging.getLogger('xtesting.core.behavedriver') 35 | 36 | def __init__(self, **kwargs): 37 | super().__init__(**kwargs) 38 | self.campaign_json_file = os.path.join(self.res_dir, 'campaign_result.json') 39 | 40 | def extract_nfvbench_results(self): 41 | with open(self.campaign_json_file) as stream_: 42 | self.details['results'] = json.load(stream_) 43 | 44 | def run(self, **kwargs): 45 | 46 | """Override existing Xtesting BehaveFramework core script run method 47 | to extract NFVbench result and push them to DB 48 | 49 | Here are the steps: 50 | * run Xtesting behave method: 51 | * create the output directories if required, 52 | * run behave features with parameters 53 | * get the behave results in output.json, 54 | * get the nfvbench results in campaign_result.json 55 | 56 | Args: 57 | kwargs: Arbitrary keyword arguments. 58 | 59 | Returns: 60 | EX_OK if all suites ran well. 61 | EX_RUN_ERROR otherwise. 
62 | """ 63 | try: 64 | super().run(**kwargs) 65 | self.extract_nfvbench_results() 66 | self.__logger.info("NFVbench results were successfully parsed") 67 | except Exception: # pylint: disable=broad-except 68 | self.__logger.exception("Cannot parse NFVbench results") 69 | return self.EX_RUN_ERROR 70 | return self.EX_OK 71 | -------------------------------------------------------------------------------- /behave_tests/features/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/opnfv/nfvbench/8ecfd4c886507fe602398a8623e6044d40ea8090/behave_tests/features/__init__.py -------------------------------------------------------------------------------- /behave_tests/features/characterization-full.feature: -------------------------------------------------------------------------------- 1 | @characterization 2 | Feature: characterization 3 | 4 | @throughput 5 | Scenario Outline: Run a NDR test for a defined frame size and flow count 6 | Given 10 sec run duration 7 | And frame size 8 | And flow count 9 | And ndr rate 10 | When NFVbench API is ready 11 | Then 3 runs are started and waiting for maximum result 12 | And push result to database 13 | And extract offered rate result 14 | 15 | Examples: Frame sizes and flow counts 16 | | frame_size | flow_count | 17 | | 64 | 128 | 18 | | 128 | 128 | 19 | | 256 | 128 | 20 | | 512 | 128 | 21 | | 768 | 128 | 22 | | 1024 | 128 | 23 | | 1280 | 128 | 24 | | 1518 | 128 | 25 | | IMIX | 128 | 26 | | 9000 | 128 | 27 | | 64 | 10k | 28 | | 128 | 10k | 29 | | 256 | 10k | 30 | | 512 | 10k | 31 | | 768 | 10k | 32 | | 1024 | 10k | 33 | | 1280 | 10k | 34 | | 1518 | 10k | 35 | | IMIX | 10k | 36 | | 9000 | 10k | 37 | | 64 | 100k | 38 | | 128 | 100k | 39 | | 256 | 100k | 40 | | 512 | 100k | 41 | | 768 | 100k | 42 | | 1024 | 100k | 43 | | 1280 | 100k | 44 | | 1518 | 100k | 45 | | IMIX | 100k | 46 | | 9000 | 100k | 47 | 48 | 49 | @latency 50 | Scenario Outline: Run a latency test for a defined frame size and throughput percentage 51 | Given 10 sec run duration 52 | And TRex is restarted 53 | And frame size 54 | And 100k flow count 55 | And rate of previous scenario 56 | When NFVbench API is ready 57 | Then run is started and waiting for result 58 | And push result to database 59 | 60 | Examples: Frame sizes and throughput percentages 61 | | frame_size | throughput | 62 | | 64 | 70% | 63 | | 64 | 90% | 64 | | 768 | 70% | 65 | | 768 | 90% | 66 | | 1518 | 70% | 67 | | 1518 | 90% | 68 | | 9000 | 70% | 69 | | 9000 | 90% | 70 | -------------------------------------------------------------------------------- /behave_tests/features/characterization-samples.feature: -------------------------------------------------------------------------------- 1 | @characterization 2 | Feature: characterization 3 | 4 | @throughput 5 | Scenario Outline: Run a NDR test for a defined frame size and flow count 6 | Given 10 sec run duration 7 | And frame size 8 | And flow count 9 | And ndr rate 10 | When NFVbench API is ready 11 | Then 3 runs are started and waiting for maximum result 12 | And push result to database 13 | And extract offered rate result 14 | 15 | Examples: Frame sizes and flow counts 16 | | frame_size | flow_count | 17 | | 64 | 100k | 18 | | 768 | 100k | 19 | | 1518 | 100k | 20 | | 9000 | 100k | 21 | 22 | 23 | @latency 24 | Scenario Outline: Run a latency test for a defined frame size and throughput percentage 25 | Given 10 sec run duration 26 | And frame size 27 | And 100k flow count 28 | And rate of previous 
scenario 29 | When NFVbench API is ready 30 | Then run is started and waiting for result 31 | And push result to database 32 | 33 | Examples: Frame sizes and throughput percentages 34 | | frame_size | throughput | 35 | | 64 | 70% | 36 | | 64 | 90% | 37 | | 768 | 70% | 38 | | 768 | 90% | 39 | | 1518 | 70% | 40 | | 1518 | 90% | 41 | | 9000 | 70% | 42 | | 9000 | 90% | 43 | -------------------------------------------------------------------------------- /behave_tests/features/environment.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # Copyright 2021 Orange 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); you may 5 | # not use this file except in compliance with the License. You may obtain 6 | # a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 12 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 13 | # License for the specific language governing permissions and limitations 14 | # under the License. 15 | # 16 | 17 | 18 | import json 19 | import os 20 | import logging 21 | import pathlib 22 | import time 23 | 24 | 25 | def before_all(context): 26 | context.data = {'config': os.getenv('NFVBENCH_CONFIG_PATH', '/etc/nfvbench/nfvbench.cfg')} 27 | 28 | context.data['PROJECT_NAME'] = os.getenv('PROJECT_NAME', 'nfvbench') 29 | context.data['TEST_DB_URL'] = os.getenv('TEST_DB_URL') 30 | context.data['BASE_TEST_DB_URL'] = '' 31 | if context.data['TEST_DB_URL']: 32 | context.data['BASE_TEST_DB_URL'] = context.data['TEST_DB_URL'].replace('results', '') 33 | context.data['INSTALLER_TYPE'] = os.getenv('INSTALLER_TYPE') 34 | context.data['DEPLOY_SCENARIO'] = os.getenv('DEPLOY_SCENARIO') 35 | context.data['NODE_NAME'] = os.getenv('NODE_NAME', 'nfvbench') 36 | context.data['BUILD_TAG'] = os.getenv('BUILD_TAG') 37 | 38 | # NFVbench server host and port 39 | context.host_ip = os.getenv('NFVBENCH_SERVER_HOST', '127.0.0.1') 40 | context.port = int(os.getenv('NFVBENCH_SERVER_PORT', '7555')) 41 | 42 | 43 | def before_feature(context, feature): 44 | context.rates = {} 45 | context.results = {} 46 | context.start_time = time.time() 47 | context.CASE_NAME = feature.name 48 | 49 | # Create results dir if needed 50 | results_dir = pathlib.Path('/var/lib/xtesting/results/' + context.CASE_NAME) 51 | if not results_dir.exists(): 52 | results_dir.mkdir() 53 | 54 | # Setup a second logger to be able to understand why a test passed or failed 55 | # (The main logger is used by behave itself) 56 | context.logger = logging.getLogger('behave_tests') 57 | context.logger.setLevel(logging.INFO) 58 | fh = logging.FileHandler(filename=results_dir / pathlib.Path('behave_tests.log'), 59 | mode='w') # Re-create the file at the beginning of the feature 60 | fh.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')) 61 | context.logger.addHandler(fh) 62 | 63 | context.logger.info('before_feature: ' + feature.name) 64 | 65 | 66 | def before_scenario(context, scenario): 67 | context.tag = scenario.tags[0] 68 | context.json = {'log_file': '/var/lib/xtesting/results/' + context.CASE_NAME + '/nfvbench.log'} 69 | user_label = os.getenv('NFVBENCH_USER_LABEL', None) 70 | if user_label: 71 | context.json['user_label'] = user_label 72 | loopvm_flavor = os.getenv('NFVBENCH_LOOPVM_FLAVOR_NAME', None) 73 | if loopvm_flavor: 
74 | context.json['flavor_type'] = loopvm_flavor 75 | context.synthesis = {} 76 | context.percentage_rate = None 77 | 78 | context.logger.info('before_scenario: ' + scenario.name) 79 | 80 | 81 | def after_feature(context, feature): 82 | if len(context.results) == 0: 83 | # No result to dump 84 | return 85 | 86 | results_dir = pathlib.Path('/var/lib/xtesting/results/' + context.CASE_NAME) 87 | results_file = results_dir / pathlib.Path('campaign_result.json') 88 | results_file.write_text(json.dumps(context.results, indent=4)) 89 | -------------------------------------------------------------------------------- /behave_tests/features/non-regression.feature: -------------------------------------------------------------------------------- 1 | @non-regression 2 | Feature: non-regression 3 | 4 | @throughput 5 | Scenario Outline: Run a NDR test for a defined frame size 6 | Given 10 sec run duration 7 | And frame size 8 | And 100k flow count 9 | And ndr rate 10 | When NFVbench API is ready 11 | Then 3 runs are started and waiting for maximum result 12 | And push result to database 13 | And extract offered rate result 14 | And verify throughput result is in same range as the previous result 15 | And verify throughput result is in same range as the characterization result 16 | 17 | Examples: Frame sizes 18 | | frame_size | 19 | | 64 | 20 | | 768 | 21 | | 1518 | 22 | | 9000 | 23 | 24 | 25 | @latency 26 | Scenario Outline: Run a latency test for a defined frame size and throughput percentage 27 | Given 10 sec run duration 28 | And frame size 29 | And 100k flow count 30 | And packet rate equal to of max throughput of last characterization 31 | When NFVbench API is ready 32 | Then run is started and waiting for result 33 | And push result to database 34 | And verify latency result is lower than 1000 microseconds 35 | 36 | Examples: Frame sizes and throughput percentages 37 | | frame_size | percentage | 38 | | 64 | 70% | 39 | | 64 | 90% | 40 | | 768 | 70% | 41 | | 768 | 90% | 42 | | 1518 | 70% | 43 | | 1518 | 90% | 44 | | 9000 | 70% | 45 | | 9000 | 90% | 46 | -------------------------------------------------------------------------------- /behave_tests/features/quick-test-10kpps.feature: -------------------------------------------------------------------------------- 1 | @quick-test-10kpps 2 | Feature: quick-test-10kpps 3 | 4 | @throughput 5 | Scenario: Run a 10s test at 10kpps with 64-byte frames and 128 flows 6 | Given 10 sec run duration 7 | And TRex is restarted 8 | And 64 frame size 9 | And 128 flow count 10 | And 10kpps rate 11 | When NFVbench API is ready 12 | Then 1 runs are started and waiting for maximum result 13 | And push result to database 14 | -------------------------------------------------------------------------------- /behave_tests/features/steps/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/opnfv/nfvbench/8ecfd4c886507fe602398a8623e6044d40ea8090/behave_tests/features/steps/__init__.py -------------------------------------------------------------------------------- /client/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright 2017 Cisco Systems, Inc. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); you may 4 | # not use this file except in compliance with the License. 
You may obtain
5 | # a copy of the License at
6 | #
7 | # http://www.apache.org/licenses/LICENSE-2.0
8 | #
9 | # Unless required by applicable law or agreed to in writing, software
10 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
11 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
12 | # License for the specific language governing permissions and limitations
13 | # under the License.
14 | 
--------------------------------------------------------------------------------
/client/client.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # Copyright 2017 Cisco Systems, Inc. All rights reserved.
3 | #
4 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
5 | # not use this file except in compliance with the License. You may obtain
6 | # a copy of the License at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
12 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
13 | # License for the specific language governing permissions and limitations
14 | # under the License.
15 | #
16 | 
17 | import requests
18 | import time
19 | 
20 | 
21 | class TimeOutException(Exception):
22 |     pass
23 | 
24 | 
25 | class NfvbenchException(Exception):
26 |     pass
27 | 
28 | 
29 | class NfvbenchClient(object):
30 |     """Python client class to control a nfvbench server
31 | 
32 |     The nfvbench server must run in background using the --server option.
33 |     """
34 |     def __init__(self, nfvbench_url):
35 |         """Client class to send requests to the nfvbench server
36 | 
37 |         Args:
38 |             nfvbench_url: the URL of the nfvbench server (e.g. 'http://127.0.0.1:7555')
39 |         """
40 |         self.url = nfvbench_url
41 | 
42 |     def http_get(self, command, config):
43 |         url = self.url + '/' + command
44 |         res = requests.get(url, json=config)
45 |         if res.ok:
46 |             return res.json()
47 |         res.raise_for_status()
48 | 
49 |     def http_post(self, command, config):
50 |         url = self.url + '/' + command
51 |         res = requests.post(url, json=config)
52 |         if res.ok:
53 |             return res.json()
54 |         res.raise_for_status()
55 | 
56 |     def echo_config(self, config, timeout=100):
57 |         """Send an echo event to the nfvbench server with some dummy config and expect the
58 |         config to be sent back right away.
59 | 
60 |         Args:
61 |             config: some dummy configuration - must be a valid dict
62 |             timeout: how long to wait in seconds or 0 to return immediately,
63 |                      defaults to 100 seconds
64 | 
65 |         Returns:
66 |             The config as passed as a dict or None if timeout passed is 0
67 | 
68 |         Raises:
69 |             NfvbenchException: the execution of the passed configuration failed,
70 |                                the body of the exception
71 |                                contains the description of the failure.
72 |             TimeOutException: the request timed out (and may still be running
73 |                               on the server)
74 |         """
75 |         return self.http_get('echo', config)
76 | 
77 |     def run_config(self, config, timeout=300, poll_interval=5):
78 |         """Request an nfvbench configuration to be executed by the nfvbench server.
79 | 
80 |         This function will block the caller until the request completes or the request times out.
81 |         It can return immediately if timeout is set to 0.
82 |         Note that running a configuration may take a while depending on the amount of work
83 |         requested - so set the timeout value to an appropriate value.
84 | 
85 |         Args:
86 |             config: the nfvbench configuration to execute - must be a valid dict with
87 |                     valid nfvbench attributes
88 |             timeout: how long to wait in seconds or 0 to return immediately,
89 |                      defaults to 300 seconds
90 |             poll_interval: seconds between polling (http only) - defaults to every 5 seconds
91 | 
92 |         Returns:
93 |             The result of the nfvbench execution
94 |             or None if timeout passed is 0
95 |             The function will return as soon as the request is completed or when the
96 |             timeout occurs (whichever is first).
97 | 
98 |         Raises:
99 |             NfvbenchException: the execution of the passed configuration failed, the body of
100 |                                the exception contains the description of the failure.
101 |             TimeOutException: the request timed out but will still be executed by the server.
102 |         """
103 |         res = self.http_post('start_run', config)
104 |         if res['status'] != 'PENDING':
105 |             raise NfvbenchException(res['error_message'])
106 | 
107 |         # poll until request completes
108 |         elapsed = 0
109 |         while True:
110 |             time.sleep(poll_interval)
111 |             result = self.http_get('status', config)
112 |             if result['status'] != 'PENDING':
113 |                 return result
114 |             elapsed += poll_interval
115 |             if elapsed >= timeout:
116 |                 raise TimeOutException()
117 | 
--------------------------------------------------------------------------------
/client/nfvbench_client.py:
--------------------------------------------------------------------------------
1 | #!/usr/bin/env python
2 | # Copyright 2017 Cisco Systems, Inc. All rights reserved.
3 | #
4 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
5 | # not use this file except in compliance with the License. You may obtain
6 | # a copy of the License at
7 | #
8 | # http://www.apache.org/licenses/LICENSE-2.0
9 | #
10 | # Unless required by applicable law or agreed to in writing, software
11 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
12 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
13 | # License for the specific language governing permissions and limitations
14 | # under the License.
15 | #
16 | 
17 | #
18 | # This is an example of a Python application controlling an nfvbench server
19 | # using the nfvbench client API.
20 | # The nfvbench server must run in background using the --server option.
21 | # Since HTML pages are not required, the path to pass to --server can be any directory on the host.
22 | # 23 | import argparse 24 | import json 25 | import sys 26 | 27 | from client import NfvbenchClient 28 | 29 | 30 | # 31 | # At the CLI, the user can either: 32 | # - pass an nfvbench configuration as a string (-c ) 33 | # - pass an nfvbench configuration as a file name containing the 34 | # configuration (-f ) 35 | # - or pass a test config (-e ) that will be echoed back by the server as is 36 | # 37 | def main(): 38 | parser = argparse.ArgumentParser() 39 | 40 | parser.add_argument('-f', '--file', dest='file', 41 | action='store', 42 | help='NFVbench config file to execute (json format)', 43 | metavar='') 44 | parser.add_argument('-c', '--config', dest='config', 45 | action='store', 46 | help='NFVbench config to execute (json format)', 47 | metavar='') 48 | parser.add_argument('-e', '--echo', dest='echo', 49 | action='store', 50 | help='NFVbench config to echo (json format)', 51 | metavar='') 52 | parser.add_argument('-t', '--timeout', dest='timeout', 53 | default=900, 54 | action='store', 55 | help='time (seconds) to wait for NFVbench result', 56 | metavar='') 57 | parser.add_argument('url', help='nfvbench server url (e.g. http://10.0.0.1:5000)') 58 | opts = parser.parse_args() 59 | 60 | if not opts.file and not opts.config and not opts.echo: 61 | print('at least one of -f or -c or -e required') 62 | sys.exit(-1) 63 | 64 | nfvbench = NfvbenchClient(opts.url) 65 | # convert JSON into a dict 66 | try: 67 | timeout = int(opts.timeout) 68 | if opts.file: 69 | with open(opts.file) as fd: 70 | config = json.loads(fd.read()) 71 | result = nfvbench.run_config(config, timeout=timeout) 72 | elif opts.config: 73 | config = json.loads(opts.config) 74 | result = nfvbench.run_config(config, timeout=timeout) 75 | elif opts.echo: 76 | config = json.loads(opts.echo) 77 | result = nfvbench.echo_config(config, timeout=timeout) 78 | print('Result:', result) 79 | except ValueError as ex: 80 | print('Input configuration is invalid: ' + str(ex)) 81 | print() 82 | 83 | 84 | if __name__ == "__main__": 85 | main() 86 | -------------------------------------------------------------------------------- /client/requirements.txt: -------------------------------------------------------------------------------- 1 | # 2 | # 3 | backports.ssl-match-hostname==3.5.0.1 # via websocket-client 4 | requests==2.13.0 5 | six==1.10.0 # via websocket-client 6 | -------------------------------------------------------------------------------- /docker/Dockerfile: -------------------------------------------------------------------------------- 1 | # docker file for creating a container that has nfvbench installed and ready to use 2 | FROM ubuntu:20.04 3 | 4 | ENV TREX_VER "v2.89" 5 | ENV VM_IMAGE_VER "0.15" 6 | ENV PYTHONIOENCODING "utf8" 7 | 8 | RUN apt-get update && apt-get install -y \ 9 | git \ 10 | kmod \ 11 | pciutils \ 12 | python3.8 \ 13 | vim \ 14 | wget \ 15 | net-tools \ 16 | iproute2 \ 17 | libelf1 \ 18 | python3-dev \ 19 | libpython3.8-dev \ 20 | python3-distutils \ 21 | gcc \ 22 | && ln -s /usr/bin/python3.8 /usr/local/bin/python3 \ 23 | && mkdir -p /opt/trex \ 24 | && mkdir /var/log/nfvbench \ 25 | && wget --no-cache --no-check-certificate https://trex-tgn.cisco.com/trex/release/$TREX_VER.tar.gz \ 26 | && tar xzf $TREX_VER.tar.gz -C /opt/trex \ 27 | && rm -f /$TREX_VER.tar.gz \ 28 | && rm -f /opt/trex/$TREX_VER/trex_client_$TREX_VER.tar.gz \ 29 | && cp -a /opt/trex/$TREX_VER/automation/trex_control_plane/interactive/trex /usr/local/lib/python3.8/dist-packages/ \ 30 | && rm -rf 
/opt/trex/$TREX_VER/automation/trex_control_plane/interactive/trex \ 31 | && wget https://bootstrap.pypa.io/get-pip.py \ 32 | && python3 get-pip.py \ 33 | && pip3 install -U pbr \ 34 | && pip3 install -U setuptools \ 35 | && cd /opt \ 36 | # Note: do not clone with --depth 1 as it will cause pbr to fail extracting the nfvbench version 37 | # from the git tag 38 | && git clone https://gerrit.opnfv.org/gerrit/nfvbench \ 39 | && cd nfvbench && pip3 install -e . \ 40 | && wget -O nfvbenchvm-$VM_IMAGE_VER.qcow2 http://artifacts.opnfv.org/nfvbench/images/nfvbenchvm_centos-$VM_IMAGE_VER.qcow2 \ 41 | # Override Xtesting testcases.yaml file by NFVbench default one 42 | && cp xtesting/testcases.yaml /usr/local/lib/python3.8/dist-packages/xtesting/ci/testcases.yaml \ 43 | && python3 ./docker/cleanup_generators.py \ 44 | && rm -rf /opt/nfvbench/.git \ 45 | # Symlink for retrocompatibility 4.x 46 | && ln -s /opt/nfvbench /nfvbench \ 47 | && apt-get remove -y wget git python3-dev libpython3.8-dev gcc \ 48 | && apt-get autoremove -y && apt-get clean && rm -rf /var/lib/apt/lists/* 49 | 50 | ENV TREX_EXT_LIBS "/opt/trex/$TREX_VER/external_libs" 51 | 52 | 53 | ENTRYPOINT ["/opt/nfvbench/docker/nfvbench-entrypoint.sh"] 54 | -------------------------------------------------------------------------------- /docker/cleanup_generators.py: -------------------------------------------------------------------------------- 1 | # Copyright 2016 Cisco Systems, Inc. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); you may 4 | # not use this file except in compliance with the License. You may obtain 5 | # a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 11 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 12 | # License for the specific language governing permissions and limitations 13 | # under the License. 14 | 15 | import os 16 | import shutil 17 | 18 | TREX_OPT = '/opt/trex' 19 | 20 | 21 | TREX_UNUSED = [ 22 | '_t-rex-64-debug', '_t-rex-64-debug-o', 'bp-sim-64', 'bp-sim-64-debug', 23 | 't-rex-64-debug', 't-rex-64-debug-o', 'automation/__init__.py', 24 | 'automation/graph_template.html', 25 | 'automation/config', 'automation/h_avc.py', 'automation/phantom', 26 | 'automation/readme.txt', 'automation/regression', 'automation/report_template.html', 27 | 'automation/sshpass.exp', 'automation/trex_perf.py', 'wkhtmltopdf-amd64' 28 | ] 29 | 30 | 31 | def remove_unused_libs(path, files): 32 | """ 33 | Remove files not used by traffic generator. 34 | """ 35 | for f in files: 36 | f = os.path.join(path, f) 37 | try: 38 | if os.path.isdir(f): 39 | shutil.rmtree(f) 40 | else: 41 | os.remove(f) 42 | except OSError: 43 | print("Skipped file:") 44 | print(f) 45 | continue 46 | 47 | 48 | def get_dir_size(start_path='.'): 49 | """ 50 | Computes size of directory. 
51 | 52 | :return: size of directory with subdirectiories 53 | """ 54 | total_size = 0 55 | for dirpath, dirnames, filenames in os.walk(start_path): 56 | for f in filenames: 57 | try: 58 | fp = os.path.join(dirpath, f) 59 | total_size += os.path.getsize(fp) 60 | except OSError: 61 | continue 62 | return total_size 63 | 64 | if __name__ == "__main__": 65 | versions = os.listdir(TREX_OPT) 66 | for version in versions: 67 | trex_path = os.path.join(TREX_OPT, version) 68 | print('Cleaning TRex', version) 69 | try: 70 | size_before = get_dir_size(trex_path) 71 | remove_unused_libs(trex_path, TREX_UNUSED) 72 | size_after = get_dir_size(trex_path) 73 | print('==== Saved Space ====') 74 | print(size_before - size_after) 75 | except OSError: 76 | import traceback 77 | print(traceback.print_exc()) 78 | print('Cleanup was not finished.') 79 | -------------------------------------------------------------------------------- /docker/nfvbench-entrypoint.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # Copyright 2017 Cisco Systems, Inc. All rights reserved. 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); you may 5 | # not use this file except in compliance with the License. You may obtain 6 | # a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 12 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 13 | # License for the specific language governing permissions and limitations 14 | # under the License. 15 | # 16 | if [ -z "$1" ] || ([ $1 != 'start_rest_server' ] && [ $1 != 'run_tests' ] && [ $1 != 'zip_campaign' ]); then 17 | tail -f /dev/null 18 | elif [ $1 == 'run_tests' ]; then 19 | PARAMS="" 20 | for var in "${@:2}" 21 | do 22 | PARAMS+="$var " 23 | done 24 | eval "run_tests $PARAMS" 25 | elif [ $1 == 'zip_campaign' ]; then 26 | zip_campaign 27 | else 28 | PARAMS="--server" 29 | if [ -n "$HOST" ]; then 30 | PARAMS+=" --host $HOST" 31 | fi 32 | if [ -n "$PORT" ]; then 33 | PARAMS+=" --port $PORT" 34 | fi 35 | if [ -n "$CONFIG_FILE" ]; then 36 | if [ -f "$CONFIG_FILE" ]; then 37 | PARAMS+=" -c $CONFIG_FILE" 38 | fi 39 | fi 40 | eval "nfvbench $PARAMS" 41 | fi -------------------------------------------------------------------------------- /docs/Makefile: -------------------------------------------------------------------------------- 1 | # Minimal makefile for Sphinx documentation 2 | # 3 | 4 | # You can set these variables from the command line, and also 5 | # from the environment for the first two. 6 | SPHINXOPTS ?= 7 | SPHINXBUILD ?= sphinx-build 8 | SOURCEDIR = . 9 | BUILDDIR = _build 10 | 11 | # Put it first so that "make" without argument is like "make help". 12 | help: 13 | @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 14 | 15 | .PHONY: help Makefile 16 | 17 | # Catch-all target: route all unknown targets to Sphinx using the new 18 | # "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). 19 | %: Makefile 20 | @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) 21 | -------------------------------------------------------------------------------- /docs/conf.py: -------------------------------------------------------------------------------- 1 | # Copyright 2016 - 2023, Cisco Systems, Inc. 
and the NFVbench project contributors 2 | # SPDX-License-Identifier: Apache-2.0 3 | 4 | # Configuration file for the Sphinx documentation builder. 5 | # 6 | # This file only contains a selection of the most common options. For a full 7 | # list see the documentation: 8 | # https://www.sphinx-doc.org/en/master/usage/configuration.html 9 | 10 | import pbr.version 11 | 12 | 13 | # -- Path setup -------------------------------------------------------------- 14 | 15 | # If extensions (or modules to document with autodoc) are in another directory, 16 | # add these directories to sys.path here. If the directory is relative to the 17 | # documentation root, use os.path.abspath to make it absolute, like shown here. 18 | # 19 | # import os 20 | # import sys 21 | # sys.path.insert(0, os.path.abspath('.')) 22 | 23 | 24 | # -- Project information ----------------------------------------------------- 25 | 26 | project = 'NFVbench' 27 | copyright = '2016 - 2023, Cisco Systems, Inc. and the NFVbench project contributors' 28 | author = 'Cisco Systems, Inc. and the NFVbench project contributors' 29 | 30 | # -- Project version --------------------------------------------------------- 31 | 32 | # The version info for the project you're documenting, acts as replacement for 33 | # |version| and |release|, also used in various other places throughout the 34 | # built documents. 35 | # 36 | # The short X.Y version. 37 | version = pbr.version.VersionInfo(project).version_string() 38 | # The full version, including alpha/beta/rc tags. 39 | release = pbr.version.VersionInfo(project).version_string_with_vcs() 40 | 41 | # -- General configuration --------------------------------------------------- 42 | 43 | # Add any Sphinx extension module names here, as strings. They can be 44 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom 45 | # ones. 46 | extensions = [ 47 | ] 48 | 49 | # Add any paths that contain templates here, relative to this directory. 50 | templates_path = ['_templates'] 51 | 52 | # List of patterns, relative to source directory, that match files and 53 | # directories to ignore when looking for source files. 54 | # This pattern also affects html_static_path and html_extra_path. 55 | exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store'] 56 | 57 | 58 | # -- Options for HTML output ------------------------------------------------- 59 | 60 | # The theme to use for HTML and HTML Help pages. See the documentation for 61 | # a list of builtin themes. 62 | # 63 | html_theme = 'piccolo_theme' 64 | 65 | # Add any paths that contain custom static files (such as style sheets) here, 66 | # relative to this directory. They are copied after the builtin static files, 67 | # so a file named "default.css" will overwrite the builtin "default.css". 68 | html_static_path = ['_static'] 69 | -------------------------------------------------------------------------------- /docs/developer/building.rst: -------------------------------------------------------------------------------- 1 | 2 | .. This work is licensed under a Creative Commons Attribution 4.0 International 3 | .. License. 4 | .. http://creativecommons.org/licenses/by/4.0 5 | .. (c) Cisco Systems, Inc 6 | 7 | Building Containers and VM Images 8 | ================================= 9 | 10 | NFVbench is delivered as Docker container which is built using the Dockerfile under the docker directory. 
11 | This container includes the following parts: 12 | 13 | - TRex traffic generator 14 | - NFVbench orchestration 15 | - NFVbench test VM (qcow2) 16 | 17 | .. _nfvbench-artefact-versioning: 18 | 19 | Versioning 20 | ---------- 21 | These 3 parts are versioned independently and the Dockerfile will determine the combination of versions that 22 | are packaged in the container for the version associated to the Dockerfile. 23 | 24 | The NFVbench version is controlled by the git tag that conforms to the semver version (e.g. "3.3.0"). 25 | This tag controls the version of the Dockerfile used for building the container. 26 | 27 | The TRex version is controlled by the TREX_VER variable in Dockerfile (e.g. ENV TREX_VER "v2.56"). 28 | TRex is installed in container from https://github.com/cisco-system-traffic-generator/trex-core/releases 29 | 30 | The Test VM version is controlled by the VM_IMAGE_VER variable in Dockerfile (e.g. ENV VM_IMAGE_VER "0.8"). 31 | The VM is extracted from google storage (http://artifacts.opnfv.org) 32 | 33 | Updating the VM image 34 | --------------------- 35 | 36 | When the VM image is changed, its version must be increased in order to distinguish from previous image versions. 37 | The version strings to change are located in 2 files: 38 | 39 | - docker/Dockerfile 40 | - nfvbench/nfvbenchvm/dib/build-image.sh 41 | 42 | Building and uploading the VM image 43 | ----------------------------------- 44 | The VM image is built on gerrit verify when the image is not present in google storage. 45 | It is not uploaded yet on google storage. 46 | 47 | The build + upload of the new VM image is done after the review is merged. 48 | 49 | For details on how this is done, refer to ./jjb/nfvbench/nfvbench.yaml in the opnfv releng repository. 50 | 51 | Building a new NFVbench container image 52 | --------------------------------------- 53 | A new container image can be built and published to Dockerhub by CI/CD by applying a new semver tag to the 54 | nfvbench repository. 55 | 56 | 57 | Workflow summary 58 | ---------------- 59 | 60 | NFVbench code has changed: 61 | 62 | - commit with gerrit 63 | - apply a new semver tag to trigger the container image build/publication 64 | 65 | VM code has changed: 66 | 67 | - update VM version in the 2 locations 68 | - commit VM changes with gerrit to trigger VM build and publication to google storage 69 | - IMPORTANT! wait for the VM image to be pushed to google storage before going to the next step 70 | (otherwise the container build will fail as it will not find the VM image) 71 | - apply a new semver tag to trigger the container image build/publication 72 | 73 | To increase the TRex version: 74 | 75 | - change the Trex version in Dockerfile 76 | - commit with gerrit 77 | - apply a new semver tag to trigger the container image build/publication 78 | -------------------------------------------------------------------------------- /docs/developer/design/design.rst: -------------------------------------------------------------------------------- 1 | .. This work is licensed under a Creative Commons Attribution 4.0 International 2 | .. License. 3 | .. http://creativecommons.org/licenses/by/4.0 4 | .. 
(c) Cisco Systems, Inc
5 | 
6 | 
7 | *******************
8 | NFVbench components
9 | *******************
10 | 
11 | NFVbench can be decomposed into the following components:
12 | 
13 | - Configuration
14 | - Orchestration:
15 | 
16 |   - Staging
17 |   - Traffic generation
18 |   - Results analysis
19 | 
20 | Configuration
21 | =============
22 | This component is in charge of getting the configuration options from the user and consolidating them with
23 | the default configuration into a running configuration.
24 | 
25 | default configuration + user configuration options = running configuration
26 | 
27 | User configuration can come from:
28 | 
29 | - CLI configuration shortcut arguments (e.g. --frame-size)
30 | - CLI configuration file (--config [file])
31 | - CLI configuration string (--config [string])
32 | - REST request body
33 | - custom platform plugin
34 | 
35 | The precedence order for configuration is (from highest precedence to lowest precedence):
36 | 
37 | - CLI configuration or REST configuration
38 | - custom platform plugin
39 | - default configuration
40 | 
41 | The custom platform plugin is an optional Python class that can be used to override default configuration options
42 | with default platform options which can be either hardcoded or calculated at runtime from platform-specific sources
43 | (such as platform deployment configuration files).
44 | A custom platform plugin class is a child of the parent class nfvbench.config_plugin.ConfigPlugin.
45 | 
46 | Orchestration
47 | =============
48 | Once the configuration is settled, benchmark orchestration is managed by the ChainRunner class (nfvbench.chain_runner.ChainRunner).
49 | The chain runner will take care of orchestrating the staging, traffic generation and results analysis.
50 | 
51 | 
52 | Staging
53 | -------
54 | The staging component is in charge of staging the OpenStack resources that are used for the requested packet path.
55 | For example, for a PVP packet path, this module will create 2 Neutron networks and one VM instance connected to these 2 networks.
56 | Multi-chaining and VM placement are also handled by this module.
57 | 
58 | Main class: nfvbench.chaining.ChainManager
59 | 
60 | Traffic Generation
61 | ------------------
62 | The traffic generation component is in charge of controlling the TRex traffic generator using its Python API.
63 | It includes tasks such as:
64 | 
65 | - traffic check end to end to make sure the packet path is clear in both directions before starting a benchmark
66 | - programming the TRex traffic flows based on requested parameters
67 | - fixed rate control
68 | - NDR/PDR binary search
69 | 
70 | Main class: nfvbench.traffic_client.TrafficClient
71 | 
72 | 
73 | Traffic Generator Results Analysis
74 | ----------------------------------
75 | At the end of a traffic generation session, this component collects the results from TRex and packages them in a format that
76 | is suitable for the various output formats (JSON, REST, file, fluentd).
77 | In the case of multi-chaining, it handles aggregation of results across chains.
78 | 
79 | Main class: nfvbench.stats_manager.StatsManager
80 | 
--------------------------------------------------------------------------------
/docs/developer/design/index.rst:
--------------------------------------------------------------------------------
1 | .. This work is licensed under a Creative Commons Attribution 4.0 International
2 | .. License.
3 | .. http://creativecommons.org/licenses/by/4.0
4 | .. (c) Cisco Systems, Inc
5 | 
6 | =====================
7 | NFVbench Design Notes
8 | =====================
9 | 
10 | .. toctree::
11 |    :maxdepth: 2
12 | 
13 |    overview
14 |    design
15 |    versioning
16 |    traffic_desc
17 |    ndrpdr
18 | 
--------------------------------------------------------------------------------
/docs/developer/design/ndrpdr.rst:
--------------------------------------------------------------------------------
1 | .. This work is licensed under a Creative Commons Attribution 4.0 International
2 | .. License.
3 | .. http://creativecommons.org/licenses/by/4.0
4 | .. (c) Cisco Systems, Inc
5 | 
6 | NDR/PDR Binary Search
7 | =====================
8 | 
9 | The NDR/PDR binary search algorithm used by NFVbench is based on the algorithm used by the
10 | FD.io CSIT project, with some additional optimizations.
11 | 
12 | Algorithm Outline
13 | -----------------
14 | 
15 | The ServiceChain class (nfvbench/service_chain.py) is responsible for calculating the NDR/PDR
16 | for all frame sizes requested in the configuration.
17 | Calculation for 1 frame size is delegated to the TrafficClient class (nfvbench/traffic_client.py).
18 | 
19 | Call chain for calculating the NDR-PDR for a list of frame sizes:
20 | 
21 | - ServiceChain.run()
22 | - ServiceChain._get_chain_results()
23 | - for every frame size:
24 |   - ServiceChain.__get_result_per_frame_size()
25 |   - TrafficClient.get_ndr_pdr()
26 |   - TrafficClient.__range_search() recursive binary search
27 | 
28 | The search range is delimited by a left and right rate (expressed as a % of line rate per direction).
29 | The search always starts at line rate per port, e.g. in the case of 2x10Gbps, the first iteration
30 | will send 10Gbps of traffic on each port.
31 | 
32 | The load_epsilon configuration parameter defines the accuracy of the result as a % of line rate.
33 | The default value of 0.1 indicates for example that the measured NDR and PDR are within 0.1% of line rate of the
34 | actual NDR/PDR (e.g. 0.1% of 10Gbps is 10Mbps). It also determines how small the search range must be in the binary search.
35 | Smaller values of load_epsilon will result in more iterations and will take more time but may not
36 | always be beneficial if the absolute value falls below the precision level of the measurement.
37 | For example a value of 0.01% would translate to an absolute value of 1Mbps (for a 10Gbps port) or
38 | around 10kpps (at 64 byte size), which might be too fine-grained.
39 | 
40 | The recursion narrows down the range by half and stops when:
41 | 
42 | - the range is smaller than the configured load_epsilon value
43 | - or when the search hits 100% or 0% of line rate
44 | 
45 | Optimization
46 | ------------
47 | 
48 | Binary search algorithms assume that the drop rate curve is monotonically increasing with the Tx rate.
49 | To save time, the algorithm used by NFVbench is capable of calculating the optimal Tx rate for an
50 | arbitrary list of target maximum drop rates in one pass instead of the usual 1 pass per target maximum drop rate.
51 | This saves time linearly with the number of target drop rates.
52 | For example, a typical NDR/PDR search will have 2 target maximum drop rates:
53 | 
54 | - NDR = 0.001%
55 | - PDR = 0.1%
56 | 
57 | The binary search will then start with a sorted list of 2 target drop rates: [0.1, 0.001].
58 | The first part of the binary search will then focus on finding the optimal rate for the first target
59 | drop rate (0.1%).
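A highly simplified sketch of this multi-target search may help make the idea concrete; the function below is illustrative only (assumed names, with a single drop-percentage callback standing in for a real traffic run), not the actual TrafficClient.__range_search implementation::

    def multi_target_search(measure_drop_pct, targets=(0.1, 0.001), load_epsilon=0.1):
        """Return {target max drop %: highest passing rate, in % of line rate}.

        measure_drop_pct(rate) is assumed to return the drop percentage measured
        at the given Tx rate and to increase monotonically with the rate.
        """
        results = {}
        lo, hi = 0.0, 100.0                            # search window, in % of line rate
        for target in sorted(targets, reverse=True):   # most tolerant first (PDR, then NDR)
            best = lo
            while hi - lo > load_epsilon:
                rate = (lo + hi) / 2.0
                if measure_drop_pct(rate) <= target:
                    best, lo = rate, rate              # passing: search higher
                else:
                    hi = rate                          # failing: search lower
            results[target] = best
            lo = 0.0                                   # stricter target: keep hi as the new upper bound
        return results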
When found, the current target drop rate is removed from the list and 60 | iteration continues with the next target drop rate in the list but this time 61 | starting from the upper/lower range of the previous target drop rate, which saves significant time. 62 | The binary search continues until the target maximum drop rate list is empty. 63 | 64 | Results Granularity 65 | ------------------- 66 | The binary search results contain per direction stats (forward and reverse). 67 | In the case of multi-chaining, results contain per chain stats. 68 | The current code only reports aggregated stats (forward + reverse for all chains) but could be enhanced 69 | to report per chain stats. 70 | 71 | 72 | CPU Limitations 73 | --------------- 74 | One particularity of using a software traffic generator is that the requested Tx rate may not always be met due to 75 | resource limitations (e.g. CPU is not fast enough to generate a very high load). The algorithm should take this into 76 | consideration: 77 | 78 | - always monitor the actual Tx rate achieved as reported back by the traffic generator 79 | - actual Tx rate is always <= requested Tx rate 80 | - the measured drop rate should always be relative to the actual Tx rate 81 | - if the actual Tx rate is < requested Tx rate and the measured drop rate is already within threshold 82 | (NUL 2>NUL 16 | if errorlevel 9009 ( 17 | echo. 18 | echo.The 'sphinx-build' command was not found. Make sure you have Sphinx 19 | echo.installed, then set the SPHINXBUILD environment variable to point 20 | echo.to the full path of the 'sphinx-build' executable. Alternatively you 21 | echo.may add the Sphinx directory to PATH. 22 | echo. 23 | echo.If you don't have Sphinx installed, grab it from 24 | echo.https://www.sphinx-doc.org/ 25 | exit /b 1 26 | ) 27 | 28 | %SPHINXBUILD% -M %1 %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 29 | goto end 30 | 31 | :help 32 | %SPHINXBUILD% -M help %SOURCEDIR% %BUILDDIR% %SPHINXOPTS% %O% 33 | 34 | :end 35 | popd 36 | -------------------------------------------------------------------------------- /docs/release-notes/index.rst: -------------------------------------------------------------------------------- 1 | .. _nfvbench-releasenotes: 2 | 3 | .. This work is licensed under a Creative Commons Attribution 4.0 International License. 4 | .. http://creativecommons.org/licenses/by/4.0 5 | 6 | ************* 7 | Release Notes 8 | ************* 9 | 10 | .. toctree:: 11 | :maxdepth: 1 12 | 13 | nfvbench-release-notes 14 | nfvbenchvm-release-notes 15 | -------------------------------------------------------------------------------- /docs/release-notes/nfvbench-release-notes.rst: -------------------------------------------------------------------------------- 1 | .. This work is licensed under a Creative Commons Attribution 4.0 International License. 2 | .. http://creativecommons.org/licenses/by/4.0 3 | .. 
(c) Cisco Systems, Inc 4 | 5 | NFVbench Release Notes 6 | ++++++++++++++++++++++ 7 | 8 | Release 3.6.2 9 | ============= 10 | 11 | - NFVBENCH-152 Add service_mode method for debugging purpose 12 | - NFVBENCH-150 Add support for VXLAN latency 13 | - NFVBENCH-146 Add cache_size option 14 | - NFVBENCH-151 Allocate hugepages on two NUMAs in nfvbenchvm 15 | - NFVBENCH-149 Negative latency exception during NDR/PDR search 16 | - NFVBENCH-148 Increase the waiting time based on # of instances 17 | 18 | Release 3.5.1 19 | ============= 20 | 21 | - NFVBENCH-147 Incorrect URL used for admin check in credentials 22 | - Release the validation check for VxLAN networks 23 | - NFVBENCH-145 Config file not found. No explicit error 24 | - NFVBENCH-144 Trex cannot take account NFVBench config (platform thread id 0) 25 | 26 | - NFVBENCH-140 Retrieve High Dynamic Range latency histograms with TRex v2.59 27 | - NFVBENCH-143 Trex cannot start due to invalid config (platform None) 28 | - NFVBENCH-141 Fix Openstack user admin role check 29 | - NFVBENCH-139 Fix master_thread_id and latency_thread_id property checking 30 | - NFVBENCH-95 Add HdrHistogram encodes returned by TRex to JSON results 31 | - NFVBENCH-138 Use yaml.safe_load() instead of unsafe yaml load 32 | - NFVBENCH-137 NFVbench generates wrong L4 checksums for VxLAN traffic 33 | 34 | Release 3.4.0 35 | ============= 36 | 37 | - Add L3 traffic management with Neutron routers 38 | 39 | 40 | Release 3.3.0 41 | ============= 42 | 43 | Major release highlights: 44 | 45 | - VxLAN support 46 | - test VM can now have idle interfaces 47 | - test VM can be launched with multiqueue enabled 48 | - upgrade to TRex v2.56 49 | 50 | 51 | Release 2.0 52 | =========== 53 | NFVbench will now follow its own project release numbering (x.y.z) which is independent of the OPNFV release numbering (opnfv-x.y.z) 54 | 55 | Major release highlights: 56 | 57 | - Dedicated edge networks for each chain 58 | - Enhanced chain analysis 59 | - Code refactoring and enhanced unit testing 60 | - Miscellaneous enhancement 61 | 62 | Dedicated edge networks for each chain 63 | -------------------------------------- 64 | NFVbench 1.x only supported shared edge networks for all chains. 65 | For example, 20xPVP would create only 2 edge networks (left and right) shared by all chains. 66 | With NFVbench 2.0, chain networks are dedicated (unshared) by default with an option in 67 | the nfvbench configuration to share them. A 20xPVP run will create 2x20 networks instead. 68 | 69 | Enhanced chain analysis 70 | ----------------------- 71 | The new chain analysis improves at multiple levels: 72 | 73 | - there is now one table for each direction (forward and reverse) that both read from left to right 74 | - per-chain packet counters and latency 75 | - all-chain aggregate packet counters and latency 76 | - supports both shared and dedicated chain networks 77 | 78 | Code refactoring and enhanced unit testing 79 | ------------------------------------------ 80 | The overall code structure is now better partitioned in the following functions: 81 | 82 | - staging and resource discovery 83 | - traffic generator 84 | - stats collection 85 | 86 | The staging algorithm was rewritten to be: 87 | 88 | - a lot more robust to errors and to handle better resource reuse use cases. 89 | For example when a network with a matching name is discovered the new code will verify that the 90 | network is associated to the right VM instance 91 | - a lot more strict when it comes to the inventory of MAC addresses. 
For example the association 92 | from each VM MAC to a chain index for each Trex port is handled in a much more strict manner. 93 | 94 | Although not all code is unit tested, the most critical parts are unit tested with the use of 95 | the mock library. The resulting unit test code can run in isolation without needing a real system under test. 96 | 97 | 98 | OPNFV Fraser Release 99 | ==================== 100 | 101 | Over 30 Jira tickets have been addressed in this release (Jira NFVBENCH-55 to NFVBENCH-78) 102 | 103 | The Fraser release adds the following new features: 104 | 105 | - support for benchmarking non-OpenStack environments (with external setup and no OpenStack openrc file) 106 | - PVVP packet path with SRIOV at the edge and vswitch between VMs 107 | - support logging events and results through fluentd 108 | 109 | Enhancements and main bug fixes: 110 | 111 | - end to end connectivity for larger chain count is now much more accurate for large chain count - avoiding excessive drops 112 | - use newer version of TRex (2.32) 113 | - use newer version of testpmd DPDK 114 | - NDR/PDR uses actual TX rate to calculate drops - resulting in more accurate results 115 | - add pylint to unit testing 116 | - add self sufficient and standalone unit testing (without actual testbed) 117 | 118 | 119 | OPNFV Euphrates Release 120 | ======================= 121 | 122 | This is the introductory release for NFVbench. In this release, NFVbench provides the following features/capabilities: 123 | 124 | - standalone installation with a single Docker container integrating the open source TRex traffic generator 125 | - can measure data plane performance for any NFVi full stack 126 | - can setup automatically service chains with the following packet paths: 127 | - PVP (physical-VM-physical) 128 | - PVVP (physical-VM-VM-physical) intra-node and inter-node 129 | - can setup multiple service chains 130 | - N * PVP 131 | - N * PVVP 132 | - supports any external service chain (pre-set externally) that can do basic IPv4 routing 133 | - can measure 134 | - drop rate and latency for any given fixed rate 135 | - NDR (No Drop Rate) and PDR (Partial Drop Rate) with configurable drop rates 136 | - traffic specification 137 | - any fixed frame size or IMIX 138 | - uni or bidirectional traffic 139 | - any number of flows 140 | - vlan tagging can be enabled or disabled 141 | - user interface: 142 | - CLI 143 | - REST+socketIO 144 | - fully configurable runs with yaml-JSON configuration 145 | - detailed results in JSON format 146 | - summary tabular results 147 | - can send logs and results to one or more fluentd aggregators (per configuration) 148 | -------------------------------------------------------------------------------- /docs/release-notes/nfvbenchvm-release-notes.rst: -------------------------------------------------------------------------------- 1 | .. This work is licensed under a Creative Commons Attribution 4.0 International License. 2 | .. http://creativecommons.org/licenses/by/4.0 3 | 4 | 5 | NFVbench Loop VM Image Release Notes 6 | ++++++++++++++++++++++++++++++++++++ 7 | 8 | As explained in :ref:`nfvbench-artefact-versioning`, NFVbench loop VM image has 9 | its own version numbering scheme. Starting from version 0.11, this page 10 | summarizes the news of each release. 11 | 12 | 13 | Release 0.16 (2022-11-15) 14 | ========================= 15 | 16 | Fixes: 17 | 18 | - Increase wait for VPP service from 10 to 30 seconds (10s is not enough on some 19 | setups) and poll every second instead of sleeping 10s. 
20 | 21 | - Set the MTU of the management interface to 1500 by default (to reduce the risk 22 | to get an unmanageable VM). A different value can be set using the 23 | ``INTF_MGMT_MTU`` variable in ``/etc/nfvbenchvm.conf``. 24 | 25 | Changes for developers: 26 | 27 | - Add 2 debug features to ``build-image.sh``: 28 | 29 | - The new option ``-t`` (enable debug traces) allows to show in the build log 30 | the commands run in the shell scripts, including the commands defined in the 31 | disk image builder elements. 32 | 33 | - The new option ``-d`` (debug) instructs ``disk-image-create`` to drop the 34 | developer in a shell inside the chroot in case an error occurs. This makes 35 | troubleshooting of the image possible (inspect files, run commands, ...) 36 | 37 | - Abort build on error: make sure a VM image build fails if any step fails. 38 | Else we can end up with a bad image not containing all that we want, and we 39 | discover this later at run time. 40 | 41 | - Fix build with diskimage_builder (dib) 3.16.0. 42 | 43 | - Switch VPP package repository to packagecloud.io instead of nexus.fd.io. This 44 | fixes intermittent access issues with nexus.fd.io and this will make it 45 | possible to get vpp releases higher than 19.08. 46 | 47 | - Separate loop VM and generator VM version numbers (a first step towards using 48 | nfvbench version number for the generator VM). 49 | 50 | 51 | Release 0.15 (2021-06-04) 52 | ========================= 53 | 54 | - NFVBENCH-211 Fix VPP driver for loop VM (switch UIO driver for VPP forwarder: 55 | use ``uio_pci_generic`` instead of ``igb_uio``). 56 | 57 | 58 | Release 0.14 (2021-05-21) 59 | ========================= 60 | 61 | - NFVBENCH-209 Fix NFVbench loopvm build failed on testpmd step (includes switch 62 | UIO driver for testmpd forwarder: use ``uio_pci_generic`` instead of 63 | ``igb_uio``). 64 | 65 | 66 | Release 0.13 (2021-04-28) 67 | ========================= 68 | 69 | - NFVBENCH-196: New NFVbench image for generator part (nfvbench and TRex codes inside VM) 70 | - Change Linux kernel boot-time configuration (kernel CLI parameters): 71 | 72 | - Extend CPU isolation (``isolcpus=1-7`` instead of ``isolcpus=1``) 73 | - Increase the number of 1GB huge pages (``hugepages=4`` instead of ``hugepages=2``) 74 | - Enable IOMMU (``intel_iommu=on iommu=pt``) 75 | 76 | - Load the ``vfio-pci`` kernel module with the ``enable_unsafe_noiommu_mode=1`` option. 77 | 78 | 79 | Release 0.12 (2020-01-23) 80 | ========================= 81 | 82 | - NFVBENCH-157 Add possibility to not use the ARP static configuration for VPP loop VM 83 | 84 | 85 | Release 0.11 (2019-11-26) 86 | ========================= 87 | 88 | - NFVBENCH-156 Add management interface and ssh config in NFVBench image 89 | 90 | 91 | Earlier releases 92 | ================ 93 | 94 | See NFVbench commit history. 95 | -------------------------------------------------------------------------------- /docs/requirements.txt: -------------------------------------------------------------------------------- 1 | # Python dependencies needed to build and check nfvbench documentation 2 | 3 | sphinx # BSD 4 | piccolo_theme 5 | 6 | # Extract nfvbench version from git 7 | pbr 8 | -------------------------------------------------------------------------------- /docs/user/extchains.rst: -------------------------------------------------------------------------------- 1 | .. This work is licensed under a Creative Commons Attribution 4.0 International License. 2 | .. SPDX-License-Identifier: CC-BY-4.0 3 | .. 
(c) Cisco Systems, Inc 4 | 5 | =============== 6 | External Chains 7 | =============== 8 | 9 | NFVbench can measure the performance of 1 or more L3 service chains that are set up externally, with or without OpenStack. 10 | Instead of being set up by NFVbench, the complete environment (VNFs and networks) must be set up prior to running NFVbench. 11 | 12 | Each external chain is made of 1 or more VNFs and has exactly 2 edge network interfaces (left and right network interfaces) 13 | that are connected to 2 edge networks (left and right networks). 14 | The 2 edge networks for each chain can either be shared across all chains or can be independent. 15 | 16 | The internal composition of a multi-VNF service chain can be arbitrary (usually linear) as far as NFVbench is concerned; 17 | the only requirement is that the service chain can route L3 packets properly between the left and right networks. 18 | 19 | The network topology of the service chains is defined by the "service_chain_shared_net" option in the 20 | NFVbench configuration file. 21 | 22 | 23 | Shared Edge Networks 24 | -------------------- 25 | 26 | This option is defined when "service_chain_shared_net" is set to true. 27 | All chains must share the same 2 edge networks and the VNF gateway IP addresses on each edge 28 | must all belong to the same subnet. 29 | 30 | .. image:: images/nfvbench-ext-shared.png 31 | 32 | The main advantage of this mode is that only 2 network segments are needed to support an arbitrary number of chains. 33 | 34 | 35 | Multi-VLAN Edge Networks 36 | ------------------------ 37 | 38 | This option is defined when "service_chain_shared_net" is set to false (default). 39 | Each chain has its own dedicated left and right network and there is no inter-chain constraint 40 | on the VNF IP addresses since they all belong to different network segments. 41 | 42 | .. image:: images/nfvbench-ext-multi-vlans.png 43 | 44 | The advantage of this mode is that the configuration of the VNFs can be made identical (same 45 | gateway IP addresses, same static routes). 46 | However, this mode requires 2 network segments per chain. 47 | 48 | 49 | Detailed Example 50 | ---------------- 51 | To run NFVbench on external service chains using shared edge networks: 52 | 53 | - tell NFVbench to use an external service chain by adding "-sc EXT" or "--service-chain EXT" to the NFVbench CLI options 54 | - specify the number of external chains using the "-scc" option (defaults to 1 chain) 55 | - if OpenStack is used: 56 | - specify the name of the 2 edge networks in "external_networks" in the NFVbench configuration file 57 | - The two networks specified have to exist in Neutron ('napa' and 'marin' in the diagram below) 58 | - if OpenStack is not used: 59 | - specify the VLAN id to use for the 2 edge networks in "vlans" in the NFVbench configuration file 60 | - specify the VNF gateway IPs for the external service chains (1.1.0.2 and 2.2.0.2) 61 | - specify the traffic generator gateway IPs for the external service chains (1.1.0.102 and 2.2.0.102 in diagram below) 62 | - specify the packet source and destination IPs for the virtual devices that are simulated (10.0.0.0/8 and 20.0.0.0/8) 63 | 64 | .. 
image:: images/extchain-config.png 65 | 66 | L3 routing must be enabled in the VNF and configured to: 67 | 68 | - reply to ARP requests to its public IP addresses on both left and right networks 69 | - route packets from each set of remote devices toward the appropriate dest gateway IP in the traffic generator using 2 static routes (as illustrated in the diagram) 70 | 71 | Upon start, NFVbench will: 72 | - first retrieve the properties of the left and right networks using Neutron APIs, 73 | - extract the underlying network ID (typically VLAN segmentation ID), 74 | - generate packets with the proper VLAN ID and measure traffic. 75 | 76 | Note that in the case of multiple chains, all chain end interfaces must be connected to the same two left and right networks. 77 | The traffic will be load balanced across the corresponding gateway IPs of these external service chains. 78 | -------------------------------------------------------------------------------- /docs/user/faq.rst: -------------------------------------------------------------------------------- 1 | .. This work is licensed under a Creative Commons Attribution 4.0 International License. 2 | .. SPDX-License-Identifier: CC-BY-4.0 3 | .. (c) Cisco Systems, Inc 4 | 5 | Frequently Asked Questions 6 | ************************** 7 | 8 | General Questions 9 | ================= 10 | 11 | Can NFVbench be used without OpenStack 12 | -------------------------------------- 13 | Yes. This can be done using the EXT chain mode, with or without ARP 14 | (depending on whether your system under test can do routing) and by setting the openrc_file 15 | property to empty in the NFVbench configuration. 16 | 17 | Can NFVbench be used with a different traffic generator than TRex? 18 | ------------------------------------------------------------------ 19 | This is possible but requires developing a new Python class to manage the new traffic generator interface. 20 | 21 | Can I connect Trex directly to my compute node? 22 | ----------------------------------------------- 23 | Yes. 24 | 25 | Can I drive NFVbench using a REST interface? 26 | -------------------------------------------- 27 | NFVbench can run in server mode and accept HTTP requests to run any type of measurement (fixed rate run or NDR_PDR run) 28 | with any run configuration. 29 | 30 | Can I run NFVbench on a Cisco UCS-B series blade? 31 | ------------------------------------------------- 32 | Yes, provided your UCS-B series server has a Cisco VIC 1340 (with a recent firmware version). 33 | TRex will require VIC firmware version 3.1(2) or higher for blade servers (which supports more filtering capabilities). 34 | In this setting, the 2 physical interfaces for data plane traffic are simply hooked to the UCS-B fabric interconnect (no need to connect to a switch). 35 | 36 | Troubleshooting 37 | =============== 38 | 39 | TrafficClientException: End-to-end connectivity cannot be ensured 40 | ------------------------------------------------------------------ 41 | Prior to running a benchmark, NFVbench will make sure that traffic is passing in the service chain by sending a small flow of packets in each direction and verifying that they are received back at the other end. 42 | This exception means that NFVbench cannot pass any traffic in the service chain. 
43 | 44 | The most common issues that prevent traffic from passing are: 45 | - incorrect wiring of the NFVbench/TRex interfaces 46 | - incorrect vlan_tagging setting in the NFVbench configuration: this needs to match how the NFVbench ports on the switch are configured (trunk or access port) 47 | 48 | - if the switch port is configured as access port, you must disable vlan_tagging in the NFVbench configuration 49 | - if the switch port is configured as trunk (recommended method), you must enable it 50 | 51 | Issues with high performance at a high line rate 52 | ------------------------------------------------- 53 | 54 | Flow statistics and/or latency streams can cause performance issues when testing at high line rate. 55 | 56 | Flow statistics imply CPU usage to analyse packets and retrieve statistics. CPU can reach 100% usage when high throughput is tested because only one CPU is used for packet reception in TRex. 57 | The ``--no-flow-stats`` option allows you to disable TRex statistics aggregation during the NFVBench test. 58 | This saves CPU capacity for packet reception. 59 | 60 | Example of use: 61 | 62 | .. code-block:: bash 63 | 64 | nfvbench --no-flow-stats 65 | 66 | 2019-10-28 10:26:52,099 INFO End-to-end connectivity established 67 | 2019-10-28 10:26:52,127 INFO Cleared all existing streams 68 | 2019-10-28 10:26:52,129 INFO Traffic flow statistics are disabled. 69 | 70 | 71 | Latency streams also imply CPU usage to analyse packets and retrieve latency values. CPU can reach 100% usage when high throughput is tested because only one CPU is used for packet reception in TRex. 72 | The ``--no-latency-streams`` option allows you to disable latency streams during the NFVBench test. 73 | This saves CPU capacity for packet reception, but no latency information will be returned (to be used only if latency values have no meaning for your test). 74 | 75 | Example of use: 76 | 77 | .. code-block:: bash 78 | 79 | nfvbench --no-latency-streams 80 | 2019-10-28 10:30:03,955 INFO End-to-end connectivity established 81 | 2019-10-28 10:30:03,982 INFO Cleared all existing streams 82 | 2019-10-28 10:30:03,982 INFO Latency streams are disabled 83 | 84 | 85 | Latency flow statistics imply CPU usage to analyse packets and retrieve statistics. CPU can reach 100% usage when high throughput is tested because only one CPU is used for packet reception in TRex. 86 | The ``--no-latency-stats`` option allows you to disable TRex statistics aggregation for latency packets during the NFVBench test. 87 | This saves CPU capacity for packet reception. 88 | 89 | Example of use: 90 | 91 | .. code-block:: bash 92 | 93 | nfvbench --no-latency-stats 94 | 2019-10-28 10:28:21,559 INFO Cleared all existing streams 95 | 2019-10-28 10:28:21,567 INFO Latency flow statistics are disabled. 96 | -------------------------------------------------------------------------------- /docs/user/hw_requirements.rst: -------------------------------------------------------------------------------- 1 | .. This work is licensed under a Creative Commons Attribution 4.0 International License. 2 | .. SPDX-License-Identifier: CC-BY-4.0 3 | .. (c) Cisco Systems, Inc 4 | 5 | Requirements for running NFVbench 6 | ================================= 7 | 8 | .. 
_requirements: 9 | 10 | Hardware Requirements 11 | --------------------- 12 | To run NFVbench you need the following hardware: 13 | - a Linux server 14 | - a DPDK compatible NIC with at least 2 ports (preferably 10Gbps or higher) 15 | - 2 ethernet cables between the NIC and the OpenStack pod under test (usually through a top of rack switch) 16 | 17 | The DPDK-compliant NIC must be one supported by the TRex traffic generator (such as the Intel X710; 18 | refer to the TRex Installation Guide for a complete list of supported NICs). 19 | 20 | To run the TRex traffic generator (that is bundled with NFVbench) you will need to wire 2 physical interfaces of the NIC to the TOR switch(es): 21 | - if you have only 1 TOR, wire both interfaces to that same TOR 22 | - 1 interface to each TOR if you have 2 TORs and want to use bonded links to your compute nodes 23 | 24 | .. image:: images/nfvbench-trex-setup.png 25 | 26 | 27 | Switch Configuration 28 | -------------------- 29 | The 2 corresponding ports on the switch(es) facing the TRex ports on the Linux server should be configured in trunk mode (NFVbench will instruct TRex to insert the appropriate vlan tag). 30 | 31 | Using a TOR switch is more representative of a real deployment and makes it possible to measure packet flows on any compute node in the rack without rewiring, and includes the overhead of the TOR switch. 32 | 33 | Although not the primary targeted use case, NFVbench could also support the direct wiring of the traffic generator to 34 | a compute node without a switch. 35 | 36 | Software Requirements 37 | --------------------- 38 | 39 | You need Docker to be installed on the Linux server. 40 | 41 | TRex uses the DPDK interface to interact with the DPDK compatible NIC for sending and receiving frames. The Linux server will 42 | need to be configured properly to enable DPDK. 43 | 44 | DPDK requires a uio (User space I/O) or vfio (Virtual Function I/O) kernel module to be installed on the host to work. 45 | There are 2 main uio kernel module implementations (igb_uio and uio_pci_generic) and one vfio kernel module implementation. 46 | 47 | To check if a uio or vfio module is already loaded on the host: 48 | 49 | .. code-block:: bash 50 | 51 | lsmod | grep -e igb_uio -e uio_pci_generic -e vfio 52 | 53 | 54 | If missing, it is necessary to install a uio/vfio kernel module on the host server: 55 | 56 | - find a suitable kernel module for your host server (any uio or vfio kernel module built with the same Linux kernel version should work) 57 | - load it using the modprobe and insmod commands 58 | 59 | Example of installation of the igb_uio kernel module: 60 | 61 | .. code-block:: bash 62 | 63 | modprobe uio 64 | insmod ./igb_uio.ko 65 | 66 | Finally, the correct iommu options and huge pages must be configured on the Linux server boot command line: 67 | 68 | - enable intel_iommu and iommu pass through: "intel_iommu=on iommu=pt" 69 | - for TRex, pre-allocate 1024 huge pages of 2MB each (for a total of 2GB): "hugepagesz=2M hugepages=1024" 70 | 71 | More detailed instructions can be found in the DPDK documentation (https://buildmedia.readthedocs.org/media/pdf/dpdk/latest/dpdk.pdf). 
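As a quick sanity check before starting NFVbench, the items above (IOMMU boot options, huge page allocation and uio/vfio kernel module) can be verified with a short script. This is only a sketch of such a check and is not part of NFVbench:

.. code-block:: python

    # Sketch of a host sanity check: verify the IOMMU boot options, the huge
    # page allocation and the presence of a uio/vfio kernel module.
    import re

    cmdline = open('/proc/cmdline').read()
    print('intel_iommu=on :', 'intel_iommu=on' in cmdline)
    print('iommu=pt       :', 'iommu=pt' in cmdline)

    meminfo = open('/proc/meminfo').read()
    hugepages = re.search(r'HugePages_Total:\s+(\d+)', meminfo)
    print('HugePages_Total:', hugepages.group(1) if hugepages else 'not found')

    modules = open('/proc/modules').read()
    print('uio/vfio module:',
          any(m in modules for m in ('igb_uio', 'uio_pci_generic', 'vfio_pci')))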
72 | -------------------------------------------------------------------------------- /docs/user/images/extchain-config.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/opnfv/nfvbench/8ecfd4c886507fe602398a8623e6044d40ea8090/docs/user/images/extchain-config.png -------------------------------------------------------------------------------- /docs/user/images/nfvbench-all-sriov-pvvp.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/opnfv/nfvbench/8ecfd4c886507fe602398a8623e6044d40ea8090/docs/user/images/nfvbench-all-sriov-pvvp.png -------------------------------------------------------------------------------- /docs/user/images/nfvbench-all-sriov-pvvp2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/opnfv/nfvbench/8ecfd4c886507fe602398a8623e6044d40ea8090/docs/user/images/nfvbench-all-sriov-pvvp2.png -------------------------------------------------------------------------------- /docs/user/images/nfvbench-ext-multi-vlans.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/opnfv/nfvbench/8ecfd4c886507fe602398a8623e6044d40ea8090/docs/user/images/nfvbench-ext-multi-vlans.png -------------------------------------------------------------------------------- /docs/user/images/nfvbench-ext-shared.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/opnfv/nfvbench/8ecfd4c886507fe602398a8623e6044d40ea8090/docs/user/images/nfvbench-ext-shared.png -------------------------------------------------------------------------------- /docs/user/images/nfvbench-kibana-filter-kql.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/opnfv/nfvbench/8ecfd4c886507fe602398a8623e6044d40ea8090/docs/user/images/nfvbench-kibana-filter-kql.png -------------------------------------------------------------------------------- /docs/user/images/nfvbench-kibana-filter.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/opnfv/nfvbench/8ecfd4c886507fe602398a8623e6044d40ea8090/docs/user/images/nfvbench-kibana-filter.png -------------------------------------------------------------------------------- /docs/user/images/nfvbench-kibana-gbps-line.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/opnfv/nfvbench/8ecfd4c886507fe602398a8623e6044d40ea8090/docs/user/images/nfvbench-kibana-gbps-line.png -------------------------------------------------------------------------------- /docs/user/images/nfvbench-kibana-pps-scatter.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/opnfv/nfvbench/8ecfd4c886507fe602398a8623e6044d40ea8090/docs/user/images/nfvbench-kibana-pps-scatter.png -------------------------------------------------------------------------------- /docs/user/images/nfvbench-kibana-pps-theoretical.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/opnfv/nfvbench/8ecfd4c886507fe602398a8623e6044d40ea8090/docs/user/images/nfvbench-kibana-pps-theoretical.png -------------------------------------------------------------------------------- 
/docs/user/images/nfvbench-kibana-zoom-selection.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/opnfv/nfvbench/8ecfd4c886507fe602398a8623e6044d40ea8090/docs/user/images/nfvbench-kibana-zoom-selection.png -------------------------------------------------------------------------------- /docs/user/images/nfvbench-kibana.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/opnfv/nfvbench/8ecfd4c886507fe602398a8623e6044d40ea8090/docs/user/images/nfvbench-kibana.png -------------------------------------------------------------------------------- /docs/user/images/nfvbench-npvp.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/opnfv/nfvbench/8ecfd4c886507fe602398a8623e6044d40ea8090/docs/user/images/nfvbench-npvp.png -------------------------------------------------------------------------------- /docs/user/images/nfvbench-pvp.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/opnfv/nfvbench/8ecfd4c886507fe602398a8623e6044d40ea8090/docs/user/images/nfvbench-pvp.png -------------------------------------------------------------------------------- /docs/user/images/nfvbench-pvpl3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/opnfv/nfvbench/8ecfd4c886507fe602398a8623e6044d40ea8090/docs/user/images/nfvbench-pvpl3.png -------------------------------------------------------------------------------- /docs/user/images/nfvbench-pvvp.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/opnfv/nfvbench/8ecfd4c886507fe602398a8623e6044d40ea8090/docs/user/images/nfvbench-pvvp.png -------------------------------------------------------------------------------- /docs/user/images/nfvbench-pvvp2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/opnfv/nfvbench/8ecfd4c886507fe602398a8623e6044d40ea8090/docs/user/images/nfvbench-pvvp2.png -------------------------------------------------------------------------------- /docs/user/images/nfvbench-sriov-pvp.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/opnfv/nfvbench/8ecfd4c886507fe602398a8623e6044d40ea8090/docs/user/images/nfvbench-sriov-pvp.png -------------------------------------------------------------------------------- /docs/user/images/nfvbench-sriov-pvvp.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/opnfv/nfvbench/8ecfd4c886507fe602398a8623e6044d40ea8090/docs/user/images/nfvbench-sriov-pvvp.png -------------------------------------------------------------------------------- /docs/user/images/nfvbench-sriov-pvvp2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/opnfv/nfvbench/8ecfd4c886507fe602398a8623e6044d40ea8090/docs/user/images/nfvbench-sriov-pvvp2.png -------------------------------------------------------------------------------- /docs/user/images/nfvbench-trex-setup.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/opnfv/nfvbench/8ecfd4c886507fe602398a8623e6044d40ea8090/docs/user/images/nfvbench-trex-setup.png -------------------------------------------------------------------------------- /docs/user/images/nfvbench-xtesting.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/opnfv/nfvbench/8ecfd4c886507fe602398a8623e6044d40ea8090/docs/user/images/nfvbench-xtesting.png -------------------------------------------------------------------------------- /docs/user/index.rst: -------------------------------------------------------------------------------- 1 | .. This work is licensed under a Creative Commons Attribution 4.0 International License. 2 | .. SPDX-License-Identifier: CC-BY-4.0 3 | .. (c) Cisco Systems, Inc 4 | 5 | 6 | .. NFVBench documentation master file, created by 7 | sphinx-quickstart on Thu Sep 29 14:25:18 2016. 8 | 9 | 10 | ******************* 11 | NFVbench User Guide 12 | ******************* 13 | 14 | The NFVbench tool provides an automated way to measure the network performance for the most common data plane packet flows on any OpenStack system. 15 | It is designed to be easy to install and easy to use by non experts (no need to be an expert in traffic generators and data plane performance testing). 16 | 17 | 18 | Table of Contents 19 | 20 | .. toctree:: 21 | :maxdepth: 3 22 | 23 | readme 24 | installation 25 | examples 26 | advanced 27 | pvpl3 28 | xtesting 29 | mpls 30 | extchains 31 | fluentd 32 | kibana 33 | sriov 34 | server 35 | faq 36 | -------------------------------------------------------------------------------- /docs/user/installation.rst: -------------------------------------------------------------------------------- 1 | .. This work is licensed under a Creative Commons Attribution 4.0 International License. 2 | .. SPDX-License-Identifier: CC-BY-4.0 3 | .. (c) Cisco Systems, Inc 4 | 5 | =================================== 6 | Installation and Quick Start Guides 7 | =================================== 8 | 9 | .. toctree:: 10 | :maxdepth: 2 11 | 12 | hw_requirements 13 | quickstart_docker 14 | 15 | -------------------------------------------------------------------------------- /docs/user/kibana.rst: -------------------------------------------------------------------------------- 1 | .. Copyright 2016 - 2023, Cisco Systems, Inc. and the NFVbench project contributors 2 | .. SPDX-License-Identifier: CC-BY-4.0 3 | 4 | NFVbench Kibana visualization: overview 5 | ======================================= 6 | 7 | The fluentd integration offers the possibility to use elasticsearch and kibana as a visualization chain. 8 | 9 | Chain overview: 10 | 11 | .. image:: images/nfvbench-kibana.png 12 | 13 | Example of NFVbench visualizations 14 | ---------------------------------- 15 | 16 | Kibana offers a lot of visualization type (line and bar charts, pie, time series chart, data table ...) and also provide a plugin to develop graph using Vega. 17 | In the below examples, visualizations are based on an NDR result and are developed using `Vega-lite `_. 18 | Data are aggregated using ``user_label`` and ``flow_count`` properties. 19 | 20 | In ``kibana/visualizations/`` pre-created graphs are available into json files. 21 | 22 | For NDR capacity in Gbps using line chart, the offered load in Gbps (``offered_tx_rate_bps``) is used and only the maximum value of the aggregation is kept. 
23 | For NDR capacity in Mpps using line chart, the actual TX rate is used (``rate_pps``) and only the maximum value of the aggregation is kept. 24 | 25 | Scatter plot graphs use the same values but keep all values instead of keeping only the maximum. 26 | 27 | Example of a line chart: 28 | 29 | .. image:: images/nfvbench-kibana-gbps-line.png 30 | 31 | Example of a scatter plot chart: 32 | 33 | .. image:: images/nfvbench-kibana-pps-scatter.png 34 | 35 | Vega offers the possibility to add another graph as a new layer of the current graph. 36 | This solution is used to combine NFVbench results and the theoretical line rate. 37 | Using ``extra_encapsulation_bytes`` in the --user-info property (see :ref:`user-info`), 38 | the theoretical max value (for bps and pps) will be calculated and can be used in graphs through the ``theoretical_tx_rate_bps`` and ``theoretical_tx_rate_pps`` properties. 39 | 40 | Example of a chart with the theoretical value (red line): 41 | 42 | .. image:: images/nfvbench-kibana-pps-theoretical.png 43 | 44 | Each Vega graph can be moved and zoomed (using the mouse scroll wheel), and one set of data can be selected. 45 | 46 | Example: 47 | 48 | .. image:: images/nfvbench-kibana-zoom-selection.png 49 | 50 | These visualizations are included in Kibana dashboards for a synthesis of one set of results (i.e. same ``user_label`` value) or for comparison (i.e. a selection of ``user_label`` values). 51 | See :ref:`filterkibana` for more details about ``user_label`` selection. 52 | 53 | All these visualizations and dashboards are saved in the ``export.ndjson`` file and can be imported into an existing Kibana. See :ref:`importkibana`. 54 | 55 | .. _importkibana: 56 | 57 | Import Kibana dashboards and visualizations 58 | ------------------------------------------- 59 | 60 | To import Kibana dashboards and visualizations: 61 | 62 | .. code-block:: bash 63 | 64 | curl -X POST localhost:5601/api/saved_objects/_import -H "kbn-xsrf: true" --form file=@export.ndjson 65 | 66 | .. note:: The ``.kibana`` index must exist in elasticsearch. 67 | .. note:: The ``.kibana`` index is created automatically after a first deployment and configuration of Kibana. 68 | 69 | .. _filterkibana: 70 | 71 | Kibana user guide: Filter dashboards and visualizations 72 | ======================================================= 73 | 74 | Filter Kibana dashboard or visualization using Kibana query language (KQL) 75 | -------------------------------------------------------------------------- 76 | 77 | .. code-block:: bash 78 | 79 | user_label:*demo* and (flow_count: 128 or flow_count:130000 or flow_count:1000000) 80 | 81 | .. note:: This query will match all user labels which contain ``demo`` in the value and filter on 3 flow counts (128, 130k, 1M). 82 | .. note:: ``flow_count`` is a number, so the KQL query cannot contain a formatted string. 83 | 84 | Example: 85 | 86 | .. image:: images/nfvbench-kibana-filter-kql.png 87 | 88 | 89 | Filter Kibana dashboard or visualization using Kibana filters 90 | ------------------------------------------------------------- 91 | 92 | Kibana offers the possibility to add filters by selecting a field and an operator (is, is not, is one of, is not one of, exists, does not exist). 93 | 94 | Example: 95 | 96 | .. image:: images/nfvbench-kibana-filter.png 97 | -------------------------------------------------------------------------------- /docs/user/mpls.rst: -------------------------------------------------------------------------------- 1 | .. Copyright 2016 - 2023, Cisco Systems, Inc. and the NFVbench project contributors 2 | .. 
SPDX-License-Identifier: CC-BY-4.0 3 | 4 | ========================== 5 | MPLS encapsulation feature 6 | ========================== 7 | 8 | This feature allows generating packets with a standard MPLS L2VPN double label stack, where the outer label is the transport label and the inner label is the VPN label. 9 | The top layer of a packet encapsulated inside MPLS L2VPN is an Ethernet layer with the rest of the IP stack inside. 10 | Please refer to RFC-3031 for more details. 11 | The whole MPLS packet structure looks like the following: 12 | 13 | ###[ Ethernet ]### 14 | dst = ['00:8a:96:bb:14:28'] 15 | src = 3c:fd:fe:a3:48:7c 16 | type = 0x8847 17 | ###[ MPLS ]### <-------------- Outer Label 18 | label = 16303 19 | cos = 1 20 | s = 0 21 | ttl = 255 22 | ###[ MPLS ]### <-------------- Inner Label 23 | label = 5010 24 | cos = 1 25 | s = 1 26 | ttl = 255 27 | ###[ Ethernet ]### 28 | dst = fa:16:3e:bd:02:b5 29 | src = 3c:fd:fe:a3:48:7c 30 | type = 0x800 31 | ###[ IP ]### 32 | version = 4 33 | ihl = None 34 | tos = 0x0 35 | len = None 36 | id = 1 37 | flags = 38 | frag = 0 39 | ttl = 64 40 | proto = udp 41 | chksum = None 42 | src = 16.0.0.1 43 | dst = 48.0.0.1 44 | \options \ 45 | ###[ UDP ]### 46 | sport = 53 47 | dport = 53 48 | len = None 49 | chksum = None 50 | 51 | Example: nfvbench generates MPLS traffic port A ----> port B. This example assumes OpenStack is at the other end of the MPLS tunnels. 52 | Packets generated and sent to port B are delivered to the MPLS domain infrastructure which will transport that packet to the other end 53 | of the MPLS transport tunnel using the outer label. At that point, the outer label is decapsulated and the inner label is used to 54 | select the destination OpenStack network. After decapsulation of the inner label, the resulting L2 frame is then forwarded to the 55 | destination VM corresponding to the destination MAC. When the VM receives the packet, it is sent back to the far end port of the traffic 56 | generator (port B) using either L2 forwarding or L3 routing through the peer virtual interface. The return packet is then encapsulated 57 | with the inner label first, then the outer label, to reach nfvbench on port B. 58 | 59 | Only a 2-label MPLS stack is supported. If a stack of more than two labels is required, these operations should be handled by the MPLS transport 60 | domain: nfvbench is attached to the next-hop MPLS router, and the rest of the MPLS domain should be configured accordingly to be able to 61 | pop/swap/push labels and deliver the packet to the proper destination based on the initial transport label injected by nfvbench. The VPN label 62 | should stay unchanged until it is delivered to the PE (compute node). 63 | Set the nfvbench 'mpls' parameter to 'true' to enable MPLS encapsulation. 64 | When this option is enabled, the internal networks 'network_type' parameter value should be 'mpls'. 65 | MPLS and VxLAN encapsulations are mutually exclusive features: if 'mpls' is 'true' then 'vxlan' should be set to 'false' and vice versa. 66 | The no_flow_stats, no_latency_stats and no_latency_streams parameters should be set to 'true' because these features are not supported at the moment. 67 | In the future, when these features are supported, they will require special NIC hardware. 
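For reference, the double label stack shown at the top of this page can be rebuilt with a few lines of Scapy. This is an illustration only (NFVbench itself builds these streams through the TRex APIs):

.. code-block:: python

    # Illustration only: rebuild the MPLS L2VPN packet layout shown above.
    from scapy.all import Ether, IP, UDP
    from scapy.contrib.mpls import MPLS

    pkt = (Ether(dst='00:8a:96:bb:14:28', src='3c:fd:fe:a3:48:7c') /
           MPLS(label=16303, cos=1, s=0, ttl=255) /   # outer (transport) label
           MPLS(label=5010, cos=1, s=1, ttl=255) /    # inner (VPN) label, bottom of stack
           Ether(dst='fa:16:3e:bd:02:b5', src='3c:fd:fe:a3:48:7c') /
           IP(src='16.0.0.1', dst='48.0.0.1') /
           UDP(sport=53, dport=53))
    pkt.show()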
68 | 69 | Example of 1-chain MPLS configuration: 70 | internal_networks: 71 | left: 72 | network_type: mpls 73 | segmentation_id: 5010 74 | mpls_transport_labels: 16303 75 | physical_network: phys_sriov0 76 | right: 77 | network_type: mpls 78 | segmentation_id: 5011 79 | mpls_transport_labels: 16303 80 | physical_network: phys_sriov1 81 | 82 | Example of 2-chain MPLS configuration: 83 | internal_networks: 84 | left: 85 | network_type: mpls 86 | segmentation_id: [5010, 5020] 87 | mpls_transport_labels: [16303, 16304] 88 | physical_network: phys_sriov0 89 | right: 90 | network_type: mpls 91 | segmentation_id: [5011, 5021] 92 | mpls_transport_labels: [16303, 16304] 93 | physical_network: phys_sriov1 94 | 95 | Example of how to run: 96 | nfvbench --rate 50000pps --duration 30 --mpls 97 | -------------------------------------------------------------------------------- /docs/user/pvpl3.rst: -------------------------------------------------------------------------------- 1 | .. This work is licensed under a Creative Commons Attribution 4.0 International License. 2 | .. SPDX-License-Identifier: CC-BY-4.0 3 | .. (c) Cisco Systems, Inc 4 | 5 | 6 | PVP L3 Router Internal Chain 7 | ---------------------------- 8 | 9 | NFVbench can measure the performance of 1 L3 service chain that are setup by NFVbench (VMs, routers and networks). 10 | 11 | PVP L3 router chain is made of 1 VNF (in vpp mode) and has exactly 2 end network interfaces (left and right internal network interfaces) that are connected to 2 neutron routers with 2 edge networks (left and right edge networks). 12 | The PVP L3 router service chain can route L3 packets properly between the left and right networks. 13 | 14 | To run NFVbench on such PVP L3 router service chain: 15 | 16 | - explicitly tell NFVbench to use PVP service chain with L3 router option by adding ``-l3`` or ``--l3-router`` to NFVbench CLI options or ``l3_router: true`` in config 17 | - explicitly tell NFVbench to use VPP forwarder with ``vm_forwarder: vpp`` in config 18 | - specify the 2 end point networks (networks between NFVBench and neutron routers) of your environment in ``internal_networks`` inside the config file. 19 | - The two networks specified will be created if not existing in Neutron and will be used as the end point networks by NFVbench ('lyon' and 'bordeaux' in the diagram below) 20 | - specify the 2 edge networks (networks between neutron routers and loopback VM) of your environment in ``edge_networks`` inside the config file. 21 | - The two networks specified will be created if not existing in Neutron and will be used as the router gateway networks by NFVbench ('paris' and 'marseille' in the diagram below) 22 | - specify the router gateway IPs for the PVPL3 router service chain (1.2.0.1 and 2.2.0.1) 23 | - specify the traffic generator gateway IPs for the PVPL3 router service chain (1.2.0.254 and 2.2.0.254 in diagram below) 24 | - specify the packet source and destination IPs for the virtual devices that are simulated (10.0.0.0/8 and 20.0.0.0/8) 25 | 26 | 27 | .. image:: images/nfvbench-pvpl3.png 28 | 29 | nfvbench configuration file: 30 | 31 | .. 
code-block:: bash 32 | 33 | vm_forwarder: vpp 34 | 35 | traffic_generator: 36 | ip_addrs: ['10.0.0.0/8', '20.0.0.0/8'] 37 | tg_gateway_ip_addrs: ['1.2.0.254', '2.2.0.254'] 38 | gateway_ip_addrs: ['1.2.0.1', '2.2.0.1'] 39 | 40 | internal_networks: 41 | left: 42 | name: 'lyon' 43 | cidr: '1.2.0.0/24' 44 | gateway: '1.2.0.1' 45 | right: 46 | name: 'bordeaux' 47 | cidr: '2.2.0.0/24' 48 | gateway: '2.2.0.1' 49 | 50 | edge_networks: 51 | left: 52 | name: 'paris' 53 | cidr: '1.1.0.0/24' 54 | gateway: '1.1.0.1' 55 | right: 56 | name: 'marseille' 57 | cidr: '2.1.0.0/24' 58 | gateway: '2.1.0.1' 59 | 60 | Upon start, NFVbench will: 61 | - first retrieve the properties of the left and right networks using Neutron APIs, 62 | - extract the underlying network ID (typically VLAN segmentation ID), 63 | - generate packets with the proper VLAN ID and measure traffic. 64 | 65 | 66 | Please note: ``l3_router`` option is also compatible with external routers. In this case NFVBench will use ``EXT`` chain. 67 | 68 | .. note:: Using a long NFVbench run test, end-to-end connectivity can be lost depending on ARP stale time SUT configuration. 69 | To avoid this issue, activate Gratuitous ARP stream using ``--gratuitous-arp`` or ``-garp`` option. 70 | -------------------------------------------------------------------------------- /kibana/visualizations/ndr_capacity_gbps_line_chart.json: -------------------------------------------------------------------------------- 1 | { 2 | $schema: https: //vega.github.io/schema/vega-lite/v2.json 3 | title: "Capacity in Gbps - 0,001% loss ratio" 4 | data: { 5 | url: { 6 | %context%: true 7 | %timefield%: "@timestamp" 8 | index: resu* 9 | body: { 10 | _source: ["@timestamp", "_source", "offered_tx_rate_bps", "flow_count", "frame_size", "user_label"] 11 | }, 12 | size: 10000 13 | }, 14 | format: { property: "hits.hits" } 15 | }, 16 | transform: [ 17 | { calculate: "datum._source['offered_tx_rate_bps'] / 1000000000" as: "offered_load_gbps"}, 18 | { calculate: "datum._source['flow_count']" as: "flow_count"}, 19 | { calculate: "isNaN(toNumber(datum._source['frame_size'])) ? 
362 : toNumber(datum._source['frame_size'])" as: "frame_size"}, 20 | { calculate: "datum._source['user_label'] + '-' + datum._source['flow_count']" as: "label"} 21 | { aggregate: [{op: "max", field: "offered_load_gbps", as: "max_offered_load_gbps"}], groupby: ["frame_size", "label","flow_count"]} 22 | ], 23 | center: true, 24 | hconcat: [ 25 | { 26 | width:10, 27 | selection: { 28 | legendSel: { 29 | type: "multi", 30 | encodings: [ "color", "shape" ], 31 | toggle: "event.shiftKey" 32 | } 33 | }, 34 | encoding: { 35 | y: { 36 | field: "label", 37 | type: "nominal", 38 | axis: { 39 | title: "", 40 | domain: false, 41 | ticks: false, 42 | offset: 10 43 | }, 44 | }, 45 | color: { 46 | condition: { 47 | selection: "legendSel", 48 | field: "label", 49 | type: "nominal", 50 | legend: null 51 | }, 52 | value: "lightgrey" 53 | } 54 | }, 55 | mark: { 56 | type: "square", 57 | size: 120, 58 | opacity: 1 59 | } 60 | }, 61 | { 62 | width:500, 63 | height:280, 64 | transform: [ 65 | {filter: { selection: "legendSel"}} 66 | ], 67 | mark: { 68 | type: "line", 69 | tooltip: true, 70 | point: true 71 | }, 72 | selection: { 73 | grid: { 74 | type: "interval", 75 | resolve: "global", 76 | bind: "scales", 77 | translate: "[mousedown[!event.shiftKey], window:mouseup] > window:mousemove!", 78 | zoom: "wheel![!event.shiftKey]" 79 | }, 80 | pts: { 81 | type: "single", 82 | fields: ["label"] 83 | } 84 | }, 85 | encoding: { 86 | x: { field: "frame_size", type: "quantitative", title: "Frame size (bytes)", axis: {offset: 10} } 87 | y: { field: "max_offered_load_gbps", type: "quantitative", title: "Offered load (Gbps)" , axis: {offset: 10} } 88 | color: {field: "label", "type": "nominal", 89 | "legend": null} 90 | shape: {field: "flow_count", type: "nominal"} 91 | tooltip: {"field": "max_offered_load_gbps", "type": "quantitative"} 92 | } 93 | } 94 | ] 95 | } -------------------------------------------------------------------------------- /kibana/visualizations/ndr_capacity_gbps_scatter_plot.json: -------------------------------------------------------------------------------- 1 | { 2 | $schema: "https://vega.github.io/schema/vega-lite/v2.json" 3 | title: "Capacity in Gbps - 0.001% loss ratio" 4 | data: { 5 | url: { 6 | %context%: true 7 | %timefield%: "@timestamp" 8 | index: resu* 9 | body: { 10 | _source: ["@timestamp", "_source", "offered_tx_rate_bps", "flow_count", "frame_size", "user_label"] 11 | }, 12 | size: 10000 13 | }, 14 | format: { property: "hits.hits" } 15 | }, 16 | transform: [ 17 | { calculate: "datum._source['offered_tx_rate_bps'] / 1000000000" as: "Offered load (Gbps)"}, 18 | { calculate: "datum._source['flow_count']" as: "Flow count"}, 19 | { calculate: "isNaN(toNumber(datum._source['frame_size'])) ? 
362 : toNumber(datum._source['frame_size'])" as: "Frame size (bytes)"} 20 | { calculate: "datum._source['user_label'] + '-' + datum._source['flow_count']" as: "label"} 21 | ], 22 | center: true, 23 | hconcat: [ 24 | { 25 | width:10, 26 | selection: { 27 | legendSel: { 28 | type: "multi", 29 | encodings: [ "color", "shape" ], 30 | toggle: "event.shiftKey" 31 | } 32 | }, 33 | encoding: { 34 | y: { 35 | field: "label", 36 | type: "nominal", 37 | axis: { 38 | title: "", 39 | domain: false, 40 | ticks: false, 41 | offset: 10 42 | }, 43 | }, 44 | color: { 45 | condition: { 46 | selection: "legendSel", 47 | field: "label", 48 | type: "nominal", 49 | legend: null 50 | }, 51 | value: "lightgrey" 52 | } 53 | }, 54 | mark: { 55 | type: "square", 56 | size: 120, 57 | opacity: 1 58 | } 59 | }, 60 | { 61 | width:500, 62 | height:280, 63 | transform: [ 64 | {filter: { selection: "legendSel"}} 65 | 66 | ], 67 | mark: { 68 | "type": "point", 69 | "tooltip": true 70 | }, 71 | selection: { 72 | grid: { 73 | type: "interval", 74 | resolve: "global", 75 | bind: "scales", 76 | translate: "[mousedown[!event.shiftKey], window:mouseup] > window:mousemove!", 77 | zoom: "wheel![!event.shiftKey]" 78 | }, 79 | pts: { 80 | type: "single", 81 | fields: ["label"] 82 | } 83 | }, 84 | encoding: { 85 | x: { 86 | field: "Frame size (bytes)", 87 | type: "quantitative" 88 | } 89 | y: { 90 | field: "Offered load (Gbps)", 91 | type: "quantitative" 92 | } 93 | color: { 94 | field: "label", "type": "nominal", 95 | "legend": null 96 | } 97 | shape: { 98 | field: "Flow count", 99 | type: "nominal" 100 | } 101 | tooltip: { 102 | "field": "Offered load (Gbps)", 103 | "type": "quantitative" 104 | } 105 | } 106 | } 107 | ] 108 | } -------------------------------------------------------------------------------- /kibana/visualizations/ndr_capacity_gbps_theoretical_line_chart.json: -------------------------------------------------------------------------------- 1 | { 2 | $schema: https: //vega.github.io/schema/vega-lite/v2.json 3 | title: "Capacity in Gbps - 0,001% loss ratio" 4 | data: { 5 | url: { 6 | %context%: true 7 | %timefield%: "@timestamp" 8 | index: resu* 9 | body: { 10 | _source: ["@timestamp", "_source", "offered_tx_rate_bps", "theoretical_tx_rate_bps", "flow_count", "frame_size", "user_label"] 11 | }, 12 | size: 10000 13 | }, 14 | format: { property: "hits.hits" } 15 | }, 16 | transform: [ 17 | { calculate: "datum._source['offered_tx_rate_bps'] / 1000000000" as: "offered_load_gbps"}, 18 | { calculate: "datum._source['theoretical_tx_rate_bps'] / 1000000000" as: "theoretical_tx_rate_bps"}, 19 | { calculate: "datum._source['flow_count']" as: "flow_count"}, 20 | { calculate: "isNaN(toNumber(datum._source['frame_size'])) ? 
362 : toNumber(datum._source['frame_size'])" as: "frame_size"}, 21 | { calculate: "datum._source['user_label'] + '-' + datum._source['flow_count']" as: "label"} 22 | { aggregate: [{op: "max", field: "offered_load_gbps", as: "max_offered_load_gbps"}, {op: "max", field: "theoretical_tx_rate_bps", as: "theoretical_tx_rate_bps"}], groupby: ["frame_size", "label","flow_count"]} 23 | ], 24 | center: true, 25 | hconcat: [ 26 | { 27 | width:10, 28 | selection: { 29 | legendSel: { 30 | type: "multi", 31 | encodings: [ "color", "shape" ], 32 | toggle: "event.shiftKey" 33 | } 34 | }, 35 | encoding: { 36 | y: { 37 | field: "label", 38 | type: "nominal", 39 | axis: { 40 | title: "", 41 | domain: false, 42 | ticks: false, 43 | offset: 10 44 | }, 45 | }, 46 | color: { 47 | condition: { 48 | selection: "legendSel", 49 | field: "label", 50 | type: "nominal", 51 | legend: null 52 | }, 53 | value: "lightgrey" 54 | } 55 | }, 56 | mark: { 57 | type: "square", 58 | size: 120, 59 | opacity: 1 60 | } 61 | }, 62 | { 63 | layer: [ 64 | { 65 | mark: { 66 | type: "line", 67 | tooltip: true, 68 | point: true 69 | }, 70 | encoding: { 71 | x: { field: "frame_size", type: "quantitative", title: "Frame size (bytes)", axis: {offset: 10} } 72 | y: { field: "theoretical_tx_rate_bps", type: "quantitative", title: "Theoretical rate (Gbps)" , axis: {offset: 10} } 73 | strokeDash: {"field": "theoretical_tx_rate_bps", "type": "nominal"} 74 | tooltip: [{"field": "theoretical_tx_rate_bps", "type": "quantitative", title: "theoretical max capacity"}], 75 | color: { "value": "red"} 76 | } 77 | }, 78 | { 79 | width:500, 80 | height:280, 81 | transform: [ 82 | {filter: { selection: "legendSel"}} 83 | ], 84 | mark: { 85 | type: "line", 86 | tooltip: true, 87 | point: true 88 | }, 89 | selection: { 90 | grid: { 91 | type: "interval", 92 | resolve: "global", 93 | bind: "scales", 94 | translate: "[mousedown[!event.shiftKey], window:mouseup] > window:mousemove!", 95 | zoom: "wheel![!event.shiftKey]" 96 | }, 97 | pts: { 98 | type: "single", 99 | fields: ["Label"] 100 | } 101 | }, 102 | encoding: { 103 | x: { field: "frame_size", type: "quantitative", title: "Frame size (bytes)", axis: {offset: 10} } 104 | y: { field: "max_offered_load_gbps", type: "quantitative", title: "Offered load (Gbps)" , axis: {offset: 10} } 105 | color: {field: "label", "type": "nominal", 106 | "legend": null} 107 | shape: {field: "flow_count", type: "nominal"} 108 | tooltip: {"field": "max_offered_load_gbps", "type": "quantitative"} 109 | } 110 | } 111 | ] 112 | } 113 | ] 114 | } -------------------------------------------------------------------------------- /kibana/visualizations/ndr_capacity_gbps_theoretical_scatter_plot.json: -------------------------------------------------------------------------------- 1 | { 2 | $schema: "https://vega.github.io/schema/vega-lite/v2.json" 3 | title: "Capacity in Gbps - 0,001% loss ratio" 4 | data: { 5 | url: { 6 | %context%: true 7 | %timefield%: "@timestamp" 8 | index: resu* 9 | body: { 10 | _source: ["@timestamp", "_source", "offered_tx_rate_bps", "theoretical_tx_rate_bps", "flow_count", "frame_size", "user_label"] 11 | }, 12 | size: 10000 13 | }, 14 | format: { property: "hits.hits" } 15 | }, 16 | transform: [ 17 | { calculate: "datum._source['offered_tx_rate_bps'] / 1000000000" as: "offered_tx_rate_bps"}, 18 | { calculate: "datum._source['theoretical_tx_rate_bps'] / 1000000000" as: "theoretical_tx_rate_bps"}, 19 | { calculate: "datum._source['flow_count']" as: "flow_count"}, 20 | { calculate: 
"isNaN(toNumber(datum._source['frame_size'])) ? 362 : toNumber(datum._source['frame_size'])" as: "frame_size"} 21 | { calculate: "datum._source['user_label'] + '-' + datum._source['flow_count']" as: "label"} 22 | ], 23 | center: true, 24 | hconcat: [ 25 | { 26 | width:10, 27 | selection: { 28 | legendSel: { 29 | type: "multi", 30 | encodings: [ "color", "shape" ], 31 | toggle: "event.shiftKey" 32 | } 33 | }, 34 | encoding: { 35 | y: { 36 | field: "label", 37 | type: "nominal", 38 | axis: { 39 | title: "", 40 | domain: false, 41 | ticks: false, 42 | offset: 10 43 | }, 44 | }, 45 | color: { 46 | condition: { 47 | selection: "legendSel", 48 | field: "label", 49 | type: "nominal", 50 | legend: null 51 | }, 52 | value: "lightgrey" 53 | } 54 | }, 55 | mark: { 56 | type: "square", 57 | size: 120, 58 | opacity: 1 59 | } 60 | }, 61 | { 62 | layer: [ 63 | { 64 | mark: { 65 | type: "line", 66 | tooltip: true 67 | }, 68 | encoding: { 69 | x: { field: "frame_size", type: "quantitative", title: "Frame size (bytes)", axis: {offset: 10} } 70 | y: { field: "theoretical_tx_rate_bps", type: "quantitative", title: "Offered load (Gbps)" , axis: {offset: 10} } 71 | strokeDash: {"field": "theoretical_tx_rate_bps", "type": "nominal"} 72 | tooltip: [{"field": "theoretical_tx_rate_bps", "type": "quantitative", title: "theoretical max capacity"}], 73 | color: { "value": "red"} 74 | } 75 | }, 76 | { 77 | width:500, 78 | height:280, 79 | transform: [ 80 | {filter: { selection: "legendSel"}} 81 | ], 82 | mark: { 83 | "type": "point", 84 | "tooltip": true 85 | }, 86 | selection: { 87 | grid: { 88 | type: "interval", 89 | resolve: "global", 90 | bind: "scales", 91 | translate: "[mousedown[!event.shiftKey], window:mouseup] > window:mousemove!", 92 | zoom: "wheel![!event.shiftKey]" 93 | }, 94 | pts: { 95 | type: "single", 96 | fields: ["label"] 97 | } 98 | }, 99 | encoding: { 100 | x: { 101 | field: "frame_size", 102 | title: "Frame size (bytes)", 103 | type: "quantitative" 104 | } 105 | y: { 106 | field: "offered_tx_rate_bps", 107 | title: "Offered load (Gbps)", 108 | type: "quantitative" 109 | } 110 | color: { 111 | field: "label", "type": "nominal", 112 | "legend": null 113 | } 114 | shape: { 115 | field: "flow_count", 116 | type: "nominal" 117 | } 118 | tooltip: { 119 | "field": "offered_tx_rate_bps", 120 | "title": "Offered load (Gbps)", 121 | "type": "quantitative" 122 | } 123 | } 124 | } 125 | ] 126 | } 127 | ] 128 | } -------------------------------------------------------------------------------- /kibana/visualizations/ndr_capacity_pps_line_chart.json: -------------------------------------------------------------------------------- 1 | { 2 | $schema: https: //vega.github.io/schema/vega-lite/v2.json 3 | title: "Capacity in Mpps - 0,001% loss ratio" 4 | data: { 5 | url: { 6 | %context%: true 7 | %timefield%: "@timestamp" 8 | index: resu* 9 | body: { 10 | _source: ["@timestamp", "_source", "rate_pps", "flow_count", "frame_size", "user_label", "type"] 11 | }, 12 | size: 10000 13 | }, 14 | format: { property: "hits.hits" } 15 | }, 16 | 17 | transform: [ 18 | { calculate: "datum._source['rate_pps'] / 1000000" as: "offered_load_pps"}, 19 | { calculate: "datum._source['flow_count']" as: "flow_count"}, 20 | { calculate: "isNaN(toNumber(datum._source['frame_size'])) ? 
362 : toNumber(datum._source['frame_size'])" as: "frame_size"}, 21 | { calculate: "datum._source['user_label'] + '-' + datum._source['flow_count']" as: "label"} 22 | { aggregate: [{op: "max", field: "offered_load_pps", as: "max_offered_load_pps"}], groupby: ["frame_size", "label","flow_count"]} 23 | 24 | ], 25 | center: true, 26 | hconcat: [ 27 | { 28 | width:10, 29 | selection: { 30 | legendSel: { 31 | type: "multi", 32 | encodings: [ "color", "shape" ], 33 | toggle: "event.shiftKey" 34 | } 35 | }, 36 | encoding: { 37 | y: { 38 | field: "label", 39 | type: "nominal", 40 | axis: { 41 | title: "", 42 | domain: false, 43 | ticks: false, 44 | offset: 10 45 | }, 46 | }, 47 | color: { 48 | condition: { 49 | selection: "legendSel", 50 | field: "label", 51 | type: "nominal", 52 | legend: null 53 | }, 54 | value: "lightgrey" 55 | } 56 | }, 57 | mark: { 58 | type: "square", 59 | size: 120, 60 | opacity: 1 61 | } 62 | }, 63 | { 64 | width:500, 65 | height:280, 66 | transform: [ 67 | {filter: { selection: "legendSel"}} 68 | 69 | ], 70 | mark: { 71 | type: "line", 72 | tooltip: true, 73 | point: true 74 | }, 75 | selection: { 76 | grid: { 77 | type: "interval", 78 | resolve: "global", 79 | bind: "scales", 80 | translate: "[mousedown[!event.shiftKey], window:mouseup] > window:mousemove!", 81 | zoom: "wheel![!event.shiftKey]" 82 | }, 83 | pts: { 84 | type: "single", 85 | fields: ["Label"] 86 | } 87 | }, 88 | encoding: { 89 | x: { field: "frame_size", type: "quantitative", title: "Frame size (bytes)", axis: {offset: 10} } 90 | y: { field: "max_offered_load_pps", type: "quantitative", title: "Offered load (Mpps)" , axis: {offset: 10} } 91 | color: {field: "label", "type": "nominal", 92 | "legend": null} 93 | tooltip: {"field": "max_offered_load_pps", "type": "quantitative"}, 94 | shape: {field: "flow_count", type: "nominal"} 95 | } 96 | } 97 | ] 98 | } -------------------------------------------------------------------------------- /kibana/visualizations/ndr_capacity_pps_scatter_plot.json: -------------------------------------------------------------------------------- 1 | { 2 | $schema: "https://vega.github.io/schema/vega-lite/v2.json" 3 | title: "Capacity in Mpps - 0,001% loss ratio" 4 | data: { 5 | url: { 6 | %context%: true 7 | %timefield%: "@timestamp" 8 | index: resu* 9 | body: { 10 | _source: ["@timestamp", "_source", "rate_pps", "flow_count", "frame_size", "user_label"] 11 | }, 12 | size: 10000 13 | }, 14 | format: { property: "hits.hits" } 15 | }, 16 | transform: [ 17 | { calculate: "datum._source['rate_pps'] / 1000000" as: "Offered load (Mpps)"}, 18 | { calculate: "datum._source['flow_count']" as: "Flow count"}, 19 | { calculate: "isNaN(toNumber(datum._source['frame_size'])) ? 
362 : toNumber(datum._source['frame_size'])" as: "Frame size (bytes)"} 20 | { calculate: "datum._source['user_label'] + '-' + datum._source['flow_count']" as: "label"} 21 | ], 22 | center: true, 23 | hconcat: [ 24 | { 25 | width:10, 26 | selection: { 27 | legendSel: { 28 | type: "multi", 29 | encodings: [ "color", "shape" ], 30 | toggle: "event.shiftKey" 31 | } 32 | }, 33 | encoding: { 34 | y: { 35 | field: "label", 36 | type: "nominal", 37 | axis: { 38 | title: "", 39 | domain: false, 40 | ticks: false, 41 | offset: 10 42 | }, 43 | }, 44 | color: { 45 | condition: { 46 | selection: "legendSel", 47 | field: "label", 48 | type: "nominal", 49 | legend: null 50 | }, 51 | value: "lightgrey" 52 | } 53 | }, 54 | mark: { 55 | type: "square", 56 | size: 120, 57 | opacity: 1 58 | } 59 | }, 60 | { 61 | width:500, 62 | height:280, 63 | transform: [ 64 | {filter: { selection: "legendSel"}} 65 | 66 | ], 67 | mark: { 68 | "type": "point", 69 | "tooltip": true 70 | }, 71 | selection: { 72 | grid: { 73 | type: "interval", 74 | resolve: "global", 75 | bind: "scales", 76 | translate: "[mousedown[!event.shiftKey], window:mouseup] > window:mousemove!", 77 | zoom: "wheel![!event.shiftKey]" 78 | }, 79 | pts: { 80 | type: "single", 81 | fields: ["label"] 82 | } 83 | }, 84 | encoding: { 85 | x: { 86 | field: "Frame size (bytes)", 87 | type: "quantitative" 88 | } 89 | y: { 90 | field: "Offered load (Mpps)", 91 | type: "quantitative" 92 | } 93 | color: { 94 | field: "label", "type": "nominal", 95 | "legend": null 96 | } 97 | shape: { 98 | field: "Flow count", 99 | type: "nominal" 100 | } 101 | tooltip: { 102 | "field": "Offered load (Mpps)", 103 | "type": "quantitative" 104 | } 105 | } 106 | } 107 | ] 108 | } -------------------------------------------------------------------------------- /kibana/visualizations/ndr_capacity_pps_theoretical_line_chart.json: -------------------------------------------------------------------------------- 1 | { 2 | $schema: "https://vega.github.io/schema/vega-lite/v2.json" 3 | title: "Capacity in Mpps - 0,001% loss ratio" 4 | data: { 5 | url: { 6 | %context%: true 7 | %timefield%: "@timestamp" 8 | index: resu* 9 | body: { 10 | _source: ["@timestamp", "_source", "theoretical_tx_rate_pps", "rate_pps", "flow_count", "frame_size", "user_label", "type"] 11 | }, 12 | size: 10000 13 | }, 14 | format: { property: "hits.hits" } 15 | }, 16 | 17 | transform: [ 18 | { calculate: "datum._source['rate_pps'] / 1000000" as: "offered_load_pps"}, 19 | { calculate: "datum._source['theoretical_tx_rate_pps'] / 1000000" as: "theoretical_tx_rate_pps"}, 20 | { calculate: "datum._source['flow_count']" as: "flow_count"}, 21 | { calculate: "isNaN(toNumber(datum._source['frame_size'])) ?
362 : toNumber(datum._source['frame_size'])" as: "frame_size"}, 22 | { calculate: "datum._source['user_label'] + '-' + datum._source['flow_count']" as: "label"} 23 | { aggregate: [{op: "max", field: "offered_load_pps", as: "max_offered_load_pps"}, {op: "max", field: "theoretical_tx_rate_pps", as: "theoretical_tx_rate_pps"}], groupby: ["frame_size", "label","flow_count"]} 24 | 25 | ], 26 | center: true, 27 | hconcat: [ 28 | { 29 | width:10, 30 | selection: { 31 | legendSel: { 32 | type: "multi", 33 | encodings: [ "color", "shape" ], 34 | toggle: "event.shiftKey" 35 | } 36 | }, 37 | encoding: { 38 | y: { 39 | field: "label", 40 | type: "nominal", 41 | axis: { 42 | title: "", 43 | domain: false, 44 | ticks: false, 45 | offset: 10 46 | }, 47 | }, 48 | color: { 49 | condition: { 50 | selection: "legendSel", 51 | field: "label", 52 | type: "nominal", 53 | legend: null 54 | }, 55 | value: "lightgrey" 56 | } 57 | }, 58 | mark: { 59 | type: "square", 60 | size: 120, 61 | opacity: 1 62 | } 63 | }, 64 | { 65 | layer: [{ 66 | mark: { 67 | type: "line", 68 | tooltip: true, 69 | point: true 70 | }, 71 | encoding: { 72 | x: { field: "frame_size", type: "quantitative", title: "Frame size (bytes)", axis: {offset: 10} } 73 | y: { field: "theoretical_tx_rate_pps", type: "quantitative", title: "Offered load (Mpps)" , axis: {offset: 10} } 74 | strokeDash: {"field": "theoretical_tx_rate_pps", "type": "nominal"} 75 | tooltip: [{"field": "theoretical_tx_rate_pps", "type": "quantitative", title: "theoretical max capacity"}], 76 | color: { "value": "red"} 77 | } 78 | },{ 79 | width:500, 80 | height:280, 81 | transform: [ 82 | {filter: { selection: "legendSel"}} 83 | 84 | ],mark: { 85 | type: "line", 86 | tooltip: true, 87 | point: true 88 | }, 89 | selection: { 90 | grid: { 91 | type: "interval", 92 | resolve: "global", 93 | bind: "scales", 94 | translate: "[mousedown[!event.shiftKey], window:mouseup] > window:mousemove!", 95 | zoom: "wheel![!event.shiftKey]" 96 | }, 97 | pts: { 98 | type: "single", 99 | fields: ["Label"] 100 | } 101 | }, 102 | encoding: { 103 | x: { field: "frame_size", type: "quantitative", title: "Frame size (bytes)", axis: {offset: 10} } 104 | y: { field: "max_offered_load_pps", type: "quantitative", title: "Offered load (Mpps)" , axis: {offset: 10} } 105 | color: {field: "label", "type": "nominal", 106 | "legend": null} 107 | tooltip: {"field": "max_offered_load_pps", "type": "quantitative"}, 108 | shape: {field: "flow_count", type: "nominal"} 109 | } 110 | } 111 | ] 112 | } 113 | 114 | ] 115 | } -------------------------------------------------------------------------------- /kibana/visualizations/ndr_capacity_pps_theoretical_scatter_plot.json: -------------------------------------------------------------------------------- 1 | { 2 | $schema: "https://vega.github.io/schema/vega-lite/v2.json" 3 | title: "Capacity in Mpps - 0,001% loss ratio" 4 | data: { 5 | url: { 6 | %context%: true 7 | %timefield%: "@timestamp" 8 | index: resu* 9 | body: { 10 | _source: ["@timestamp", "_source", "rate_pps", "theoretical_tx_rate_pps", "flow_count", "frame_size", "user_label"] 11 | }, 12 | size: 10000 13 | }, 14 | format: { property: "hits.hits" } 15 | }, 16 | transform: [ 17 | { calculate: "datum._source['rate_pps'] / 1000000" as: "rate_pps"}, 18 | { calculate: "datum._source['theoretical_tx_rate_pps'] / 1000000" as: "theoretical_tx_rate_pps"}, 19 | { calculate: "datum._source['flow_count']" as: "flow_count"}, 20 | { calculate: "isNaN(toNumber(datum._source['frame_size'])) ? 
362 : toNumber(datum._source['frame_size'])" as: "frame_size"} 21 | { calculate: "datum._source['user_label'] + '-' + datum._source['flow_count']" as: "label"} 22 | ], 23 | center: true, 24 | hconcat: [ 25 | { 26 | width:10, 27 | selection: { 28 | legendSel: { 29 | type: "multi", 30 | encodings: [ "color", "shape" ], 31 | toggle: "event.shiftKey" 32 | } 33 | }, 34 | encoding: { 35 | y: { 36 | field: "label", 37 | type: "nominal", 38 | axis: { 39 | title: "", 40 | domain: false, 41 | ticks: false, 42 | offset: 10 43 | }, 44 | }, 45 | color: { 46 | condition: { 47 | selection: "legendSel", 48 | field: "label", 49 | type: "nominal", 50 | legend: null 51 | }, 52 | value: "lightgrey" 53 | } 54 | }, 55 | mark: { 56 | type: "square", 57 | size: 120, 58 | opacity: 1 59 | } 60 | }, 61 | { 62 | layer: [ 63 | { 64 | mark: { 65 | type: "line", 66 | tooltip: true 67 | }, 68 | encoding: { 69 | x: { field: "frame_size", type: "quantitative", title: "Frame size (bytes)", axis: {offset: 10} } 70 | y: { field: "theoretical_tx_rate_pps", type: "quantitative", title: "Offered load (Mpps)" , axis: {offset: 10} } 71 | strokeDash: {"field": "theoretical_tx_rate_pps", "type": "nominal"} 72 | tooltip: [{"field": "theoretical_tx_rate_pps", "type": "quantitative", title: "theoretical max capacity"}], 73 | color: { "value": "red"} 74 | } 75 | }, 76 | { 77 | width:500, 78 | height:280, 79 | transform: [ 80 | {filter: { selection: "legendSel"}} 81 | ], 82 | mark: { 83 | "type": "point", 84 | "tooltip": true 85 | }, 86 | selection: { 87 | grid: { 88 | type: "interval", 89 | resolve: "global", 90 | bind: "scales", 91 | translate: "[mousedown[!event.shiftKey], window:mouseup] > window:mousemove!", 92 | zoom: "wheel![!event.shiftKey]" 93 | }, 94 | pts: { 95 | type: "single", 96 | fields: ["label"] 97 | } 98 | }, 99 | encoding: { 100 | x: { 101 | field: "frame_size", 102 | title: "Frame size (bytes)", 103 | type: "quantitative" 104 | } 105 | y: { 106 | field: "rate_pps", 107 | title: "Offered load (Mpps)", 108 | type: "quantitative" 109 | } 110 | color: { 111 | field: "label", "type": "nominal", 112 | "legend": null 113 | } 114 | shape: { 115 | field: "flow_count", 116 | type: "nominal" 117 | } 118 | tooltip: { 119 | "field": "rate_pps", 120 | "title": "Offered load (Mpps)", 121 | "type": "quantitative" 122 | } 123 | } 124 | } 125 | ] 126 | } 127 | ] 128 | } -------------------------------------------------------------------------------- /nfvbench/__init__.py: -------------------------------------------------------------------------------- 1 | # Copyright 2017 Cisco Systems, Inc. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); you may 4 | # not use this file except in compliance with the License. You may obtain 5 | # a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 11 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 12 | # License for the specific language governing permissions and limitations 13 | # under the License. 
14 | 15 | 16 | import pbr.version 17 | 18 | __version__ = pbr.version.VersionInfo('nfvbench').version_string_with_vcs() 19 | -------------------------------------------------------------------------------- /nfvbench/chain_workers.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # Copyright 2017 Cisco Systems, Inc. All rights reserved. 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); you may 5 | # not use this file except in compliance with the License. You may obtain 6 | # a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 12 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 13 | # License for the specific language governing permissions and limitations 14 | # under the License. 15 | # 16 | 17 | 18 | class BasicWorker(object): 19 | 20 | def __init__(self, stats_manager): 21 | self.stats_manager = stats_manager 22 | self.chain_manager = stats_manager.chain_runner.chain_manager 23 | self.config = stats_manager.config 24 | self.specs = stats_manager.specs 25 | 26 | def get_compute_nodes_bios(self): 27 | return {} 28 | 29 | def get_version(self): 30 | return {} 31 | 32 | def config_interfaces(self): 33 | return {} 34 | 35 | def close(self): 36 | pass 37 | 38 | def insert_interface_stats(self, pps_list): 39 | """Insert interface stats to a list of packet path stats. 40 | 41 | pps_list: a list of packet path stats instances indexed by chain index 42 | 43 | Specialized workers can insert their own interface stats inside each existing packet path 44 | stats for every chain. 45 | """ 46 | 47 | def update_interface_stats(self, diff=False): 48 | """Update all interface stats. 49 | 50 | diff: if False, simply refresh the interface stats values with latest values 51 | if True, diff the interface stats with the latest values 52 | Make sure that the interface stats inserted in insert_interface_stats() are updated 53 | with proper values 54 | """ 55 | -------------------------------------------------------------------------------- /nfvbench/config.py: -------------------------------------------------------------------------------- 1 | # Copyright 2016 Cisco Systems, Inc. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); you may 4 | # not use this file except in compliance with the License. You may obtain 5 | # a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 11 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 12 | # License for the specific language governing permissions and limitations 13 | # under the License. 14 | # 15 | 16 | from attrdict import AttrDict 17 | import yaml 18 | 19 | from .log import LOG 20 | 21 | def config_load(file_name, from_cfg=None, whitelist_keys=None): 22 | """Load a yaml file into a config dict, merge with from_cfg if not None 23 | The config file content taking precedence in case of duplicate 24 | """ 25 | try: 26 | with open(file_name, encoding="utf-8") as fileobj: 27 | cfg = AttrDict(yaml.safe_load(fileobj)) 28 | except IOError: 29 | raise Exception("Configuration file at '{}' was not found. 
Please use correct path " 30 | "and verify it is visible to container if you run nfvbench in container." 31 | .format(file_name)) from IOError 32 | 33 | if from_cfg: 34 | if not whitelist_keys: 35 | whitelist_keys = [] 36 | _validate_config(cfg, from_cfg, whitelist_keys) 37 | cfg = from_cfg + cfg 38 | 39 | return cfg 40 | 41 | 42 | def config_loads(cfg_text, from_cfg=None, whitelist_keys=None): 43 | """Same as config_load but load from a string 44 | """ 45 | try: 46 | cfg = AttrDict(yaml.safe_load(cfg_text)) 47 | except TypeError: 48 | # empty string 49 | cfg = AttrDict() 50 | except ValueError as e: 51 | # In case of wrong path or file not readable or string not well formatted 52 | LOG.error("String %s is not well formatted. Please verify your yaml/json string. " 53 | "If string is a file path, file was not found. Please use correct path and " 54 | "verify it is visible to container if you run nfvbench in container.", cfg_text) 55 | raise Exception(e) from e 56 | if from_cfg: 57 | if not whitelist_keys: 58 | whitelist_keys = [] 59 | _validate_config(cfg, from_cfg, whitelist_keys) 60 | return from_cfg + cfg 61 | return cfg 62 | 63 | 64 | def _validate_config(subset, superset, whitelist_keys): 65 | def get_err_config(subset, superset): 66 | result = {} 67 | for k, v in list(subset.items()): 68 | if k not in whitelist_keys: 69 | if k not in superset: 70 | result.update({k: v}) 71 | elif v is not None and superset[k] is not None: 72 | if not isinstance(v, type(superset[k])): 73 | result.update({k: v}) 74 | continue 75 | if isinstance(v, dict): 76 | res = get_err_config(v, superset[k]) 77 | if res: 78 | result.update({k: res}) 79 | if not result: 80 | return None 81 | return result 82 | 83 | err_cfg = get_err_config(subset, superset) 84 | if err_cfg: 85 | err_msg = 'The provided configuration has unknown options or values with invalid type: '\ 86 | + str(err_cfg) 87 | LOG.error(err_msg) 88 | raise Exception(err_msg) 89 | -------------------------------------------------------------------------------- /nfvbench/config_plugin.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # Copyright 2016 Cisco Systems, Inc. All rights reserved. 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); you may 5 | # not use this file except in compliance with the License. You may obtain 6 | # a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 12 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 13 | # License for the specific language governing permissions and limitations 14 | # under the License. 15 | # 16 | """Configuration Plugin. 17 | 18 | This module is used to override the configuration with platform specific constraints and extensions 19 | """ 20 | import abc 21 | from . 
import specs 22 | 23 | 24 | class ConfigPluginBase(object, metaclass=abc.ABCMeta): 25 | """Base class for config plugins.""" 26 | 27 | class InitializationFailure(Exception): 28 | """Used in case of any init failure.""" 29 | 30 | def __init__(self, config): 31 | """Save configuration.""" 32 | if not config: 33 | raise ConfigPluginBase.InitializationFailure( 34 | 'Initialization parameters need to be assigned.') 35 | self.config = config 36 | 37 | @abc.abstractmethod 38 | def get_config(self): 39 | """Return updated default configuration file.""" 40 | 41 | def set_config(self, config): 42 | """Set a new configuration. 43 | 44 | This method is called when the config has changed after this instance was initialized. 45 | This is needed in the frequent case where the main config is changed in a copy and to 46 | prevent this instance to keep pointing to the old copy of the config 47 | """ 48 | self.config = config 49 | 50 | @abc.abstractmethod 51 | def get_openstack_spec(self): 52 | """Return OpenStack specs for host.""" 53 | 54 | @abc.abstractmethod 55 | def get_run_spec(self, config, openstack_spec): 56 | """Return RunSpec for given platform.""" 57 | 58 | @abc.abstractmethod 59 | def validate_config(self, cfg, openstack_spec): 60 | """Validate config file.""" 61 | 62 | @abc.abstractmethod 63 | def prepare_results_config(self, cfg): 64 | """Insert any plugin specific information to the results. 65 | 66 | This function is called before running configuration is copied. 67 | Example usage is to remove sensitive information like switch credentials. 68 | """ 69 | 70 | @abc.abstractmethod 71 | def get_version(self): 72 | """Return platform version.""" 73 | 74 | 75 | class ConfigPlugin(ConfigPluginBase): 76 | """No-op config plugin class. Does not change anything.""" 77 | 78 | def __init__(self, config): 79 | """Invoke the base class constructor.""" 80 | ConfigPluginBase.__init__(self, config) 81 | 82 | def get_config(self): 83 | """Public interface for updating config file. Just returns given config.""" 84 | return self.config 85 | 86 | def get_openstack_spec(self): 87 | """Return OpenStack specs for host.""" 88 | return specs.OpenStackSpec() 89 | 90 | def get_run_spec(self, config, openstack_spec): 91 | """Return RunSpec for given platform.""" 92 | return specs.RunSpec(config.no_vswitch_access, openstack_spec) 93 | 94 | def validate_config(self, cfg, openstack_spec): 95 | """Nothing to validate by default.""" 96 | 97 | def prepare_results_config(self, cfg): 98 | """Nothing to add the results by default.""" 99 | return cfg 100 | 101 | def get_version(self): 102 | """Return an empty version.""" 103 | return {} 104 | -------------------------------------------------------------------------------- /nfvbench/factory.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # Copyright 2017 Cisco Systems, Inc. All rights reserved. 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); you may 5 | # not use this file except in compliance with the License. You may obtain 6 | # a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 12 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 13 | # License for the specific language governing permissions and limitations 14 | # under the License. 
15 | # 16 | """Factory for creating worker and config plugin instances.""" 17 | 18 | from . import chain_workers as workers 19 | from .config_plugin import ConfigPlugin 20 | 21 | 22 | class BasicFactory(object): 23 | """Basic factory class to be overridden for advanced customization.""" 24 | 25 | def get_chain_worker(self, encaps, service_chain): 26 | """Get a chain worker based on encaps and service chain type.""" 27 | return workers.BasicWorker 28 | 29 | def get_config_plugin_class(self): 30 | """Get a config plugin.""" 31 | return ConfigPlugin 32 | -------------------------------------------------------------------------------- /nfvbench/fluentd.py: -------------------------------------------------------------------------------- 1 | # Copyright 2017 Cisco Systems, Inc. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); you may 4 | # not use this file except in compliance with the License. You may obtain 5 | # a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 11 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 12 | # License for the specific language governing permissions and limitations 13 | # under the License. 14 | 15 | import logging 16 | 17 | from datetime import datetime 18 | from fluent import sender 19 | import pytz 20 | 21 | 22 | class FluentLogHandler(logging.Handler): 23 | '''This is a minimalist log handler for use with Fluentd 24 | 25 | Needs to be attached to a logger using the addHandler method. 26 | It only picks up from every record: 27 | - the formatted message (no timestamp and no level) 28 | - the level name 29 | - the runlogdate (to tie multiple run-related logs together) 30 | The timestamp is retrieved by the fluentd library. 31 | There will be only one instance of FluentLogHandler running. 
32 | ''' 33 | 34 | def __init__(self, fluentd_configs): 35 | logging.Handler.__init__(self) 36 | self.log_senders = [] 37 | self.result_senders = [] 38 | self.runlogdate = "1970-01-01T00:00:00.000000+0000" 39 | self.formatter = logging.Formatter('%(message)s') 40 | for fluentd_config in fluentd_configs: 41 | if fluentd_config.logging_tag: 42 | self.log_senders.append( 43 | sender.FluentSender(fluentd_config.logging_tag, host=fluentd_config.ip, 44 | port=fluentd_config.port)) 45 | if fluentd_config.result_tag: 46 | self.result_senders.append( 47 | sender.FluentSender(fluentd_config.result_tag, host=fluentd_config.ip, 48 | port=fluentd_config.port)) 49 | self.__warning_counter = 0 50 | self.__error_counter = 0 51 | 52 | def start_new_run(self): 53 | '''Delimitate a new run in the stream of records with a new timestamp 54 | ''' 55 | # reset counters 56 | self.__warning_counter = 0 57 | self.__error_counter = 0 58 | self.runlogdate = self.__get_timestamp() 59 | # send start record 60 | self.__send_start_record() 61 | 62 | def emit(self, record): 63 | data = { 64 | "loglevel": record.levelname, 65 | "message": self.formatter.format(record), 66 | "@timestamp": self.__get_timestamp() 67 | } 68 | # if runlogdate is Jan 1st 1970, it's a log from server (not an nfvbench run) 69 | # so do not send runlogdate 70 | if self.runlogdate != "1970-01-01T00:00:00.000000+0000": 71 | data["runlogdate"] = self.runlogdate 72 | 73 | self.__update_stats(record.levelno) 74 | for log_sender in self.log_senders: 75 | log_sender.emit(None, data) 76 | 77 | # this function is called by summarizer, and used for sending results 78 | def record_send(self, record): 79 | for result_sender in self.result_senders: 80 | result_sender.emit(None, record) 81 | 82 | # send START log record for each run 83 | def __send_start_record(self): 84 | data = { 85 | "runlogdate": self.runlogdate, 86 | "loglevel": "START", 87 | "message": "NFVBENCH run is started", 88 | "numloglevel": 0, 89 | "numerrors": 0, 90 | "numwarnings": 0, 91 | "@timestamp": self.__get_timestamp() 92 | } 93 | for log_sender in self.log_senders: 94 | log_sender.emit(None, data) 95 | 96 | # send stats related to the current run and reset state for a new run 97 | def send_run_summary(self, run_summary_required): 98 | if run_summary_required or self.__get_highest_level() == logging.ERROR: 99 | data = { 100 | "loglevel": "RUN_SUMMARY", 101 | "message": self.__get_highest_level_desc(), 102 | "numloglevel": self.__get_highest_level(), 103 | "numerrors": self.__error_counter, 104 | "numwarnings": self.__warning_counter, 105 | "@timestamp": self.__get_timestamp() 106 | } 107 | # if runlogdate is Jan 1st 1970, it's a log from server (not an nfvbench run) 108 | # so don't send runlogdate 109 | if self.runlogdate != "1970-01-01T00:00:00.000000+0000": 110 | data["runlogdate"] = self.runlogdate 111 | for log_sender in self.log_senders: 112 | log_sender.emit(None, data) 113 | 114 | def __get_highest_level(self): 115 | if self.__error_counter > 0: 116 | return logging.ERROR 117 | if self.__warning_counter > 0: 118 | return logging.WARNING 119 | return logging.INFO 120 | 121 | def __get_highest_level_desc(self): 122 | highest_level = self.__get_highest_level() 123 | if highest_level == logging.INFO: 124 | return "GOOD RUN" 125 | if highest_level == logging.WARNING: 126 | return "RUN WITH WARNINGS" 127 | return "RUN WITH ERRORS" 128 | 129 | def __update_stats(self, levelno): 130 | if levelno == logging.WARNING: 131 | self.__warning_counter += 1 132 | elif levelno == logging.ERROR: 133 
| self.__error_counter += 1 134 | 135 | def __get_timestamp(self): 136 | return datetime.utcnow().replace(tzinfo=pytz.utc).strftime( 137 | "%Y-%m-%dT%H:%M:%S.%f%z") 138 | -------------------------------------------------------------------------------- /nfvbench/log.py: -------------------------------------------------------------------------------- 1 | # Copyright 2016 Cisco Systems, Inc. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); you may 4 | # not use this file except in compliance with the License. You may obtain 5 | # a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 11 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 12 | # License for the specific language governing permissions and limitations 13 | # under the License. 14 | 15 | import logging 16 | 17 | _product_name = 'nfvbench' 18 | 19 | def setup(mute_stdout=False): 20 | # logging.basicConfig() 21 | if mute_stdout: 22 | handler = logging.NullHandler() 23 | else: 24 | formatter_str = '%(asctime)s %(levelname)s %(message)s' 25 | handler = logging.StreamHandler() 26 | handler.setFormatter(logging.Formatter(formatter_str)) 27 | 28 | # Add handler to logger 29 | logger = logging.getLogger(_product_name) 30 | logger.addHandler(handler) 31 | # disable unnecessary information capture 32 | logging.logThreads = 0 33 | logging.logProcesses = 0 34 | # to make sure each log record does not have a source file name attached 35 | # pylint: disable=protected-access 36 | logging._srcfile = None 37 | # pylint: enable=protected-access 38 | 39 | def add_file_logger(logfile): 40 | if logfile: 41 | file_formatter_str = '%(asctime)s %(levelname)s %(message)s' 42 | file_handler = logging.FileHandler(logfile, mode='w') 43 | file_handler.setFormatter(logging.Formatter(file_formatter_str)) 44 | logger = logging.getLogger(_product_name) 45 | logger.addHandler(file_handler) 46 | 47 | def set_level(debug=False): 48 | log_level = logging.DEBUG if debug else logging.INFO 49 | logger = logging.getLogger(_product_name) 50 | logger.setLevel(log_level) 51 | 52 | def getLogger(): 53 | logger = logging.getLogger(_product_name) 54 | return logger 55 | 56 | LOG = getLogger() 57 | -------------------------------------------------------------------------------- /nfvbench/nfvbenchvm/nfvbenchvm.conf: -------------------------------------------------------------------------------- 1 | FORWARDER={forwarder} 2 | INTF_MAC1={intf_mac1} 3 | INTF_MAC2={intf_mac2} 4 | TG_MAC1={tg_mac1} 5 | TG_MAC2={tg_mac2} 6 | VNF_GATEWAY1_CIDR={vnf_gateway1_cidr} 7 | VNF_GATEWAY2_CIDR={vnf_gateway2_cidr} 8 | TG_NET1={tg_net1} 9 | TG_NET2={tg_net2} 10 | TG_GATEWAY1_IP={tg_gateway1_ip} 11 | TG_GATEWAY2_IP={tg_gateway2_ip} 12 | VIF_MQ_SIZE={vif_mq_size} 13 | NUM_MBUFS={num_mbufs} 14 | INTF_MGMT_CIDR={intf_mgmt_cidr} 15 | INTF_MGMT_IP_GW={intf_mgmt_ip_gw} 16 | INTF_MAC_MGMT={intf_mac_mgmt} -------------------------------------------------------------------------------- /nfvbench/specs.py: -------------------------------------------------------------------------------- 1 | # Copyright 2016 Cisco Systems, Inc. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); you may 4 | # not use this file except in compliance with the License. 
You may obtain 5 | # a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 11 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 12 | # License for the specific language governing permissions and limitations 13 | # under the License. 14 | # 15 | 16 | 17 | class Encaps(object): 18 | VLAN = "VLAN" 19 | VxLAN = "VxLAN" 20 | MPLS = "MPLS" 21 | NO_ENCAPS = "NONE" 22 | 23 | encaps_mapping = { 24 | 'VLAN': VLAN, 25 | 'VXLAN': VxLAN, 26 | 'MPLS': MPLS, 27 | 'NONE': NO_ENCAPS 28 | } 29 | 30 | @classmethod 31 | def get(cls, network_type): 32 | return cls.encaps_mapping.get(network_type.upper(), None) 33 | 34 | 35 | class ChainType(object): 36 | PVP = "PVP" 37 | PVVP = "PVVP" 38 | EXT = "EXT" 39 | names = [EXT, PVP, PVVP] 40 | 41 | 42 | class OpenStackSpec(object): 43 | def __init__(self): 44 | self.__vswitch = "BASIC" 45 | self.__encaps = Encaps.NO_ENCAPS 46 | 47 | @property 48 | def vswitch(self): 49 | return self.__vswitch 50 | 51 | @vswitch.setter 52 | def vswitch(self, vsw): 53 | if vsw is None: 54 | raise Exception('Trying to set vSwitch as None.') 55 | 56 | self.__vswitch = vsw.upper() 57 | 58 | @property 59 | def encaps(self): 60 | return self.__encaps 61 | 62 | @encaps.setter 63 | def encaps(self, enc): 64 | if enc is None: 65 | raise Exception('Trying to set Encaps as None.') 66 | 67 | self.__encaps = enc 68 | 69 | 70 | class RunSpec(object): 71 | def __init__(self, no_vswitch_access, openstack_spec): 72 | self.use_vswitch = (not no_vswitch_access) and openstack_spec \ 73 | and openstack_spec.vswitch != "BASIC" 74 | 75 | 76 | class Specs(object): 77 | def __init__(self): 78 | self.openstack = None 79 | self.run_spec = None 80 | 81 | def set_openstack_spec(self, openstack_spec): 82 | self.openstack = openstack_spec 83 | 84 | def set_run_spec(self, run_spec): 85 | self.run_spec = run_spec 86 | -------------------------------------------------------------------------------- /nfvbench/stats_collector.py: -------------------------------------------------------------------------------- 1 | # Copyright 2016 Cisco Systems, Inc. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); you may 4 | # not use this file except in compliance with the License. You may obtain 5 | # a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 11 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 12 | # License for the specific language governing permissions and limitations 13 | # under the License. 
14 | 15 | 16 | import time 17 | 18 | 19 | class StatsCollector(object): 20 | """Base class for all stats collector classes.""" 21 | 22 | def __init__(self, start_time): 23 | self.start_time = start_time 24 | self.stats = [] 25 | 26 | def get(self): 27 | return self.stats 28 | 29 | def peek(self): 30 | return self.stats[-1] 31 | 32 | @staticmethod 33 | def _get_drop_percentage(drop_pkts, total_pkts): 34 | return float(drop_pkts * 100) / total_pkts 35 | 36 | @staticmethod 37 | def _get_rx_pps(tx_pps, drop_percentage): 38 | return (tx_pps * (100 - drop_percentage)) / 100 39 | 40 | def _get_current_time_diff(self): 41 | return int((time.time() - self.start_time) * 1000) 42 | 43 | 44 | class IntervalCollector(StatsCollector): 45 | """Collects stats while traffic is running. Frequency is specified by 'interval_sec' setting.""" 46 | 47 | last_tx_pkts = 0 48 | last_rx_pkts = 0 49 | last_time = 0 50 | 51 | def __init__(self, start_time): 52 | StatsCollector.__init__(self, start_time) 53 | self.notifier = None 54 | 55 | def attach_notifier(self, notifier): 56 | self.notifier = notifier 57 | 58 | def add(self, stats): 59 | pass 60 | 61 | def reset(self): 62 | # don't reset time! 63 | self.last_rx_pkts = 0 64 | self.last_tx_pkts = 0 65 | 66 | def add_ndr_pdr(self, tag, stats): 67 | pass 68 | 69 | 70 | class IterationCollector(StatsCollector): 71 | """Collects stats after traffic is stopped. Frequency is specified by 'duration_sec' setting.""" 72 | 73 | def __init__(self, start_time): 74 | StatsCollector.__init__(self, start_time) 75 | 76 | def add(self, stats, tx_pps): 77 | drop_percentage = self._get_drop_percentage(stats['overall']['rx']['dropped_pkts'], 78 | stats['overall']['tx']['total_pkts']) 79 | 80 | record = { 81 | 'total_tx_pps': int(stats['total_tx_rate']), 82 | 'tx_pps': tx_pps, 83 | 'tx_pkts': stats['overall']['tx']['total_pkts'], 84 | 'rx_pps': self._get_rx_pps(tx_pps, drop_percentage), 85 | 'rx_pkts': stats['overall']['rx']['total_pkts'], 86 | 'drop_pct': stats['overall']['rx']['dropped_pkts'], 87 | 'drop_percentage': drop_percentage, 88 | 'time_ms': int(time.time() * 1000) 89 | } 90 | 91 | if 'warning' in stats: 92 | record['warning'] = stats['warning'] 93 | 94 | self.stats.append(record) 95 | 96 | def add_ndr_pdr(self, tag, rate): 97 | last_stats = self.peek() 98 | last_stats['{}_pps'.format(tag)] = rate 99 | -------------------------------------------------------------------------------- /nfvbench/stats_manager.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # Copyright 2016 Cisco Systems, Inc. All rights reserved. 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); you may 5 | # not use this file except in compliance with the License. You may obtain 6 | # a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 12 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 13 | # License for the specific language governing permissions and limitations 14 | # under the License. 
15 | # 16 | import time 17 | 18 | from .log import LOG 19 | from .packet_stats import PacketPathStatsManager 20 | from .stats_collector import IntervalCollector 21 | 22 | 23 | class StatsManager(object): 24 | """A class to collect detailed stats and handle fixed rate runs for all chain types.""" 25 | 26 | def __init__(self, chain_runner): 27 | self.chain_runner = chain_runner 28 | self.config = chain_runner.config 29 | self.traffic_client = chain_runner.traffic_client 30 | self.specs = chain_runner.specs 31 | self.notifier = chain_runner.notifier 32 | self.interval_collector = None 33 | self.factory = chain_runner.factory 34 | # create a packet path stats manager for fixed rate runs only 35 | if self.config.single_run: 36 | pps_list = [] 37 | self.traffic_client.insert_interface_stats(pps_list) 38 | self.pps_mgr = PacketPathStatsManager(self.config, pps_list) 39 | else: 40 | self.pps_mgr = None 41 | self.worker = None 42 | 43 | def create_worker(self): 44 | """Create a worker to fetch custom data. 45 | 46 | This is done late as we need to know the dest MAC for all VNFs, which can happen 47 | as late as after ARP discovery. 48 | """ 49 | if not self.worker and self.specs.openstack: 50 | WORKER_CLASS = self.factory.get_chain_worker(self.specs.openstack.encaps, 51 | self.config.service_chain) 52 | self.worker = WORKER_CLASS(self) 53 | 54 | def _generate_traffic(self): 55 | if self.config.no_traffic: 56 | return {} 57 | 58 | self.interval_collector = IntervalCollector(time.time()) 59 | self.interval_collector.attach_notifier(self.notifier) 60 | LOG.info('Starting to generate traffic...') 61 | stats = {} 62 | for stats in self.traffic_client.run_traffic(): 63 | self.interval_collector.add(stats) 64 | 65 | LOG.info('...traffic generating ended.') 66 | return stats 67 | 68 | def get_stats(self): 69 | return self.interval_collector.get() if self.interval_collector else [] 70 | 71 | def get_version(self): 72 | return self.worker.get_version() if self.worker else {} 73 | 74 | def _update_interface_stats(self, diff=False): 75 | """Update interface stats for both the traffic generator and the worker.""" 76 | self.traffic_client.update_interface_stats(diff) 77 | if self.worker: 78 | self.worker.update_interface_stats(diff) 79 | 80 | def run_fixed_rate(self): 81 | """Run a fixed rate and analyze results.""" 82 | # Baseline the packet path stats 83 | self._update_interface_stats() 84 | 85 | in_flight_stats = self._generate_traffic() 86 | result = { 87 | 'stats': in_flight_stats 88 | } 89 | # New analysis code with packet path stats 90 | # Diff all interface stats and return packet path stats analysis 91 | # Diff the packet path stats 92 | self._update_interface_stats(diff=True) 93 | result['packet_path_stats'] = self.pps_mgr.get_results() 94 | return result 95 | 96 | def get_compute_nodes_bios(self): 97 | return self.worker.get_compute_nodes_bios() if self.worker else {} 98 | 99 | def close(self): 100 | if self.worker: 101 | self.worker.close() 102 | -------------------------------------------------------------------------------- /nfvbench/traffic_gen/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/opnfv/nfvbench/8ecfd4c886507fe602398a8623e6044d40ea8090/nfvbench/traffic_gen/__init__.py -------------------------------------------------------------------------------- /nfvbench/traffic_gen/traffic_base.py: -------------------------------------------------------------------------------- 1 | # Copyright 2016 Cisco Systems, Inc. 
All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); you may 4 | # not use this file except in compliance with the License. You may obtain 5 | # a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 11 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 12 | # License for the specific language governing permissions and limitations 13 | # under the License. 14 | 15 | import abc 16 | import sys 17 | 18 | from nfvbench.log import LOG 19 | from . import traffic_utils 20 | from hdrh.histogram import HdrHistogram 21 | from functools import reduce 22 | 23 | 24 | class Latency(object): 25 | """A class to hold latency data.""" 26 | 27 | def __init__(self, latency_list=None): 28 | """Create a latency instance. 29 | 30 | latency_list: aggregate all latency values from list if not None 31 | """ 32 | self.min_usec = sys.maxsize 33 | self.max_usec = 0 34 | self.avg_usec = 0 35 | self.hdrh = None 36 | if latency_list: 37 | hdrh_list = [] 38 | for lat in latency_list: 39 | if lat.available(): 40 | self.min_usec = min(self.min_usec, lat.min_usec) 41 | self.max_usec = max(self.max_usec, lat.max_usec) 42 | self.avg_usec += lat.avg_usec 43 | if lat.hdrh_available(): 44 | hdrh_list.append(HdrHistogram.decode(lat.hdrh)) 45 | 46 | # aggregate histograms if any 47 | if hdrh_list: 48 | def add_hdrh(x, y): 49 | x.add(y) 50 | return x 51 | decoded_hdrh = reduce(add_hdrh, hdrh_list) 52 | self.hdrh = HdrHistogram.encode(decoded_hdrh).decode('utf-8') 53 | 54 | # round to nearest usec 55 | self.avg_usec = int(round(float(self.avg_usec) / len(latency_list))) 56 | 57 | def available(self): 58 | """Return True if latency information is available.""" 59 | return self.min_usec != sys.maxsize 60 | 61 | def hdrh_available(self): 62 | """Return True if latency histogram information is available.""" 63 | return self.hdrh is not None 64 | 65 | class TrafficGeneratorException(Exception): 66 | """Exception for traffic generator.""" 67 | 68 | class AbstractTrafficGenerator(object): 69 | 70 | def __init__(self, traffic_client): 71 | self.traffic_client = traffic_client 72 | self.generator_config = traffic_client.generator_config 73 | self.config = traffic_client.config 74 | 75 | @abc.abstractmethod 76 | def get_version(self): 77 | # Must be implemented by sub classes 78 | return None 79 | 80 | @abc.abstractmethod 81 | def connect(self): 82 | # Must be implemented by sub classes 83 | return None 84 | 85 | @abc.abstractmethod 86 | def create_traffic(self, l2frame_size, rates, bidirectional, latency=True, e2e=False): 87 | # Must be implemented by sub classes 88 | return None 89 | 90 | def modify_rate(self, rate, reverse): 91 | """Change the rate per port. 
92 | 93 | rate: new rate in % (0 to 100) 94 | reverse: 0 for port 0, 1 for port 1 95 | """ 96 | port_index = int(reverse) 97 | port = self.port_handle[port_index] 98 | self.rates[port_index] = traffic_utils.to_rate_str(rate) 99 | LOG.info('Modified traffic stream for port %s, new rate=%s.', port, self.rates[port_index]) 100 | 101 | @abc.abstractmethod 102 | def get_stats(self, ifstats): 103 | # Must be implemented by sub classes 104 | return None 105 | 106 | @abc.abstractmethod 107 | def start_traffic(self): 108 | # Must be implemented by sub classes 109 | return None 110 | 111 | @abc.abstractmethod 112 | def stop_traffic(self): 113 | # Must be implemented by sub classes 114 | return None 115 | 116 | @abc.abstractmethod 117 | def cleanup(self): 118 | """Cleanup the traffic generator.""" 119 | return None 120 | 121 | def clear_streamblock(self): 122 | """Clear all streams from the traffic generator.""" 123 | 124 | @abc.abstractmethod 125 | def resolve_arp(self): 126 | """Resolve all configured remote IP addresses. 127 | 128 | return: None if ARP failed to resolve for all IP addresses 129 | else a dict of list of dest macs indexed by port# 130 | the dest macs in the list are indexed by the chain id 131 | """ 132 | 133 | @abc.abstractmethod 134 | def get_macs(self): 135 | """Return the local port MAC addresses. 136 | 137 | return: a list of MAC addresses indexed by the port# 138 | """ 139 | 140 | @abc.abstractmethod 141 | def get_port_speed_gbps(self): 142 | """Return the local port speeds. 143 | 144 | return: a list of speed in Gbps indexed by the port# 145 | """ 146 | 147 | def get_theoretical_rates(self, avg_packet_size): 148 | 149 | result = {} 150 | 151 | # actual interface speed? (may be a virtual override) 152 | intf_speed = self.config.intf_speed_used 153 | 154 | if hasattr(self.config, 'user_info') and self.config.user_info is not None: 155 | if "extra_encapsulation_bytes" in self.config.user_info: 156 | frame_size_full_encapsulation = avg_packet_size + self.config.user_info[ 157 | "extra_encapsulation_bytes"] 158 | result['theoretical_tx_rate_pps'] = traffic_utils.bps_to_pps( 159 | intf_speed, frame_size_full_encapsulation) * 2 160 | result['theoretical_tx_rate_bps'] = traffic_utils.pps_to_bps( 161 | result['theoretical_tx_rate_pps'], avg_packet_size) 162 | else: 163 | result['theoretical_tx_rate_pps'] = traffic_utils.bps_to_pps(intf_speed, 164 | avg_packet_size) * 2 165 | result['theoretical_tx_rate_bps'] = traffic_utils.pps_to_bps( 166 | result['theoretical_tx_rate_pps'], avg_packet_size) 167 | return result 168 | -------------------------------------------------------------------------------- /nfvbenchvm/dib/elements/nfvbenchvm/element-deps: -------------------------------------------------------------------------------- 1 | vm 2 | block-device-mbr 3 | cloud-init-datasources 4 | install-static 5 | package-installs 6 | devuser 7 | -------------------------------------------------------------------------------- /nfvbenchvm/dib/elements/nfvbenchvm/fdio-release.repo: -------------------------------------------------------------------------------- 1 | [fdio-release] 2 | name=FD.io release 17.10 binary RPM package repository for CentOS 7 3 | baseurl=https://packagecloud.io/fdio/1710/el/7/$basearch 4 | repo_gpgcheck=1 5 | gpgcheck=0 6 | enabled=1 7 | gpgkey=https://packagecloud.io/fdio/1710/gpgkey 8 | sslverify=1 9 | sslcacert=/etc/pki/tls/certs/ca-bundle.crt 10 | metadata_expire=300 11 | 12 | -------------------------------------------------------------------------------- 
/nfvbenchvm/dib/elements/nfvbenchvm/finalise.d/51-add-cpu-isolation: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ ${DIB_DEBUG_TRACE:-1} -gt 0 ]; then 4 | set -x 5 | fi 6 | 7 | # Stop on error 8 | set -euo pipefail 9 | 10 | grubby --update-kernel=ALL --args="isolcpus=1-7 rcu_nocbs=1 nohz=on nohz_full=1 nmi_watchdog=0" 11 | grubby --update-kernel=ALL --args="default_hugepagesz=1G hugepagesz=1G hugepages=4" 12 | grubby --update-kernel=ALL --args="intel_iommu=on iommu=pt" 13 | -------------------------------------------------------------------------------- /nfvbenchvm/dib/elements/nfvbenchvm/finalise.d/52-change-resolution: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ ${DIB_DEBUG_TRACE:-1} -gt 0 ]; then 4 | set -x 5 | fi 6 | 7 | # Stop on error 8 | set -euo pipefail 9 | 10 | grubby --update-kernel=ALL --args="vga=792" 11 | -------------------------------------------------------------------------------- /nfvbenchvm/dib/elements/nfvbenchvm/finalise.d/53-boot-from-new-kernel: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ ${DIB_DEBUG_TRACE:-1} -gt 0 ]; then 4 | set -x 5 | fi 6 | 7 | # Stop on error 8 | set -euo pipefail 9 | 10 | grubby --set-default-index=0 11 | -------------------------------------------------------------------------------- /nfvbenchvm/dib/elements/nfvbenchvm/package-installs.yaml: -------------------------------------------------------------------------------- 1 | bc: 2 | gcc: 3 | tuna: 4 | wget: 5 | screen: 6 | telnet: 7 | libyaml-devel: 8 | numactl-libs: 9 | numactl-devel: 10 | vpp: 11 | vpp-plugins: 12 | vpp-config: 13 | kernel-firmware: 14 | kernel-headers: 15 | kernel-devel: 16 | openssh-server: 17 | dpdk-tools: 18 | git: 19 | python3-dev: 20 | libpython3.6-dev: 21 | python3-pip: 22 | -------------------------------------------------------------------------------- /nfvbenchvm/dib/elements/nfvbenchvm/post-install.d/01-update-kernel: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ ${DIB_DEBUG_TRACE:-1} -gt 0 ]; then 4 | set -x 5 | fi 6 | 7 | # Stop on error 8 | set -euo pipefail 9 | 10 | if [ $DIB_USE_ELREPO_KERNEL != "True" ]; then 11 | exit 0 12 | fi 13 | 14 | # Installing the latest kernel from ELRepo 15 | rpm --import https://www.elrepo.org/RPM-GPG-KEY-elrepo.org 16 | rpm -Uvh http://www.elrepo.org/elrepo-release-7.0-2.el7.elrepo.noarch.rpm 17 | yum remove -y kernel-firmware kernel-headers kernel-devel 18 | yum install -y --enablerepo=elrepo-kernel kernel-lt kernel-lt-headers kernel-lt-devel 19 | 20 | # gcc will be removed with old kernel as dependency, so reinstalling it back 21 | yum install -y gcc 22 | -------------------------------------------------------------------------------- /nfvbenchvm/dib/elements/nfvbenchvm/post-install.d/02-pip-package: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ ${DIB_DEBUG_TRACE:-1} -gt 0 ]; then 4 | set -x 5 | fi 6 | 7 | # Stop on error 8 | set -euo pipefail 9 | 10 | python3 -m pip install --upgrade pip 11 | python3 -m pip install setuptools wheel pbr 12 | python3 -m pip install pyyaml 13 | -------------------------------------------------------------------------------- /nfvbenchvm/dib/elements/nfvbenchvm/post-install.d/03-copy-rc-local: -------------------------------------------------------------------------------- 1 | 
#!/bin/bash 2 | 3 | if [ ${DIB_DEBUG_TRACE:-1} -gt 0 ]; then 4 | set -x 5 | fi 6 | 7 | # Stop on error 8 | set -euo pipefail 9 | 10 | # set accurate rc.local file corresponding to current image built 11 | if [ $DIB_DEV_IMAGE = "loopvm" ]; then 12 | mv /etc/rc.d/rc.local.loopvm /etc/rc.d/rc.local 13 | else 14 | mv /etc/rc.d/rc.local.generator /etc/rc.d/rc.local 15 | fi 16 | -------------------------------------------------------------------------------- /nfvbenchvm/dib/elements/nfvbenchvm/post-install.d/04-add-execute-attribute: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ ${DIB_DEBUG_TRACE:-1} -gt 0 ]; then 4 | set -x 5 | fi 6 | 7 | # Stop on error 8 | set -euo pipefail 9 | 10 | chmod +x /etc/rc.d/rc.local 11 | chmod +x /etc/sysconfig/network-scripts/ifcfg-eth0 12 | chmod +x /etc/profile.d/nfvbench.sh 13 | chmod +x /nfvbench/configure-nfvbench.sh 14 | chmod +x /nfvbench/start-nfvbench.sh 15 | -------------------------------------------------------------------------------- /nfvbenchvm/dib/elements/nfvbenchvm/post-install.d/51-cloudcfg-edit: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | 3 | import yaml 4 | cloudcfg = "/etc/cloud/cloud.cfg" 5 | user = "cloud-user" 6 | 7 | with open(cloudcfg) as f: 8 | cfg = yaml.safe_load(f) 9 | 10 | # allow SSH password auth 11 | cfg['ssh_pwauth'] = "1" 12 | 13 | try: 14 | if cfg['system_info']['default_user']['name']: 15 | synver = "2" 16 | except KeyError: 17 | synver = "1" 18 | 19 | if synver == "1": 20 | if cfg['user'] == user: 21 | print("No change needed") 22 | exit() 23 | else: 24 | cfg['user'] = user 25 | elif synver == "2": 26 | if cfg['system_info']['default_user']['name'] == user: 27 | print("No change needed") 28 | exit() 29 | else: 30 | # Change the user to cloud-user 31 | cfg['system_info']['default_user']['name'] = user 32 | cfg['system_info']['default_user']['gecos'] = "Cloud User" 33 | print cfg['system_info']['default_user']['name'] 34 | 35 | with open(cloudcfg, "w") as f: 36 | yaml.dump(cfg, f, default_flow_style=False) 37 | -------------------------------------------------------------------------------- /nfvbenchvm/dib/elements/nfvbenchvm/post-install.d/52-nfvbench-script: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ ${DIB_DEBUG_TRACE:-1} -gt 0 ]; then 4 | set -x 5 | fi 6 | 7 | # Make sure the disk image build fails if nfvbench installation fails 8 | set -euo pipefail 9 | 10 | if [ $DIB_DEV_IMAGE != "generator" ]; then 11 | exit 0 12 | fi 13 | 14 | # TRex installation 15 | mkdir -p /opt/trex 16 | mkdir /var/log/nfvbench 17 | 18 | wget --no-cache --no-check-certificate https://trex-tgn.cisco.com/trex/release/$TREX_VER.tar.gz 19 | tar xzf $TREX_VER.tar.gz -C /opt/trex 20 | rm -f /$TREX_VER.tar.gz 21 | rm -f /opt/trex/$TREX_VER/trex_client_$TREX_VER.tar.gz 22 | cp -a /opt/trex/$TREX_VER/automation/trex_control_plane/interactive/trex /usr/local/lib/python3.6/site-packages/ 23 | rm -rf /opt/trex/$TREX_VER/automation/trex_control_plane/interactive/trex 24 | 25 | # NFVbench installation 26 | cd /opt 27 | if [[ "${DIB_NFVBENCH_CODE_ORIGIN}" == "static" ]]; then 28 | # nfvbench code has been copied by the install-static element to 29 | # /opt/nfvbench without the .git/ directory. 
But pip will need that .git/ 30 | # directory to compute nfvbench version, so will now finish the incomplete 31 | # job of install-static: 32 | STATIC_NFVBENCH_CODE="$(dirname $0)/../static/opt/nfvbench" 33 | [ -d "${STATIC_NFVBENCH_CODE}" ] || { 34 | echo "Error: directory ${STATIC_NFVBENCH_CODE} missing." 35 | echo " You requested a build from local nfvbench code with" 36 | echo " DIB_NFVBENCH_CODE_ORIGIN=static, but you likely forgot" 37 | echo " to clone nfvbench code in elements/nfvbenchvm/static/opt/nfvbench" 38 | exit 1 39 | } 40 | rsync -lr "${STATIC_NFVBENCH_CODE}"/ /opt/nfvbench/ 41 | else 42 | git clone https://gerrit.opnfv.org/gerrit/nfvbench 43 | fi 44 | cd nfvbench/ 45 | python3 -m pip install . --use-deprecated=legacy-resolver 46 | cp xtesting/testcases.yaml /usr/local/lib/python3.6/site-packages/xtesting/ci/testcases.yaml 47 | python3 ./docker/cleanup_generators.py 48 | rm -rf /opt/nfvbench/.git 49 | rm -rf /opt/nfvbench/nfvbench 50 | rm -rf /opt/nfvbench/behave_tests 51 | # symlink to NFVbench sources 52 | ln -s /usr/local/lib/python3.6/site-packages/nfvbench /opt/nfvbench/nfvbench 53 | ln -s /usr/local/lib/python3.6/site-packages/behave_tests /opt/nfvbench/behave_tests 54 | # persist env variables 55 | echo "export TREX_VER=\"$TREX_VER\"" >> /etc/profile.d/nfvbench.sh 56 | echo "export TREX_EXT_LIBS=\"/opt/trex/$TREX_VER/external_libs\"" >> /etc/profile.d/nfvbench.sh 57 | -------------------------------------------------------------------------------- /nfvbenchvm/dib/elements/nfvbenchvm/post-install.d/53-sshd-script: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ ${DIB_DEBUG_TRACE:-1} -gt 0 ]; then 4 | set -x 5 | fi 6 | 7 | # Stop on error 8 | set -euo pipefail 9 | 10 | # Set UseDNS no value in sshd_config to reduce time to connect 11 | echo "UseDNS no" >> /etc/ssh/sshd_config 12 | -------------------------------------------------------------------------------- /nfvbenchvm/dib/elements/nfvbenchvm/post-install.d/99-cleanup: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | if [ ${DIB_DEBUG_TRACE:-1} -gt 0 ]; then 4 | set -x 5 | fi 6 | 7 | # Stop on error 8 | set -euo pipefail 9 | 10 | yum erase -y python-devel libyaml-devel numactl-devel kernel-devel kernel-headers kernel-lt-headers kernel-lt-devel gcc git python3-dev libpython3.6-dev 11 | -------------------------------------------------------------------------------- /nfvbenchvm/dib/elements/nfvbenchvm/static/etc/cloud/cloud.cfg.d/99-disable-network-config.cfg: -------------------------------------------------------------------------------- 1 | network: {config: disabled} 2 | 3 | -------------------------------------------------------------------------------- /nfvbenchvm/dib/elements/nfvbenchvm/static/etc/modprobe.d/vfio.conf: -------------------------------------------------------------------------------- 1 | options vfio enable_unsafe_noiommu_mode=1 2 | -------------------------------------------------------------------------------- /nfvbenchvm/dib/elements/nfvbenchvm/static/etc/modules-load.d/vfio-pci.conf: -------------------------------------------------------------------------------- 1 | vfio-pci 2 | -------------------------------------------------------------------------------- /nfvbenchvm/dib/elements/nfvbenchvm/static/etc/openstack/clouds.yaml: -------------------------------------------------------------------------------- 1 | # clouds.yaml file 
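2 | #
3 | # (Illustrative sketch only: this file ships as an empty stub and is normally
4 | # filled in for the target cloud. The generator's rc.local reads it through
5 | # "openstack --os-cloud $CLOUD_DETAIL", so the cloud name used there must match.
6 | # The layout below follows the standard openstacksdk clouds.yaml format; every
7 | # name and value is a placeholder, not a value shipped with the image.)
8 | #
9 | # clouds:
10 | #   mycloud:
11 | #     auth:
12 | #       auth_url: http://<keystone-host>:5000/v3
13 | #       username: <username>
14 | #       password: <password>
15 | #       project_name: <project>
16 | #       user_domain_name: Default
17 | #       project_domain_name: Default
18 | #     region_name: <region>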
-------------------------------------------------------------------------------- /nfvbenchvm/dib/elements/nfvbenchvm/static/etc/profile.d/nfvbench.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | -------------------------------------------------------------------------------- /nfvbenchvm/dib/elements/nfvbenchvm/static/etc/rc.d/rc.local.generator: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | touch /var/lock/subsys/local 4 | 5 | # Waiting for cloud-init to generate $NFVBENCH_CONF, retry 60 seconds 6 | NFVBENCH_CONF=/etc/nfvbenchvm.conf 7 | retry=30 8 | until [ $retry -eq 0 ]; do 9 | if [ -f $NFVBENCH_CONF ]; then break; fi 10 | retry=$[$retry-1] 11 | sleep 2 12 | done 13 | if [ ! -f $NFVBENCH_CONF ]; then 14 | exit 0 15 | fi 16 | 17 | # Parse and obtain all configurations 18 | echo "Generating configurations for NFVbench and TRex..." 19 | eval $(cat $NFVBENCH_CONF) 20 | touch /nfvbench_configured.flag 21 | 22 | # Add DNS entry 23 | if [ $DNS_SERVERS ]; then 24 | IFS="," read -a dns <<< $DNS_SERVERS 25 | for d in "${dns[@]}"; do 26 | echo "nameserver $d" >> /etc/resolv.conf 27 | done 28 | fi 29 | 30 | # CPU isolation optimizations 31 | echo 1 > /sys/bus/workqueue/devices/writeback/cpumask 32 | echo 1 > /sys/devices/virtual/workqueue/cpumask 33 | echo 1 > /proc/irq/default_smp_affinity 34 | for irq in `ls /proc/irq/`; do 35 | if [ -f /proc/irq/$irq/smp_affinity ]; then 36 | echo 1 > /proc/irq/$irq/smp_affinity 37 | fi 38 | done 39 | 40 | NET_PATH=/sys/class/net 41 | 42 | get_eth_port() { 43 | # device mapping for CentOS Linux 7: 44 | # lspci: 45 | # 00.03.0 Ethernet controller: Red Hat, Inc. Virtio network device 46 | # 00.04.0 Ethernet controller: Red Hat, Inc. Virtio network device 47 | # /sys/class/net: 48 | # /sys/class/net/eth0 -> ../../devices/pci0000:00/0000:00:03.0/virtio0/net/eth0 49 | # /sys/class/net/eth1 -> ../../devices/pci0000:00/0000:00:04.0/virtio1/net/eth1 50 | 51 | mac=$1 52 | for f in $(ls $NET_PATH/); do 53 | if grep -q "$mac" $NET_PATH/$f/address; then 54 | eth_port=$(readlink $NET_PATH/$f | cut -d "/" -f8) 55 | # some virtual interfaces match on MAC and do not have a PCI address 56 | if [ "$eth_port" -a "$eth_port" != "N/A" ]; then 57 | # Found matching interface 58 | logger "NFVBENCHVM: found interface $f ($eth_port) matching $mac" 59 | break 60 | else 61 | eth_port="" 62 | fi 63 | fi; 64 | done 65 | if [ -z "$eth_port" ]; then 66 | echo "ERROR: Cannot find eth port for MAC $mac" >&2 67 | logger "NFVBENCHVM ERROR: Cannot find eth port for MAC $mac" 68 | return 1 69 | fi 70 | echo $eth_port 71 | return 0 72 | } 73 | 74 | # Set VM MANAGEMENT port up and running 75 | if [ $INTF_MGMT_CIDR ] && [ $INTF_MGMT_IP_GW ]; then 76 | if [ $INTF_MAC_MGMT ]; then 77 | ETH_PORT=$(get_eth_port $INTF_MAC_MGMT) 78 | elif [ "$CLOUD_DETAIL" ] && [ "$PORT_MGMT_NAME" ]; then 79 | INTF_MAC_MGMT=$(openstack --os-cloud $CLOUD_DETAIL port list | grep $PORT_MGMT_NAME | grep -o -Ei '([a-fA-F0-9:]{17}|[a-fA-F0-9]{12}$)' | head -1) 80 | ETH_PORT=$(get_eth_port $INTF_MAC_MGMT) 81 | else 82 | ETH_PORT="" 83 | fi 84 | if [ -z "$ETH_PORT" ]; then 85 | echo "ERROR: Cannot find eth port for management port" >&2 86 | logger "NFVBENCHVM ERROR: Cannot find eth port for management port" 87 | return 1 88 | fi 89 | 90 | # By default, configure the MTU of the management interface to the 91 | # conservative value of 1500: this will reduce the risk of getting an 92 | # unmanageable VM in some setups.
93 | # 94 | # To set the MTU to a different value, configure the INTF_MGMT_MTU variable 95 | # in /etc/nfvbenchvm.conf. If INTF_MGMT_MTU is set to the special value 96 | # "auto", the MTU will not be configured and it will keep the value set by 97 | # the hypervisor ("legacy" nfvbenchvm behavior). If INTF_MGMT_MTU is unset, 98 | # the MTU will be set to 1500. In other cases, the MTU will be set to the 99 | # value of INTF_MGMT_MTU. 100 | # 101 | if [[ -z "$INTF_MGMT_MTU" ]]; then 102 | ip link set $ETH_PORT mtu 1500 103 | elif [[ "$INTF_MGMT_MTU" != "auto" ]]; then 104 | ip link set $ETH_PORT mtu $INTF_MGMT_MTU 105 | fi 106 | 107 | ip addr add $INTF_MGMT_CIDR dev $ETH_PORT 108 | ip link set $ETH_PORT up 109 | ip route add default via $INTF_MGMT_IP_GW dev $ETH_PORT 110 | else 111 | echo "INFO: VM management IP Addresses missing in $NFVBENCH_CONF" 112 | fi 113 | 114 | /nfvbench/configure-nfvbench.sh 115 | 116 | if [ $ACTION ]; then 117 | /nfvbench/start-nfvbench.sh $ACTION 118 | else 119 | /nfvbench/start-nfvbench.sh 120 | fi 121 | 122 | exit 0 123 | -------------------------------------------------------------------------------- /nfvbenchvm/dib/elements/nfvbenchvm/static/etc/sysconfig/network-scripts/ifcfg-eth0: -------------------------------------------------------------------------------- 1 | DEVICE="eth0" 2 | BOOTPROTO="dhcp" 3 | ONBOOT="no" 4 | TYPE="Ethernet" 5 | USERCTL="yes" 6 | PEERDNS="yes" 7 | IPV6INIT="no" 8 | PERSISTENT_DHCLIENT="0" 9 | -------------------------------------------------------------------------------- /nfvbenchvm/dib/elements/nfvbenchvm/static/etc/sysconfig/network-scripts/ifcfg-eth1: -------------------------------------------------------------------------------- 1 | DEVICE="eth1" 2 | BOOTPROTO="dhcp" 3 | ONBOOT="no" 4 | TYPE="Ethernet" 5 | USERCTL="yes" 6 | PEERDNS="yes" 7 | IPV6INIT="no" 8 | PERSISTENT_DHCLIENT="0" 9 | -------------------------------------------------------------------------------- /nfvbenchvm/dib/elements/nfvbenchvm/static/etc/systemd/system/nfvbench.service: -------------------------------------------------------------------------------- 1 | [Unit] 2 | Description=nfvbench service 3 | After=network.target 4 | 5 | [Service] 6 | Type=forking 7 | User=root 8 | RemainAfterExit=yes 9 | ExecStart=/bin/bash -a -c "source /etc/profile.d/nfvbench.sh && /usr/bin/screen -dmSL nfvbench /usr/local/bin/nfvbench -c /etc/nfvbench/nfvbench.conf --server" 10 | 11 | [Install] 12 | WantedBy=multi-user.target 13 | -------------------------------------------------------------------------------- /nfvbenchvm/dib/elements/nfvbenchvm/static/nfvbench/nfvbench.conf: -------------------------------------------------------------------------------- 1 | traffic_generator: 2 | generator_profile: 3 | - name: trex-local 4 | tool: TRex 5 | ip: 127.0.0.1 6 | zmq_pub_port: 4500 7 | zmq_rpc_port: 4501 8 | software_mode: false 9 | 10 | cores: {{CORES}} 11 | platform: 12 | master_thread_id: '0' 13 | latency_thread_id: '1' 14 | dual_if: 15 | - socket: 0 16 | threads: [{{CORE_THREADS}}] 17 | 18 | interfaces: 19 | - port: 0 20 | pci: "{{PCI_ADDRESS_1}}" 21 | switch: 22 | - port: 1 23 | pci: "{{PCI_ADDRESS_2}}" 24 | switch: 25 | intf_speed: -------------------------------------------------------------------------------- /nfvbenchvm/dib/elements/nfvbenchvm/static/nfvbench/start-nfvbench.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | 4 | restart_nfvbench_service(){ 5 | service nfvbench restart 6 | echo "NFVbench running in 
screen 'nfvbench'" 7 | logger "NFVBENCHVM: NFVbench running in screen 'nfvbench'" 8 | } 9 | 10 | start_nfvbench(){ 11 | ln -sfn /etc/nfvbench/nfvbench.cfg /etc/nfvbench/nfvbench.conf 12 | restart_nfvbench_service 13 | } 14 | 15 | start_nfvbench_e2e_mode(){ 16 | ln -sfn /etc/nfvbench/e2e.cfg /etc/nfvbench/nfvbench.conf 17 | restart_nfvbench_service 18 | } 19 | 20 | start_nfvbench_loopback_mode(){ 21 | ln -sfn /etc/nfvbench/loopback.cfg /etc/nfvbench/nfvbench.conf 22 | restart_nfvbench_service 23 | } 24 | 25 | usage() { 26 | echo "Usage: $0 action" 27 | echo "action (optional):" 28 | echo "e2e start NFVbench with E2E config file" 29 | echo "loopback start NFVbench with loopback config file" 30 | echo "" 31 | echo "If no action is given NFVbench will start with default config file" 32 | exit 1 33 | } 34 | 35 | # ---------------------------------------------------------------------------- 36 | # Parse command line options and configure the script 37 | # ---------------------------------------------------------------------------- 38 | if [ "$#" -lt 1 ]; then 39 | start_nfvbench 40 | exit 0 41 | else 42 | if [ $1 = "e2e" ]; then 43 | start_nfvbench_e2e_mode 44 | exit 0 45 | elif [ $1 = "loopback" ]; then 46 | start_nfvbench_loopback_mode 47 | exit 0 48 | else 49 | usage 50 | fi 51 | fi 52 | -------------------------------------------------------------------------------- /nfvbenchvm/dib/elements/nfvbenchvm/static/vpp/startup.conf: -------------------------------------------------------------------------------- 1 | unix { 2 | nodaemon 3 | log /tmp/vpp.log 4 | full-coredump 5 | startup-config /etc/vpp/vm.conf 6 | cli-listen /run/vpp/cli.sock 7 | } 8 | 9 | api-trace { 10 | on 11 | } 12 | 13 | dpdk { 14 | dev default { 15 | num-rx-desc 1024 16 | num-tx-desc 1024 17 | num-rx-queues {{VIF_MQ_SIZE}} 18 | } 19 | socket-mem 1024 20 | dev {{PCI_ADDRESS_1}} 21 | dev {{PCI_ADDRESS_2}} 22 | uio-driver uio_pci_generic 23 | num-mbufs {{NUM_MBUFS}} 24 | } 25 | 26 | api-segment { 27 | gid vpp 28 | } 29 | 30 | cpu { 31 | main-core 0 32 | workers {{WORKER_CORES}} 33 | } 34 | -------------------------------------------------------------------------------- /nfvbenchvm/dib/elements/nfvbenchvm/static/vpp/vm.conf: -------------------------------------------------------------------------------- 1 | set int state {{INTF_1}} up 2 | set int state {{INTF_2}} up 3 | set interface ip address {{INTF_1}} {{VNF_GATEWAY1_CIDR}} 4 | set interface ip address {{INTF_2}} {{VNF_GATEWAY2_CIDR}} 5 | set ip arp {{INTF_1}} {{TG_GATEWAY1_IP}} {{TG_MAC1}} static 6 | set ip arp {{INTF_2}} {{TG_GATEWAY2_IP}} {{TG_MAC2}} static 7 | ip route add {{TG_NET1}} via {{TG_GATEWAY1_IP}} 8 | ip route add {{TG_NET2}} via {{TG_GATEWAY2_IP}} 9 | -------------------------------------------------------------------------------- /nfvbenchvm/dib/verify-image.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | # 3 | # A shell script to verify that a VM image is present in google storage 4 | # If not present in google storage, verify it is present locally 5 | # If not present locally, build it but do not uplaod to google storage 6 | 7 | bash build-image.sh -v 8 | -------------------------------------------------------------------------------- /requirements-dev.txt: -------------------------------------------------------------------------------- 1 | # Python requirements to setup a development environment to be able to do 2 | # anything directly or with tox: run unit tests, run code quality checks, build 3 | # 
the docs, ... 4 | 5 | -c https://opendev.org/openstack/requirements/raw/branch/stable/yoga/upper-constraints.txt 6 | -r requirements.txt 7 | -r test-requirements.txt 8 | -r docs/requirements.txt 9 | 10 | # Install tox with the same version as on OPNFV build servers 11 | tox==3.21.4 12 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | # Python dependencies required to run nfvbench and behave_tests 2 | # 3 | # The order of packages is significant, because pip processes them in the order 4 | # of appearance. Changing the order has an impact on the overall integration 5 | # process, which may cause wedges in the gate later. 6 | 7 | pbr!=2.1.0 # Apache-2.0 8 | 9 | attrdict>=2.0.0 10 | bitmath>=1.3.1.1 11 | pytz # MIT 12 | python-glanceclient # Apache-2.0 13 | python-neutronclient # Apache-2.0 14 | python-novaclient # Apache-2.0 15 | python-openstackclient # Apache-2.0 16 | python-keystoneclient!=2.1.0 # Apache-2.0 17 | PyYAML # MIT 18 | tabulate # MIT 19 | Flask!=0.11 # BSD 20 | fluent-logger>=0.5.3 21 | netaddr # BSD 22 | hdrhistogram>=0.8.0 23 | 24 | # Extra requirements for behave_tests: 25 | requests!=2.20.0,!=2.24.0 # Apache-2.0 26 | retry>=0.9.2 27 | xtesting>=0.92.0 28 | behave>=1.2.6 29 | -------------------------------------------------------------------------------- /setup.cfg: -------------------------------------------------------------------------------- 1 | # Copyright 2017 Cisco Systems, Inc. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); you may 4 | # not use this file except in compliance with the License. You may obtain 5 | # a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 11 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 12 | # License for the specific language governing permissions and limitations 13 | # under the License. 
14 | 15 | [metadata] 16 | name = nfvbench 17 | summary = An NFV benchmarking tool for Mercury OpenStack 18 | description-file = 19 | README.rst 20 | author = OpenStack 21 | author-email = openstack-dev@lists.openstack.org 22 | home-page = http://www.openstack.org/ 23 | classifier = 24 | Environment :: OpenStack 25 | Intended Audience :: Developers 26 | Intended Audience :: Information Technology 27 | Intended Audience :: System Administrators 28 | License :: OSI Approved :: Apache Software License 29 | Operating System :: POSIX :: Linux 30 | Operating System :: MacOS 31 | Programming Language :: Python 32 | Programming Language :: Python :: 3 33 | Programming Language :: Python :: 3.8 34 | 35 | [files] 36 | packages = 37 | nfvbench 38 | behave_tests 39 | 40 | [entry_points] 41 | console_scripts = 42 | nfvbench = nfvbench.nfvbench:main 43 | nfvbench_client = client.nfvbench_client:main 44 | xtesting.testcase = 45 | nfvbench_behaveframework = behave_tests.behavedriver:BehaveDriver 46 | 47 | [compile_catalog] 48 | directory = nfvbench/locale 49 | domain = nfvbench 50 | 51 | [update_catalog] 52 | domain = nfvbench 53 | output_dir = nfvbench/locale 54 | input_file = nfvbench/locale/nfvbench.pot 55 | 56 | [extract_messages] 57 | keywords = _ gettext ngettext l_ lazy_gettext 58 | mapping_file = babel.cfg 59 | output_file = nfvbench/locale/nfvbench.pot 60 | 61 | [wheel] 62 | universal = 1 63 | -------------------------------------------------------------------------------- /setup.py: -------------------------------------------------------------------------------- 1 | # Copyright (c) 2013 Hewlett-Packard Development Company, L.P. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); 4 | # you may not use this file except in compliance with the License. 5 | # You may obtain a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, 11 | # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 12 | # implied. 13 | # See the License for the specific language governing permissions and 14 | # limitations under the License. 15 | 16 | # THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT 17 | import setuptools 18 | 19 | setuptools.setup( 20 | setup_requires=['pbr>=2.0.0'], 21 | pbr=True) 22 | -------------------------------------------------------------------------------- /test-requirements.txt: -------------------------------------------------------------------------------- 1 | # Python dependencies required by tox to run unit tests and code quality checks 2 | # 3 | # (dependencies required to build and check the docs can be found in 4 | # docs/requirements.txt) 5 | # 6 | # The order of packages is significant, because pip processes them in the order 7 | # of appearance. Changing the order has an impact on the overall integration 8 | # process, which may cause wedges in the gate later. 
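# Example (a sketch, not part of the upstream file): one way to consume these
# requirement files locally together with the tox environments defined in
# tox.ini further below, assuming Python 3.8 is available:
#   python3.8 -m venv venv && . venv/bin/activate
#   pip install -r requirements-dev.txt  # pulls requirements.txt, test-requirements.txt, docs/requirements.txt and tox==3.21.4
#   tox -e py38                          # unit tests (pytest) under test/
#   tox -e pep8                          # flake8/hacking style checks
#   tox -e lint                          # pylint with pylint.rc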
9 | 10 | # Requirements for nfvbench unit tests: 11 | pytest # MIT 12 | scapy>=2.3.1 13 | # Extra requirements for behave_tests unit tests: 14 | pytest-subtests 15 | 16 | # Requirements for pep8 test: 17 | hacking==5.0.0 18 | flake8>=3.3.0 19 | 20 | # Requirements for linter test: 21 | pylint==2.15.9 22 | -------------------------------------------------------------------------------- /test/__init__.py: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/opnfv/nfvbench/8ecfd4c886507fe602398a8623e6044d40ea8090/test/__init__.py -------------------------------------------------------------------------------- /test/mock_trex.py: -------------------------------------------------------------------------------- 1 | # Copyright 2018 Cisco Systems, Inc. All rights reserved. 2 | # 3 | # Licensed under the Apache License, Version 2.0 (the "License"); you may 4 | # not use this file except in compliance with the License. You may obtain 5 | # a copy of the License at 6 | # 7 | # http://www.apache.org/licenses/LICENSE-2.0 8 | # 9 | # Unless required by applicable law or agreed to in writing, software 10 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 11 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 12 | # License for the specific language governing permissions and limitations 13 | # under the License. 14 | """This module creates the missing Trex library classes when they are not installed.""" 15 | 16 | import sys 17 | 18 | # Because trex_stl_lib may not be installed when running unit test 19 | # nfvbench.traffic_client will try to import STLError: 20 | # from trex.stl.api import STLError 21 | # will raise ImportError: No module named trex.stl.api 22 | # trex_gen.py will also try to import a number of trex.stl.api classes 23 | try: 24 | import trex.stl.api 25 | assert trex.stl.api 26 | except ImportError: 27 | from types import ModuleType 28 | 29 | # Make up a trex.stl.api.STLError class 30 | class STLDummy(Exception): 31 | """Dummy class.""" 32 | 33 | trex_lib_mod = ModuleType('trex') 34 | sys.modules['trex'] = trex_lib_mod 35 | stl_lib_mod = ModuleType('trex.stl') 36 | trex_lib_mod.stl = stl_lib_mod 37 | sys.modules['trex.stl'] = stl_lib_mod 38 | api_mod = ModuleType('trex.stl.api') 39 | stl_lib_mod.api = api_mod 40 | sys.modules['trex.stl.api'] = api_mod 41 | api_mod.STLError = STLDummy 42 | api_mod.STLxyz = STLDummy 43 | api_mod.CTRexVmInsFixHwCs = STLDummy 44 | api_mod.Dot1Q = STLDummy 45 | api_mod.Ether = STLDummy 46 | api_mod.ARP = STLDummy 47 | api_mod.IP = STLDummy 48 | api_mod.ARP = STLDummy 49 | api_mod.STLClient = STLDummy 50 | api_mod.STLFlowLatencyStats = STLDummy 51 | api_mod.STLFlowStats = STLDummy 52 | api_mod.STLPktBuilder = STLDummy 53 | api_mod.STLScVmRaw = STLDummy 54 | api_mod.STLStream = STLDummy 55 | api_mod.STLTXCont = STLDummy 56 | api_mod.STLTXMultiBurst = STLDummy 57 | api_mod.STLVmFixChecksumHw = STLDummy 58 | api_mod.STLVmFixIpv4 = STLDummy 59 | api_mod.STLVmFlowVar = STLDummy 60 | api_mod.STLVmFlowVarRepeatableRandom = STLDummy 61 | api_mod.STLVmTupleGen = STLDummy 62 | api_mod.STLVmWrFlowVar = STLDummy 63 | api_mod.UDP = STLDummy 64 | api_mod.bind_layers = STLDummy 65 | api_mod.FlagsField = STLDummy 66 | api_mod.Packet = STLDummy 67 | api_mod.ThreeBytesField = STLDummy 68 | api_mod.XByteField = STLDummy 69 | 70 | common_mod = ModuleType('trex.common') 71 | trex_lib_mod.common = common_mod 72 | sys.modules['trex.common'] = common_mod 73 | services_mod 
= ModuleType('trex.common.services') 74 | common_mod.services = services_mod 75 | sys.modules['trex.common.services'] = services_mod 76 | arp_mod = ModuleType('trex.common.services.trex_service_arp') 77 | services_mod.trex_stl_service_arp = arp_mod 78 | sys.modules['trex.common.services.trex_service_arp'] = arp_mod 79 | arp_mod.ServiceARP = STLDummy 80 | 81 | def no_op(): 82 | """Empty function.""" 83 | -------------------------------------------------------------------------------- /test/ut_behave_tests/__init__.py: -------------------------------------------------------------------------------- 1 | """Unit tests for the behave_tests package.""" 2 | -------------------------------------------------------------------------------- /test/ut_behave_tests/test_steps.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # Copyright 2022 Orange 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); you may 5 | # not use this file except in compliance with the License. You may obtain 6 | # a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 12 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 13 | # License for the specific language governing permissions and limitations 14 | # under the License. 15 | # 16 | 17 | """ 18 | Unit tests for some of the functions found in behave_tests/features/steps/steps.py 19 | """ 20 | 21 | import logging 22 | import unittest 23 | from unittest.mock import call, Mock, patch 24 | 25 | from behave_tests.features.steps.steps import get_last_result 26 | from .test_utils import setup_logging, stub_requests_get 27 | 28 | 29 | def setUpModule(): 30 | setup_logging() 31 | 32 | 33 | class TestGetLastResult(unittest.TestCase): 34 | def setUp(self): 35 | # Mock requests.get() so that TestAPI results come from JSON files 36 | # found in test_data/ directory. 37 | patcher = patch('behave_tests.features.steps.testapi.requests') 38 | self._mock_requests = patcher.start() 39 | self._mock_requests.get.side_effect = stub_requests_get 40 | self.addCleanup(patcher.stop) 41 | 42 | # Setup a mock for behave context 43 | self._context = Mock() 44 | self._context.data = { 45 | 'PROJECT_NAME': "nfvbench", 46 | 'TEST_DB_URL': "http://127.0.0.1:8000/api/v1/results" 47 | } 48 | self._context.logger = logging.getLogger("behave_tests") 49 | 50 | def test_get_last_result_throughput_characterization(self): 51 | self._context.json = { 52 | "frame_sizes": ['64'], 53 | "flow_count": "100k", 54 | "duration_sec": '10', 55 | "rate": "ndr", 56 | "user_label": "amical_tc18_loopback" 57 | } 58 | self._context.tag = "throughput" 59 | 60 | last_result = get_last_result(self._context, reference=True) 61 | 62 | self.assertIsNotNone(last_result) 63 | self.assertEqual(16765582, last_result["synthesis"]["total_tx_rate"]) 64 | self.assertEqual(25, round(last_result["synthesis"]["avg_delay_usec"])) 65 | 66 | self._mock_requests.get.assert_called_once_with( 67 | "http://127.0.0.1:8000/api/v1/results?" 
68 | "project=nfvbench&case=characterization&criteria=PASS&page=1") 69 | 70 | def test_get_last_result_latency_characterization(self): 71 | self._context.json = { 72 | "frame_sizes": ['768'], 73 | "flow_count": "100k", 74 | "duration_sec": '10', 75 | "rate": "90%", 76 | "user_label": "amical_tc6_intensive" 77 | } 78 | self._context.tag = "latency" 79 | 80 | last_result = get_last_result(self._context, reference=True) 81 | 82 | self.assertIsNotNone(last_result) 83 | self.assertEqual(262275, last_result["synthesis"]["total_tx_rate"]) 84 | self.assertEqual(353, round(last_result["synthesis"]["avg_delay_usec"])) 85 | 86 | self._mock_requests.get.assert_has_calls([ 87 | call("http://127.0.0.1:8000/api/v1/results?" 88 | "project=nfvbench&case=characterization&criteria=PASS&page=1"), 89 | call("http://127.0.0.1:8000/api/v1/results?" 90 | "project=nfvbench&case=characterization&criteria=PASS&page=2")]) 91 | 92 | def test_last_result_not_found(self): 93 | self._context.json = { 94 | "frame_sizes": ['64'], 95 | "flow_count": "100k", 96 | "duration_sec": '10', 97 | "rate": "ndr", 98 | "user_label": "toto_titi_tata" # User label not in test data 99 | } 100 | self._context.tag = "throughput" 101 | 102 | with self.assertRaises(AssertionError): 103 | get_last_result(self._context, reference=True) 104 | 105 | self._mock_requests.get.assert_has_calls([ 106 | call("http://127.0.0.1:8000/api/v1/results?" 107 | "project=nfvbench&case=characterization&criteria=PASS&page=1"), 108 | call("http://127.0.0.1:8000/api/v1/results?" 109 | "project=nfvbench&case=characterization&criteria=PASS&page=2")]) 110 | 111 | def test_get_last_result_throughput_non_regression(self): 112 | self._context.CASE_NAME = "non-regression" 113 | self._context.json = { 114 | "frame_sizes": ['1518'], 115 | "flow_count": "100k", 116 | "duration_sec": '10', 117 | "rate": "ndr", 118 | "user_label": "amical_tc12_basic" 119 | } 120 | self._context.tag = "throughput" 121 | 122 | last_result = get_last_result(self._context) 123 | 124 | self.assertIsNotNone(last_result) 125 | self.assertEqual(512701, last_result["synthesis"]["total_tx_rate"]) 126 | self.assertEqual(148, round(last_result["synthesis"]["avg_delay_usec"])) 127 | 128 | self._mock_requests.get.assert_called_once_with( 129 | "http://127.0.0.1:8000/api/v1/results?" 130 | "project=nfvbench&case=non-regression&criteria=PASS&page=1") 131 | 132 | def test_get_last_result_latency_non_regression(self): 133 | self._context.CASE_NAME = "non-regression" 134 | self._context.json = { 135 | "frame_sizes": ['1518'], 136 | "flow_count": "100k", 137 | "duration_sec": '10', 138 | "rate": "70%", 139 | "user_label": "amical_tc12_basic" 140 | } 141 | self._context.tag = "latency" 142 | 143 | last_result = get_last_result(self._context) 144 | 145 | self.assertIsNotNone(last_result) 146 | self.assertEqual(352040, last_result["synthesis"]["total_tx_rate"]) 147 | self.assertEqual(114, round(last_result["synthesis"]["avg_delay_usec"])) 148 | 149 | self._mock_requests.get.assert_called_once_with( 150 | "http://127.0.0.1:8000/api/v1/results?" 151 | "project=nfvbench&case=non-regression&criteria=PASS&page=1") 152 | -------------------------------------------------------------------------------- /test/ut_behave_tests/test_utils.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python 2 | # Copyright 2022 Orange 3 | # 4 | # Licensed under the Apache License, Version 2.0 (the "License"); you may 5 | # not use this file except in compliance with the License. 
You may obtain 6 | # a copy of the License at 7 | # 8 | # http://www.apache.org/licenses/LICENSE-2.0 9 | # 10 | # Unless required by applicable law or agreed to in writing, software 11 | # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT 12 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the 13 | # License for the specific language governing permissions and limitations 14 | # under the License. 15 | # 16 | 17 | """ 18 | Utility functions for unit tests. 19 | """ 20 | 21 | import json 22 | import logging 23 | import pathlib 24 | import unittest 25 | from unittest.mock import Mock 26 | 27 | 28 | # ----------------------------------------------------------------------------------------- 29 | # Logging helpers 30 | # ----------------------------------------------------------------------------------------- 31 | 32 | def setup_logging(log_filename="ut_behave_tests.log", *, log_to_console=False): 33 | """Setup logging for unit tests. 34 | 35 | Principles: 36 | - re-use the global "behave_tests" logger 37 | - if `log_to_console` is True, log messages up to INFO level to the console 38 | (can be useful when running tests with unittest instead of pytest) 39 | - if `log_filename` is provided, log messages up to DEBUG level to the log file 40 | """ 41 | logger = logging.getLogger("behave_tests") 42 | logger.setLevel(logging.DEBUG) 43 | formatter = logging.Formatter("%(levelname)s:%(filename)s:%(lineno)s: %(message)s") 44 | 45 | # Configure logging to the console 46 | if log_to_console: 47 | ch = logging.StreamHandler() 48 | ch.setLevel(logging.INFO) 49 | ch.setFormatter(formatter) 50 | logger.addHandler(ch) 51 | 52 | # Configure logging to the log file 53 | if log_filename is not None: 54 | fh = logging.FileHandler(filename=log_filename, mode='w') 55 | fh.setLevel(logging.DEBUG) 56 | fh.setFormatter(formatter) 57 | logger.addHandler(fh) 58 | 59 | return logger 60 | 61 | 62 | # ----------------------------------------------------------------------------------------- 63 | # Test data helpers 64 | # ----------------------------------------------------------------------------------------- 65 | 66 | def get_test_data_dir() -> pathlib.Path: 67 | """Get absolute path of the test_data/ dir. 68 | 69 | We need this because the unit tests can be run from different locations 70 | depending on the context (tox, development, ...) 71 | """ 72 | return pathlib.Path(__file__).parent / 'test_data' 73 | 74 | 75 | def stub_requests_get(testapi_url): 76 | """Mock a request to TestAPI results database. 77 | 78 | Instead of doing a real request, build a filename from the URL suffix, find 79 | the file in the `test_data` directory and return the contents of the file. 80 | 81 | Args: 82 | testapi_url: a URL starting with `http://127.0.0.1:8000/api/v1/results?` 83 | and followed by the results file name without extension. 84 | 85 | Returns: 86 | A mock of a `requests.Response` object with the attributes `text` and 87 | `status_code` and the method `json()`. 
88 | """ 89 | response = Mock() 90 | filename_prefix = testapi_url.replace('http://127.0.0.1:8000/api/v1/results?', '') 91 | if filename_prefix == testapi_url: 92 | raise ValueError("For unit tests, TestAPI URL must start with " 93 | "http://127.0.0.1:8000/api/v1/results?") 94 | page_filename = get_test_data_dir() / (filename_prefix + ".json") 95 | try: 96 | with open(page_filename, 'r', encoding='utf-8') as results: 97 | response.text = results.read() 98 | response.json = lambda: json.loads(response.text) 99 | response.status_code = 200 100 | return response 101 | except FileNotFoundError as e: 102 | logging.getLogger("behave_tests").exception(e) 103 | raise ValueError(f"No test data available for TestAPI URL: {testapi_url}") from e 104 | 105 | 106 | class TestStubRequestsGet(unittest.TestCase): 107 | def test_valid_url(self): 108 | response = stub_requests_get("http://127.0.0.1:8000/api/v1/results?" 109 | "project=nfvbench&case=characterization&criteria=PASS&page=1") 110 | self.assertEqual(200, response.status_code) 111 | self.assertEqual("nfvbench", response.json()["results"][0]["project_name"]) 112 | 113 | def test_bad_prefix(self): 114 | with self.assertRaises(ValueError): 115 | stub_requests_get("http://no.way/api/v1/results?" "dummy_suffix") 116 | 117 | def test_file_not_found(self): 118 | with self.assertRaises(ValueError): 119 | stub_requests_get("http://127.0.0.1:8000/api/v1/results?" "dummy_suffix") 120 | -------------------------------------------------------------------------------- /tox.ini: -------------------------------------------------------------------------------- 1 | [tox] 2 | minversion = 1.6 3 | envlist = py38,pep8,lint,docs,docs-linkcheck 4 | skipsdist = True 5 | 6 | [testenv] 7 | usedevelop = True 8 | install_command = pip install -U {opts} {packages} 9 | setenv = 10 | VIRTUAL_ENV={envdir} 11 | deps = 12 | -chttps://opendev.org/openstack/requirements/raw/branch/stable/yoga/upper-constraints.txt 13 | -r{toxinidir}/requirements.txt 14 | -r{toxinidir}/test-requirements.txt 15 | commands = py.test -q --basetemp={envtmpdir} {posargs} test/ 16 | 17 | [testenv:pep8] 18 | basepython = python3.8 19 | skip_install = True 20 | commands = flake8 {toxinidir} 21 | 22 | [testenv:lint] 23 | basepython = python3.8 24 | skip_install = True 25 | commands = pylint --rcfile pylint.rc nfvbench test 26 | 27 | [flake8] 28 | # H803 skipped on purpose per list discussion. 29 | # E123, E125 skipped as they are invalid PEP-8. 
30 | # E117,E126,E127,E128,E211,E226,E231,E252,E305,E731,W503,W504 skipped as they 31 | # are introduced by newer versions of flake8 (update from 2.2.4 to 3.9.2) 32 | # H216,H301,H401 skipped as they are introduced by a newer version of hacking 33 | # (update from 0.10.3 to 4.1.0) 34 | max-line-length = 100 35 | show-source = True 36 | #E117: over-indented (comment) 37 | #E126: continuation line over-indented for hanging indent 38 | #E127: continuation line over-indented for visual indent 39 | #E128: continuation line under-indented for visual indent 40 | #E211: whitespace before '(' 41 | #E226: missing whitespace around arithmetic operator 42 | #E231: missing whitespace after ',' 43 | #E252: missing whitespace around parameter equals 44 | #E302: expected 2 blank lines 45 | #E303: too many blank lines (2) 46 | #E305: expected 2 blank lines after class or function definition, found 1 47 | #E731: do not assign a lambda expression, use a def 48 | #W503: line break before binary operator 49 | #W504: line break after binary operator 50 | #H101: Use TODO(NAME) 51 | #H216: The unittest.mock module should be used rather than the third party mock package unless actually needed. If so, disable the H216 check in hacking config and ensure mock is declared in the project's requirements. 52 | #H233: Python 3.x incompatible use of print operator 53 | #H236: Python 3.x incompatible __metaclass__, use six.add_metaclass() 54 | #H301: one import per line 55 | #H302: import only modules. 56 | #H304: No relative imports 57 | #H306: imports not in alphabetical order 58 | #H401: docstring should not start with a space 59 | #H404: multi line docstring should start without a leading new line 60 | #H405: multi line docstring summary not separated with an empty line 61 | #H904: Wrap long lines in parentheses instead of a backslash 62 | ignore = E117,E123,E125,E126,E127,E128,E211,E226,E231,E252,E302,E303,E305,E731,W503,W504,H101,H104,H216,H233,H236,H301,H302,H304,H306,H401,H404,H405,H803,H904,D102,D100,D107 63 | builtins = _ 64 | exclude=venv,.git,.tox,dist,doc,*openstack/common*,*lib/python*,*egg,build,nfvbenchvm 65 | 66 | [testenv:docs] 67 | basepython = python3.8 68 | deps = 69 | -chttps://opendev.org/openstack/requirements/raw/branch/stable/yoga/upper-constraints.txt 70 | -rdocs/requirements.txt 71 | skip_install = True 72 | commands = 73 | sphinx-build -b html -n -d {envtmpdir}/doctrees ./docs/ {toxinidir}/docs/_build/html 74 | echo "Generated docs available in {toxinidir}/docs/_build/html" 75 | whitelist_externals = echo 76 | 77 | [testenv:docs-linkcheck] 78 | basepython = python3.8 79 | deps = 80 | -chttps://opendev.org/openstack/requirements/raw/branch/stable/yoga/upper-constraints.txt 81 | -rdocs/requirements.txt 82 | skip_install = True 83 | passenv = http_proxy https_proxy no_proxy 84 | commands = sphinx-build -b linkcheck -d {envtmpdir}/doctrees ./docs/ {toxinidir}/docs/_build/linkcheck 85 | -------------------------------------------------------------------------------- /xtesting/ansible/host_vars/127.0.0.1: -------------------------------------------------------------------------------- 1 | docker_args: 2 | env: {} 3 | params: 4 | net: host 5 | privileged: true 6 | volumes: 7 | - /lib/modules/$(uname -r):/lib/modules/$(uname -r) 8 | - /usr/src/kernels:/usr/src/kernels -v /dev:/dev 9 | - /home/opnfv/nfvbench/config:/etc/nfvbench 10 | - /home/opnfv/nfvbench/results:/var/lib/xtesting/results 11 | -------------------------------------------------------------------------------- /xtesting/ansible/site.yml:
-------------------------------------------------------------------------------- 1 | --- 2 | - hosts: 3 | - 127.0.0.1 4 | roles: 5 | - role: collivier.xtesting 6 | project: nfvbench 7 | gerrit_project: nfvbench 8 | builds: 9 | steps: 10 | - name: build opnfv/nfvbench 11 | containers: 12 | - name: nfvbench 13 | ref_arg: BRANCH 14 | path: docker 15 | suites: 16 | - container: nfvbench 17 | tests: 18 | - 10kpps-pvp-run 19 | - characterization 20 | - non-regression 21 | properties: 22 | execution-type: SEQUENTIALLY -------------------------------------------------------------------------------- /xtesting/testcases.yaml: -------------------------------------------------------------------------------- 1 | --- 2 | tiers: 3 | - 4 | name: nfvbench 5 | order: 1 6 | description: 'Data Plane Performance Testing' 7 | testcases: 8 | - 9 | case_name: characterization 10 | project_name: nfvbench 11 | criteria: 100 12 | blocking: true 13 | clean_flag: false 14 | description: '' 15 | run: 16 | name: 'nfvbench_behaveframework' 17 | args: 18 | suites: 19 | - /opt/nfvbench/behave_tests/features/characterization-full.feature 20 | tags: 21 | - characterization 22 | console: 23 | - true 24 | - 25 | case_name: non-regression 26 | project_name: nfvbench 27 | criteria: 100 28 | blocking: true 29 | clean_flag: false 30 | description: '' 31 | run: 32 | name: 'nfvbench_behaveframework' 33 | args: 34 | suites: 35 | - /opt/nfvbench/behave_tests/features/non-regression.feature 36 | tags: 37 | - non-regression 38 | console: 39 | - true 40 | 41 | - 42 | name: nfvbench-rapid-characterization 43 | order: 2 44 | description: 'Data Plane Performance Testing' 45 | testcases: 46 | - 47 | case_name: rapid-characterization 48 | project_name: nfvbench 49 | criteria: 100 50 | blocking: true 51 | clean_flag: false 52 | description: '' 53 | run: 54 | name: 'nfvbench_behaveframework' 55 | args: 56 | suites: 57 | - /opt/nfvbench/behave_tests/features/characterization-samples.feature 58 | tags: 59 | - characterization 60 | console: 61 | - true 62 | - 63 | name: quick-test-10kpps 64 | order: 3 65 | description: 'Quick nfvbench test at low packet rate' 66 | testcases: 67 | - 68 | case_name: quick-test-10kpps 69 | project_name: nfvbench 70 | criteria: 100 71 | blocking: true 72 | clean_flag: false 73 | description: '' 74 | run: 75 | name: 'nfvbench_behaveframework' 76 | args: 77 | suites: 78 | - /opt/nfvbench/behave_tests/features/quick-test-10kpps.feature 79 | tags: 80 | - quick-test-10kpps 81 | console: 82 | - true 83 | - 84 | name: nfvbench-demo 85 | order: 4 86 | description: 'Data Plane Performance Testing' 87 | testcases: 88 | - 89 | case_name: 10kpps-pvp-run 90 | project_name: nfvbench 91 | criteria: 100 92 | blocking: true 93 | clean_flag: false 94 | description: '' 95 | run: 96 | name: 'bashfeature' 97 | args: 98 | cmd: 99 | - nfvbench -c /etc/nfvbench/nfvbench.cfg --rate 10kpps 100 | console: 101 | - true 102 | --------------------------------------------------------------------------------
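As a usage sketch of how these xtesting test case definitions translate into commands (assumptions: xtesting is installed with its standard run_tests entry point, and /etc/nfvbench is populated as in the ansible host_vars above):

# run the nfvbench-demo case through the xtesting runner
run_tests -t 10kpps-pvp-run
# or invoke the underlying command directly, exactly as listed in the testcase args
nfvbench -c /etc/nfvbench/nfvbench.cfg --rate 10kpps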