├── ovn-tester ├── cms │ ├── __init__.py │ ├── ovn_kubernetes │ │ ├── __init__.py │ │ └── tests │ │ │ ├── netpol_large.py │ │ │ ├── netpol_small.py │ │ │ ├── density_light.py │ │ │ ├── netpol_cross_ns.py │ │ │ ├── netpol.py │ │ │ ├── service_route.py │ │ │ ├── cluster_density.py │ │ │ ├── base_cluster_bringup.py │ │ │ ├── density_heavy.py │ │ │ └── netpol_multitenant.py │ └── openstack │ │ ├── __init__.py │ │ └── tests │ │ └── base_openstack.py ├── TODO ├── requirements.txt ├── ovn_exceptions.py ├── ovn_context.py ├── ovn_ext_cmd.py ├── ovn_stats.py ├── ovn_load_balancer.py ├── ovn_sandbox.py ├── ovn_tester.py └── ovn_workload.py ├── .gitignore ├── logo.png ├── utils ├── requirements.txt ├── logs-checker.sh ├── helpers.py ├── mine-poll-intervals.sh ├── latency.py ├── mine-db-poll-intervals.sh └── process-stats.py ├── pyproject.toml ├── physical-deployments ├── ci.yml └── physical-deployment.yml ├── test-scenarios ├── openstack-low-scale.yml ├── openstack-20-projects-10-vms.yml ├── ocp-120-density-heavy.yml ├── ocp-120-density-light.yml ├── ocp-180-density-heavy.yml ├── ocp-180-density-light.yml ├── ocp-20-cluster-density.yml ├── ocp-20-density-heavy.yml ├── ocp-20-density-light.yml ├── ocp-250-density-heavy.yml ├── ocp-250-density-light.yml ├── ocp-500-density-heavy.yml ├── ocp-60-cluster-density.yml ├── ocp-60-density-heavy.yml ├── ocp-60-density-light.yml ├── ocp-120-cluster-density.yml ├── ocp-180-cluster-density.yml ├── ocp-250-cluster-density.yml ├── ocp-500-cluster-density.yml ├── ocp-500-density-light.yml ├── ocp-20-cluster-density-netdev.yml ├── ocp-120-cluster-density-netdev.yml ├── ocp-20-np-multitenant.yml ├── ocp-60-np-multitenant.yml ├── ocp-120-np-multitenant.yml ├── ocp-180-np-multitenant.yml ├── ocp-250-np-multitenant.yml ├── ocp-500-np-multitenant.yml ├── ovn-20.yml ├── ovn-120.yml ├── ovn-500.yml ├── ovn-etcd-low-scale.yml ├── ovn-low-scale-ic.yml └── ovn-low-scale.yml ├── .github ├── pull_request_template.md └── workflows │ └── test.yml ├── 
ovn-fake-multinode-utils ├── playbooks │ ├── pull-ovn-tester.yml │ ├── pull-fake-multinode.yml │ ├── install-fake-multinode.yml │ ├── collect-logs.yml │ ├── configure-tester.yml │ ├── install-dependencies.yml │ ├── deploy-minimal.yml │ └── bringup-cluster.yml ├── scripts │ ├── perf.sh │ └── log-collector.sh ├── get-config-value.py ├── process-monitor.py ├── generate-hosts.py └── translate_yaml.py ├── Dockerfile ├── .cirrus.yml ├── CONTRIBUTING.md ├── LICENSE ├── README.md ├── do.sh └── copyright /ovn-tester/cms/__init__.py: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | /runtime 2 | /test_results 3 | __pycache__ 4 | -------------------------------------------------------------------------------- /logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/ovn-org/ovn-heater/HEAD/logo.png -------------------------------------------------------------------------------- /utils/requirements.txt: -------------------------------------------------------------------------------- 1 | numpy 2 | pandas 3 | plotly 4 | netaddr 5 | pyyaml 6 | -------------------------------------------------------------------------------- /ovn-tester/cms/ovn_kubernetes/__init__.py: -------------------------------------------------------------------------------- 1 | from .ovn_kubernetes import * # noqa 2 | -------------------------------------------------------------------------------- /pyproject.toml: -------------------------------------------------------------------------------- 1 | [tool.black] 2 | line-length = 79 3 | skip-string-normalization = 1 4 | -------------------------------------------------------------------------------- /ovn-tester/TODO: 
-------------------------------------------------------------------------------- 1 | - Properly implement ovn_exceptions.py 2 | - Improve configuration parsing in ovn_tester.py. 3 | - Check and import licenses from ovn-scale-test repo. 4 | -------------------------------------------------------------------------------- /ovn-tester/requirements.txt: -------------------------------------------------------------------------------- 1 | netaddr 2 | numpy 3 | paramiko 4 | pandas 5 | plotly 6 | pyyaml 7 | randmac 8 | git+https://github.com/openstack/ovsdbapp@master#egg=ovsdbapp 9 | -------------------------------------------------------------------------------- /physical-deployments/ci.yml: -------------------------------------------------------------------------------- 1 | internal-iface: lo 2 | 3 | central-nodes: 4 | - 5 | 6 | tester-node: 7 | name: 8 | ssh_key: /root/.ssh/id_rsa 9 | 10 | worker-nodes: 11 | - 12 | -------------------------------------------------------------------------------- /ovn-tester/cms/openstack/__init__.py: -------------------------------------------------------------------------------- 1 | from .openstack import ( 2 | OpenStackCloud, 3 | OVN_HEATER_CMS_PLUGIN, 4 | ExternalNetworkSpec, 5 | ) 6 | 7 | __all__ = [ 8 | OpenStackCloud, 9 | OVN_HEATER_CMS_PLUGIN, 10 | ExternalNetworkSpec, 11 | ] 12 | -------------------------------------------------------------------------------- /physical-deployments/physical-deployment.yml: -------------------------------------------------------------------------------- 1 | internal-iface: eno1 2 | 3 | central-nodes: 4 | - host02.mydomain.com 5 | 6 | tester-node: 7 | name: host02.mydoman.com 8 | ssh_key: /path/to/ssh_key 9 | 10 | worker-nodes: 11 | - host03.mydomain.com 12 | - host04.mydomain.com 13 | -------------------------------------------------------------------------------- /test-scenarios/openstack-low-scale.yml: -------------------------------------------------------------------------------- 1 | global: 2 | 
log_cmds: false 3 | cms_name: openstack 4 | 5 | cluster: 6 | clustered_db: true 7 | log_txns_db: true 8 | n_workers: 3 9 | 10 | base_openstack: 11 | n_projects: 2 12 | n_chassis_per_gw_lrp: 3 13 | n_vms_per_project: 3 14 | -------------------------------------------------------------------------------- /test-scenarios/openstack-20-projects-10-vms.yml: -------------------------------------------------------------------------------- 1 | global: 2 | log_cmds: false 3 | cms_name: openstack 4 | 5 | cluster: 6 | clustered_db: true 7 | log_txns_db: true 8 | n_workers: 3 9 | 10 | base_openstack: 11 | n_projects: 20 12 | n_chassis_per_gw_lrp: 3 13 | n_vms_per_project: 10 14 | -------------------------------------------------------------------------------- /test-scenarios/ocp-120-density-heavy.yml: -------------------------------------------------------------------------------- 1 | global: 2 | log_cmds: False 3 | cms_name: ovn_kubernetes 4 | 5 | cluster: 6 | clustered_db: true 7 | monitor_all: true 8 | n_workers: 120 9 | 10 | base_cluster_bringup: 11 | n_pods_per_node: 10 12 | 13 | density_heavy: 14 | n_startup: 5150 15 | n_pods: 5400 16 | -------------------------------------------------------------------------------- /test-scenarios/ocp-120-density-light.yml: -------------------------------------------------------------------------------- 1 | global: 2 | log_cmds: False 3 | cms_name: ovn_kubernetes 4 | 5 | cluster: 6 | clustered_db: true 7 | monitor_all: true 8 | n_workers: 120 9 | 10 | base_cluster_bringup: 11 | n_pods_per_node: 10 12 | 13 | density_light: 14 | n_startup: 27750 15 | n_pods: 29400 16 | -------------------------------------------------------------------------------- /test-scenarios/ocp-180-density-heavy.yml: -------------------------------------------------------------------------------- 1 | global: 2 | log_cmds: False 3 | cms_name: ovn_kubernetes 4 | 5 | cluster: 6 | clustered_db: true 7 | monitor_all: true 8 | n_workers: 180 9 | 10 | base_cluster_bringup: 
11 | n_pods_per_node: 10 12 | 13 | density_heavy: 14 | n_startup: 7850 15 | n_pods: 8100 16 | -------------------------------------------------------------------------------- /test-scenarios/ocp-180-density-light.yml: -------------------------------------------------------------------------------- 1 | global: 2 | log_cmds: False 3 | cms_name: ovn_kubernetes 4 | 5 | cluster: 6 | clustered_db: true 7 | monitor_all: true 8 | n_workers: 180 9 | 10 | base_cluster_bringup: 11 | n_pods_per_node: 10 12 | 13 | density_light: 14 | n_startup: 42850 15 | n_pods: 44100 16 | -------------------------------------------------------------------------------- /test-scenarios/ocp-20-cluster-density.yml: -------------------------------------------------------------------------------- 1 | global: 2 | log_cmds: False 3 | cms_name: ovn_kubernetes 4 | 5 | cluster: 6 | clustered_db: true 7 | monitor_all: true 8 | n_workers: 20 9 | 10 | base_cluster_bringup: 11 | n_pods_per_node: 10 12 | 13 | cluster_density: 14 | n_startup: 400 15 | n_runs: 500 16 | -------------------------------------------------------------------------------- /test-scenarios/ocp-20-density-heavy.yml: -------------------------------------------------------------------------------- 1 | global: 2 | log_cmds: False 3 | cms_name: ovn_kubernetes 4 | 5 | cluster: 6 | clustered_db: true 7 | monitor_all: true 8 | n_workers: 20 9 | 10 | base_cluster_bringup: 11 | n_pods_per_node: 10 12 | 13 | density_heavy: 14 | n_startup: 4000 15 | n_pods: 5000 16 | -------------------------------------------------------------------------------- /test-scenarios/ocp-20-density-light.yml: -------------------------------------------------------------------------------- 1 | global: 2 | log_cmds: False 3 | cms_name: ovn_kubernetes 4 | 5 | cluster: 6 | clustered_db: true 7 | monitor_all: true 8 | n_workers: 20 9 | 10 | base_cluster_bringup: 11 | n_pods_per_node: 10 12 | 13 | density_light: 14 | n_startup: 4000 15 | n_pods: 5000 16 | 
-------------------------------------------------------------------------------- /test-scenarios/ocp-250-density-heavy.yml: -------------------------------------------------------------------------------- 1 | global: 2 | log_cmds: False 3 | cms_name: ovn_kubernetes 4 | 5 | cluster: 6 | clustered_db: true 7 | monitor_all: true 8 | n_workers: 250 9 | 10 | base_cluster_bringup: 11 | n_pods_per_node: 10 12 | 13 | density_heavy: 14 | n_startup: 11000 15 | n_pods: 11250 16 | -------------------------------------------------------------------------------- /test-scenarios/ocp-250-density-light.yml: -------------------------------------------------------------------------------- 1 | global: 2 | log_cmds: False 3 | cms_name: ovn_kubernetes 4 | 5 | cluster: 6 | clustered_db: true 7 | monitor_all: true 8 | n_workers: 250 9 | 10 | base_cluster_bringup: 11 | n_pods_per_node: 10 12 | 13 | density_light: 14 | n_startup: 60000 15 | n_pods: 61250 16 | -------------------------------------------------------------------------------- /test-scenarios/ocp-500-density-heavy.yml: -------------------------------------------------------------------------------- 1 | global: 2 | log_cmds: False 3 | cms_name: ovn_kubernetes 4 | 5 | cluster: 6 | clustered_db: true 7 | monitor_all: true 8 | n_workers: 500 9 | 10 | base_cluster_bringup: 11 | n_pods_per_node: 10 12 | 13 | density_heavy: 14 | n_startup: 22250 15 | n_pods: 22500 16 | -------------------------------------------------------------------------------- /test-scenarios/ocp-60-cluster-density.yml: -------------------------------------------------------------------------------- 1 | global: 2 | log_cmds: False 3 | cms_name: ovn_kubernetes 4 | 5 | cluster: 6 | clustered_db: true 7 | monitor_all: true 8 | n_workers: 60 9 | 10 | base_cluster_bringup: 11 | n_pods_per_node: 10 12 | 13 | cluster_density: 14 | n_startup: 300 15 | n_runs: 500 16 | -------------------------------------------------------------------------------- 
/test-scenarios/ocp-60-density-heavy.yml: -------------------------------------------------------------------------------- 1 | global: 2 | log_cmds: False 3 | cms_name: ovn_kubernetes 4 | 5 | cluster: 6 | clustered_db: true 7 | monitor_all: true 8 | n_workers: 60 9 | 10 | base_cluster_bringup: 11 | n_pods_per_node: 10 12 | 13 | density_heavy: 14 | n_startup: 2450 15 | n_pods: 2700 16 | -------------------------------------------------------------------------------- /test-scenarios/ocp-60-density-light.yml: -------------------------------------------------------------------------------- 1 | global: 2 | log_cmds: False 3 | cms_name: ovn_kubernetes 4 | 5 | cluster: 6 | clustered_db: true 7 | monitor_all: true 8 | n_workers: 60 9 | 10 | base_cluster_bringup: 11 | n_pods_per_node: 10 12 | 13 | density_light: 14 | n_startup: 13450 15 | n_pods: 14700 16 | -------------------------------------------------------------------------------- /test-scenarios/ocp-120-cluster-density.yml: -------------------------------------------------------------------------------- 1 | global: 2 | log_cmds: False 3 | cms_name: ovn_kubernetes 4 | 5 | cluster: 6 | clustered_db: true 7 | monitor_all: true 8 | n_workers: 120 9 | 10 | base_cluster_bringup: 11 | n_pods_per_node: 10 12 | 13 | cluster_density: 14 | n_startup: 800 15 | n_runs: 1000 16 | -------------------------------------------------------------------------------- /test-scenarios/ocp-180-cluster-density.yml: -------------------------------------------------------------------------------- 1 | global: 2 | log_cmds: False 3 | cms_name: ovn_kubernetes 4 | 5 | cluster: 6 | clustered_db: true 7 | monitor_all: true 8 | n_workers: 180 9 | 10 | base_cluster_bringup: 11 | n_pods_per_node: 10 12 | 13 | cluster_density: 14 | n_startup: 2800 15 | n_runs: 3000 16 | -------------------------------------------------------------------------------- /test-scenarios/ocp-250-cluster-density.yml: 
-------------------------------------------------------------------------------- 1 | global: 2 | log_cmds: False 3 | cms_name: ovn_kubernetes 4 | 5 | cluster: 6 | clustered_db: true 7 | monitor_all: true 8 | n_workers: 250 9 | 10 | base_cluster_bringup: 11 | n_pods_per_node: 10 12 | 13 | cluster_density: 14 | n_startup: 3800 15 | n_runs: 4000 16 | -------------------------------------------------------------------------------- /test-scenarios/ocp-500-cluster-density.yml: -------------------------------------------------------------------------------- 1 | global: 2 | log_cmds: False 3 | cms_name: ovn_kubernetes 4 | 5 | cluster: 6 | clustered_db: true 7 | monitor_all: true 8 | n_workers: 500 9 | 10 | base_cluster_bringup: 11 | n_pods_per_node: 10 12 | 13 | cluster_density: 14 | n_startup: 7800 15 | n_runs: 8000 16 | -------------------------------------------------------------------------------- /test-scenarios/ocp-500-density-light.yml: -------------------------------------------------------------------------------- 1 | global: 2 | log_cmds: False 3 | cms_name: ovn_kubernetes 4 | 5 | cluster: 6 | clustered_db: true 7 | monitor_all: true 8 | n_workers: 500 9 | 10 | base_cluster_bringup: 11 | n_pods_per_node: 10 12 | 13 | density_light: 14 | n_startup: 121250 15 | n_pods: 122500 16 | -------------------------------------------------------------------------------- /test-scenarios/ocp-20-cluster-density-netdev.yml: -------------------------------------------------------------------------------- 1 | global: 2 | log_cmds: False 3 | cms_name: ovn_kubernetes 4 | 5 | cluster: 6 | clustered_db: true 7 | datapath_type: netdev 8 | monitor_all: true 9 | n_workers: 20 10 | 11 | base_cluster_bringup: 12 | n_pods_per_node: 10 13 | 14 | cluster_density: 15 | n_startup: 400 16 | n_runs: 500 17 | -------------------------------------------------------------------------------- /test-scenarios/ocp-120-cluster-density-netdev.yml: 
-------------------------------------------------------------------------------- 1 | global: 2 | log_cmds: False 3 | cms_name: ovn_kubernetes 4 | 5 | cluster: 6 | clustered_db: true 7 | datapath_type: netdev 8 | monitor_all: true 9 | n_workers: 120 10 | 11 | base_cluster_bringup: 12 | n_pods_per_node: 10 13 | 14 | cluster_density: 15 | n_startup: 800 16 | n_runs: 1000 17 | -------------------------------------------------------------------------------- /ovn-tester/ovn_exceptions.py: -------------------------------------------------------------------------------- 1 | class OvnTestException(Exception): 2 | pass 3 | 4 | 5 | class OvnInvalidConfigException(OvnTestException): 6 | pass 7 | 8 | 9 | class OvnPingTimeoutException(OvnTestException): 10 | pass 11 | 12 | 13 | class OvnChassisTimeoutException(OvnTestException): 14 | pass 15 | 16 | 17 | class SSHError(OvnTestException): 18 | pass 19 | -------------------------------------------------------------------------------- /.github/pull_request_template.md: -------------------------------------------------------------------------------- 1 | 10 | -------------------------------------------------------------------------------- /ovn-tester/cms/ovn_kubernetes/tests/netpol_large.py: -------------------------------------------------------------------------------- 1 | from cms.ovn_kubernetes.tests.netpol import NetPol 2 | 3 | 4 | class NetpolLarge(NetPol): 5 | def __init__(self, config, clusters, global_cfg): 6 | super().__init__('netpol_large', config, clusters) 7 | 8 | def run(self, clusters, global_cfg): 9 | self.init(clusters, global_cfg) 10 | super().run(clusters, global_cfg) 11 | -------------------------------------------------------------------------------- /ovn-tester/cms/ovn_kubernetes/tests/netpol_small.py: -------------------------------------------------------------------------------- 1 | from cms.ovn_kubernetes.tests.netpol import NetPol 2 | 3 | 4 | class NetpolSmall(NetPol): 5 | def __init__(self, config, 
clusters, global_cfg): 6 | super().__init__('netpol_small', config, clusters) 7 | 8 | def run(self, clusters, global_cfg): 9 | self.init(clusters, global_cfg) 10 | super().run(clusters, global_cfg, True) 11 | -------------------------------------------------------------------------------- /utils/logs-checker.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -o errexit -o pipefail 4 | 5 | if grep -B 30 -A 30 "Result: FAIL" ./test_results/*/test-log \ 6 | || grep -B 30 -A 30 "Traceback" ./test_results/*/test-log \ 7 | || grep "Failed to run test" ./test_results/*/test-log; then 8 | exit 1 9 | fi 10 | 11 | if ! grep "Result: SUCCESS" ./test_results/*/test-log; then 12 | exit 1 13 | fi 14 | 15 | exit 0 16 | -------------------------------------------------------------------------------- /ovn-fake-multinode-utils/playbooks/pull-ovn-tester.yml: -------------------------------------------------------------------------------- 1 | - name: Install ovn-tester 2 | hosts: tester_hosts 3 | tasks: 4 | - name: Copy ovn-tester image 5 | ansible.posix.synchronize: 6 | src: "{{ rundir }}/ovn-tester-image.tar" 7 | dest: "{{ ovn_fake_multinode_target_path }}/ovn-tester-image.tar" 8 | 9 | - name: Load latest containers 10 | ansible.builtin.shell: | 11 | podman load -i {{ ovn_fake_multinode_target_path }}/ovn-tester-image.tar 12 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM ovn/ovn-multi-node 2 | 3 | ARG SSH_KEY 4 | 5 | COPY ovn-tester /ovn-tester 6 | 7 | RUN mkdir -p /root/.ssh/ 8 | COPY $SSH_KEY /root/.ssh/ 9 | 10 | COPY ovn-fake-multinode-utils/process-monitor.py /tmp/ 11 | 12 | # This variable is needed on systems where global python's 13 | # environment is marked as "Externally managed" (PEP 668) to allow pip 14 | # installation of "global" packages. 
15 | ENV PIP_BREAK_SYSTEM_PACKAGES=1 16 | RUN pip3 install -r /ovn-tester/requirements.txt 17 | -------------------------------------------------------------------------------- /ovn-fake-multinode-utils/playbooks/pull-fake-multinode.yml: -------------------------------------------------------------------------------- 1 | - name: Install ovn-fake-multinode 2 | hosts: all 3 | tasks: 4 | - name: Copy ovn-multi-node image 5 | ansible.posix.synchronize: 6 | src: "{{ rundir }}/ovn-fake-multinode/ovn-multi-node-image.tar" 7 | dest: "{{ ovn_fake_multinode_path }}/ovn-multi-node-image.tar" 8 | 9 | - name: Load latest containers 10 | ansible.builtin.shell: | 11 | podman load -i {{ ovn_fake_multinode_path }}/ovn-multi-node-image.tar 12 | -------------------------------------------------------------------------------- /test-scenarios/ocp-20-np-multitenant.yml: -------------------------------------------------------------------------------- 1 | global: 2 | log_cmds: False 3 | cms_name: ovn_kubernetes 4 | 5 | cluster: 6 | clustered_db: true 7 | monitor_all: true 8 | n_workers: 20 9 | 10 | base_cluster_bringup: 11 | n_pods_per_node: 10 12 | 13 | netpol_multitenant: 14 | n_namespaces: 500 15 | ranges: 16 | - r1: 17 | start: 200 18 | n_pods: 5 19 | - r2: 20 | start: 480 21 | n_pods: 20 22 | - r3: 23 | start: 495 24 | n_pods: 100 25 | -------------------------------------------------------------------------------- /test-scenarios/ocp-60-np-multitenant.yml: -------------------------------------------------------------------------------- 1 | global: 2 | log_cmds: False 3 | cms_name: ovn_kubernetes 4 | 5 | cluster: 6 | clustered_db: true 7 | monitor_all: true 8 | n_workers: 60 9 | 10 | base_cluster_bringup: 11 | n_pods_per_node: 10 12 | 13 | netpol_multitenant: 14 | n_namespaces: 500 15 | ranges: 16 | - r1: 17 | start: 200 18 | n_pods: 5 19 | - r2: 20 | start: 480 21 | n_pods: 20 22 | - r3: 23 | start: 495 24 | n_pods: 100 25 | 
-------------------------------------------------------------------------------- /test-scenarios/ocp-120-np-multitenant.yml: -------------------------------------------------------------------------------- 1 | global: 2 | log_cmds: False 3 | cms_name: ovn_kubernetes 4 | 5 | cluster: 6 | clustered_db: true 7 | monitor_all: true 8 | n_workers: 120 9 | 10 | base_cluster_bringup: 11 | n_pods_per_node: 10 12 | 13 | netpol_multitenant: 14 | n_namespaces: 500 15 | ranges: 16 | - r1: 17 | start: 200 18 | n_pods: 5 19 | - r2: 20 | start: 480 21 | n_pods: 20 22 | - r3: 23 | start: 495 24 | n_pods: 100 25 | -------------------------------------------------------------------------------- /test-scenarios/ocp-180-np-multitenant.yml: -------------------------------------------------------------------------------- 1 | global: 2 | log_cmds: False 3 | cms_name: ovn_kubernetes 4 | 5 | cluster: 6 | clustered_db: true 7 | monitor_all: true 8 | n_workers: 180 9 | 10 | base_cluster_bringup: 11 | n_pods_per_node: 10 12 | 13 | netpol_multitenant: 14 | n_namespaces: 500 15 | ranges: 16 | - r1: 17 | start: 200 18 | n_pods: 5 19 | - r2: 20 | start: 480 21 | n_pods: 20 22 | - r3: 23 | start: 495 24 | n_pods: 100 25 | -------------------------------------------------------------------------------- /test-scenarios/ocp-250-np-multitenant.yml: -------------------------------------------------------------------------------- 1 | global: 2 | log_cmds: False 3 | cms_name: ovn_kubernetes 4 | 5 | cluster: 6 | clustered_db: true 7 | monitor_all: true 8 | n_workers: 250 9 | 10 | base_cluster_bringup: 11 | n_pods_per_node: 10 12 | 13 | netpol_multitenant: 14 | n_namespaces: 500 15 | ranges: 16 | - r1: 17 | start: 200 18 | n_pods: 5 19 | - r2: 20 | start: 480 21 | n_pods: 20 22 | - r3: 23 | start: 495 24 | n_pods: 100 25 | -------------------------------------------------------------------------------- /test-scenarios/ocp-500-np-multitenant.yml: 
-------------------------------------------------------------------------------- 1 | global: 2 | log_cmds: False 3 | cms_name: ovn_kubernetes 4 | 5 | cluster: 6 | clustered_db: true 7 | monitor_all: true 8 | n_workers: 500 9 | 10 | base_cluster_bringup: 11 | n_pods_per_node: 10 12 | 13 | netpol_multitenant: 14 | n_namespaces: 500 15 | ranges: 16 | - r1: 17 | start: 200 18 | n_pods: 5 19 | - r2: 20 | start: 480 21 | n_pods: 20 22 | - r3: 23 | start: 495 24 | n_pods: 100 25 | -------------------------------------------------------------------------------- /ovn-fake-multinode-utils/playbooks/install-fake-multinode.yml: -------------------------------------------------------------------------------- 1 | - name: Install ovn-fake-multinode 2 | hosts: all 3 | tasks: 4 | - name: Ensure ovn-fake-multinode remote dirs 5 | file: 6 | path: "{{ ovn_fake_multinode_target_path }}" 7 | state: directory 8 | 9 | - name: Sync ovn-fake-multinode 10 | ansible.posix.synchronize: 11 | src: "{{ ovn_fake_multinode_local_path }}" 12 | dest: "{{ ovn_fake_multinode_target_path }}" 13 | delete: true 14 | recursive: true 15 | -------------------------------------------------------------------------------- /test-scenarios/ovn-20.yml: -------------------------------------------------------------------------------- 1 | global: 2 | log_cmds: False 3 | cms_name: ovn_kubernetes 4 | 5 | cluster: 6 | clustered_db: true 7 | monitor_all: true 8 | n_workers: 20 9 | 10 | base_cluster_bringup: 11 | n_pods_per_node: 10 12 | 13 | density_light: 14 | n_pods: 10 15 | 16 | density_heavy: 17 | n_pods: 10 18 | pods_vip_ratio: 2 19 | 20 | cluster_density: 21 | n_runs: 10 22 | 23 | netpol_multitenant: 24 | n_namespaces: 500 25 | ranges: 26 | - r1: 27 | start: 200 28 | n_pods: 5 29 | - r2: 30 | start: 480 31 | n_pods: 20 32 | - r3: 33 | start: 495 34 | n_pods: 100 35 | -------------------------------------------------------------------------------- /test-scenarios/ovn-120.yml: 
-------------------------------------------------------------------------------- 1 | global: 2 | log_cmds: False 3 | cms_name: ovn_kubernetes 4 | 5 | cluster: 6 | clustered_db: true 7 | monitor_all: true 8 | n_workers: 120 9 | 10 | base_cluster_bringup: 11 | n_pods_per_node: 10 12 | 13 | density_light: 14 | n_pods: 20 15 | 16 | density_heavy: 17 | n_pods: 20 18 | pods_vip_ratio: 2 19 | 20 | cluster_density: 21 | n_runs: 10 22 | 23 | netpol_multitenant: 24 | n_namespaces: 500 25 | ranges: 26 | - r1: 27 | start: 200 28 | n_pods: 5 29 | - r2: 30 | start: 480 31 | n_pods: 20 32 | - r3: 33 | start: 495 34 | n_pods: 100 35 | -------------------------------------------------------------------------------- /test-scenarios/ovn-500.yml: -------------------------------------------------------------------------------- 1 | global: 2 | log_cmds: False 3 | cms_name: ovn_kubernetes 4 | 5 | cluster: 6 | clustered_db: true 7 | monitor_all: true 8 | n_workers: 500 9 | 10 | base_cluster_bringup: 11 | n_pods_per_node: 10 12 | 13 | density_light: 14 | n_pods: 20 15 | 16 | density_heavy: 17 | n_pods: 20 18 | pods_vip_ratio: 2 19 | 20 | cluster_density: 21 | n_runs: 10 22 | 23 | netpol_multitenant: 24 | n_namespaces: 500 25 | ranges: 26 | - r1: 27 | start: 200 28 | n_pods: 5 29 | - r2: 30 | start: 480 31 | n_pods: 20 32 | - r3: 33 | start: 495 34 | n_pods: 100 35 | -------------------------------------------------------------------------------- /ovn-fake-multinode-utils/playbooks/collect-logs.yml: -------------------------------------------------------------------------------- 1 | - name: Collect remote logs 2 | hosts: all 3 | tasks: 4 | - name: Collect logs from containers 5 | shell: | 6 | /tmp/log-collector.sh {{ inventory_hostname }} {{ node_name }} 7 | - name: Collect perf results 8 | shell: | 9 | /tmp/perf.sh {{ inventory_hostname }} {{ node_name }} 10 | - name: Retrieve logs 11 | fetch: 12 | src: "/tmp/{{ inventory_hostname }}.tgz" 13 | dest: "{{ results_dir }}/" 14 | flat: yes 15 | 
- name: Retrieve perf logs 16 | fetch: 17 | src: "/tmp/{{ inventory_hostname }}-perf.tgz" 18 | dest: "{{ results_dir }}/" 19 | flat: yes 20 | -------------------------------------------------------------------------------- /test-scenarios/ovn-etcd-low-scale.yml: -------------------------------------------------------------------------------- 1 | global: 2 | log_cmds: False 3 | cms_name: ovn_kubernetes 4 | 5 | cluster: 6 | clustered_db: False 7 | enable_ssl: False 8 | monitor_all: true 9 | use_ovsdb_etcd: true 10 | n_workers: 2 11 | 12 | base_cluster_bringup: 13 | n_pods_per_node: 2 14 | 15 | density_light: 16 | n_pods: 10 17 | 18 | density_heavy: 19 | n_pods: 10 20 | pods_vip_ratio: 2 21 | 22 | cluster_density: 23 | n_runs: 10 24 | 25 | netpol_multitenant: 26 | n_namespaces: 5 27 | ranges: 28 | - r1: 29 | start: 2 30 | n_pods: 2 31 | - r2: 32 | start: 3 33 | n_pods: 5 34 | - r3: 35 | start: 4 36 | n_pods: 10 37 | -------------------------------------------------------------------------------- /ovn-fake-multinode-utils/playbooks/configure-tester.yml: -------------------------------------------------------------------------------- 1 | - name: Configure the tester 2 | hosts: tester_hosts 3 | tasks: 4 | - name: Copy physical deployment file to tester host 5 | ansible.builtin.copy: 6 | src: "{{ phys_deployment }}" 7 | dest: /tmp/physical-deployment.yml 8 | 9 | - name: Copy physical deployment file to tester container 10 | ansible.builtin.shell: | 11 | podman cp /tmp/physical-deployment.yml ovn-tester:/physical-deployment.yml 12 | 13 | - name: Copy test file to tester host 14 | ansible.builtin.copy: 15 | src: "{{ test_file }}" 16 | dest: /tmp/test-scenario.yml 17 | 18 | - name: Copy test file to the tester container 19 | ansible.builtin.shell: | 20 | podman cp /tmp/test-scenario.yml ovn-tester:/test-scenario.yml 21 | -------------------------------------------------------------------------------- /ovn-fake-multinode-utils/scripts/perf.sh: 
-------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | host=$1 4 | node_name=$2 5 | 6 | function collect_flamegraph_data() { 7 | c=$1 8 | mkdir ${host}-perf/$c 9 | pid=$(podman exec $c /bin/sh -c "pidof -s perf") 10 | podman exec $c /bin/sh -c "kill $pid && tail --pid=$pid -f /dev/null" 11 | podman exec $c /bin/sh -c "perf script report flamegraph -o /tmp/ovn-flamegraph.html" 12 | podman cp $c:/tmp/ovn-flamegraph.html ${host}-perf/$c/ 13 | } 14 | 15 | mkdir /tmp/${host}-perf 16 | 17 | pushd /tmp 18 | for c in $(podman ps --format "{{.Names}}" --filter "name=${node_name}"); do 19 | collect_flamegraph_data $c 20 | done 21 | 22 | for c in $(podman ps --format "{{.Names}}" --filter "name=ovn-central"); do 23 | collect_flamegraph_data $c 24 | done 25 | 26 | tar cvfz ${host}-perf.tgz ${host}-perf 27 | rm -rf ${host}-perf 28 | popd 29 | -------------------------------------------------------------------------------- /utils/helpers.py: -------------------------------------------------------------------------------- 1 | try: 2 | from collections.abc import Mapping 3 | except ImportError: 4 | from collections import Mapping 5 | 6 | import os 7 | from typing import Dict, Tuple 8 | 9 | 10 | def get_node_config(config: Dict) -> Tuple[str, Dict]: 11 | mappings: Dict = {} 12 | if isinstance(config, Mapping): 13 | host = list(config.keys())[0] 14 | if config[host]: 15 | mappings = config[host] 16 | else: 17 | host = config 18 | return host, mappings 19 | 20 | 21 | def get_prefix_suffix(hosts: str) -> Tuple[str, str]: 22 | prefix = os.path.commonprefix(hosts) 23 | rev = [x[::-1] for x in hosts] 24 | suffix = os.path.commonprefix(rev)[::-1] 25 | return prefix, suffix 26 | 27 | 28 | def get_shortname(host: str, prefix: str, suffix: str) -> str: 29 | return host[len(prefix) : len(host) - len(suffix)] 30 | -------------------------------------------------------------------------------- /.github/workflows/test.yml: 
-------------------------------------------------------------------------------- 1 | name: Check Style 2 | 3 | on: 4 | push: 5 | branches: [ main ] 6 | pull_request: 7 | branches: [ main ] 8 | 9 | jobs: 10 | lint: 11 | 12 | runs-on: ubuntu-latest 13 | strategy: 14 | matrix: 15 | python-version: ['3.8', '3.9', '3.10', '3.11', '3.12', '3.13'] 16 | 17 | steps: 18 | - uses: actions/checkout@v4 19 | - name: Set up Python ${{ matrix.python-version }} 20 | uses: actions/setup-python@v5 21 | with: 22 | python-version: ${{ matrix.python-version }} 23 | - name: Install dependencies 24 | run: | 25 | python -m pip install --upgrade pip 26 | python -m pip install flake8 black==22.12.0 27 | # Ignore E203, W503 and W504 which is against PEP 8 style 28 | - name: flake8 29 | run: | 30 | flake8 --ignore=E203,W504,W503 . 31 | - name: Black 32 | run: | 33 | black --check --diff . 34 | -------------------------------------------------------------------------------- /utils/mine-poll-intervals.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | function mine_data_for_poll_intervals { 4 | res=$(echo "$1" \ 5 | | grep 'Unreasonably' | cut -c 45- \ 6 | | sed 's/.*Unreasonably long \([0-9]*\)ms .*/\1/' | sort -h \ 7 | | datamash count 1 min 1 max 1 median 1 mean 1 perc 1 \ 8 | -t ' ' --format="%10.1f" ) 9 | if [ -z "$res" ]; then 10 | printf "%10.1f\n" 0 11 | else 12 | echo "$res" 13 | fi 14 | } 15 | 16 | n_files=$# 17 | echo 18 | echo "Unreasonably long poll intervals:" 19 | echo 20 | echo "---------------------------------------------------------------------" 21 | echo " Count Min Max Median Mean 95 percentile" 22 | echo "---------------------------------------------------------------------" 23 | 24 | for file in "$@"; do 25 | mine_data_for_poll_intervals "$(cat $file)" 26 | done 27 | 28 | if test ${n_files} -gt 1; then 29 | echo "---------------------------------------------------------------------" 30 | mine_data_for_poll_intervals 
"$(cat $@)" 31 | fi 32 | echo 33 | -------------------------------------------------------------------------------- /ovn-fake-multinode-utils/playbooks/install-dependencies.yml: -------------------------------------------------------------------------------- 1 | - name: Install dependencies 2 | hosts: all 3 | tasks: 4 | - name: Install required packages 5 | ansible.builtin.package: 6 | name: 7 | - git 8 | - gcc 9 | - python3-pyyaml 10 | - python3-devel 11 | - containers-common 12 | state: present 13 | when: ansible_os_family == "RedHat" 14 | 15 | # 'skip-broken' might not work with specific ansible versions 16 | # so ignore_errors for packages that might not be there. 17 | # https://github.com/ansible/ansible/commit/6bcb494f8306 18 | - name: Install optional packages 19 | ansible.builtin.package: 20 | name: 21 | - openvswitch 22 | state: present 23 | ignore_errors: true 24 | when: ansible_os_family == "RedHat" 25 | 26 | - name: Install required packages 27 | ansible.builtin.apt: 28 | name: 29 | - git 30 | - gcc 31 | - openvswitch-switch 32 | - python3-yaml 33 | - python3-all-dev 34 | update_cache: true 35 | state: present 36 | when: ansible_os_family == "Debian" 37 | 38 | - name: Install container command 39 | ansible.builtin.package: 40 | name: 41 | - podman 42 | state: present 43 | -------------------------------------------------------------------------------- /ovn-fake-multinode-utils/get-config-value.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | import argparse 3 | import yaml 4 | 5 | 6 | def parser_setup(parser: argparse.ArgumentParser) -> None: 7 | group = parser.add_argument_group() 8 | group.add_argument( 9 | "config", 10 | metavar="CONFIG_FILE", 11 | help="Configuration file", 12 | ) 13 | group.add_argument( 14 | "section", 15 | metavar="SECTION", 16 | help="Configuration file section", 17 | ) 18 | group.add_argument( 19 | "variable", metavar="VAR", help="Configuration variable to 
retrieve" 20 | ) 21 | group.add_argument( 22 | "--default", help="Default value if VAR is not in CONFIG" 23 | ) 24 | 25 | 26 | def get_config_value(args: argparse.Namespace) -> str: 27 | with open(args.config, 'r') as config_file: 28 | parsed = yaml.safe_load(config_file) 29 | 30 | try: 31 | return parsed[args.section][args.variable] 32 | except KeyError: 33 | if args.default: 34 | return args.default 35 | raise 36 | 37 | 38 | def main(): 39 | parser = argparse.ArgumentParser(description="Read YAML config value") 40 | parser_setup(parser) 41 | args = parser.parse_args() 42 | val = get_config_value(args) 43 | print(val, end='') 44 | 45 | 46 | if __name__ == "__main__": 47 | main() 48 | -------------------------------------------------------------------------------- /test-scenarios/ovn-low-scale-ic.yml: -------------------------------------------------------------------------------- 1 | global: 2 | log_cmds: False 3 | run_ipv4: True 4 | run_ipv6: True 5 | cms_name: ovn_kubernetes 6 | 7 | cluster: 8 | clustered_db: true 9 | monitor_all: true 10 | n_az: 2 11 | n_workers: 4 12 | n_relays: 2 13 | 14 | ext_cmd: 15 | - c0: 16 | iteration: 1 17 | node: "ovn-scale-*" 18 | cmd: "perf record -a -g -q -F 99 -o perf.data" 19 | pid_name: "ovn-controller" 20 | pid_opt: "-p" 21 | background_opt: True 22 | test: "density_light" 23 | - c1: 24 | iteration: 9 25 | node: "ovn-scale-*" 26 | cmd: "kill" 27 | pid_name: "perf" 28 | test: "density_light" 29 | - c2: 30 | iteration: 1 31 | node: "ovn-central*" 32 | cmd: "perf record -a -g -q -F 99 -o perf.data" 33 | pid_name: "ovn-northd" 34 | pid_opt: "-p" 35 | background_opt: True 36 | test: "density_light" 37 | - c3: 38 | iteration: 9 39 | node: "ovn-central*" 40 | cmd: "kill" 41 | pid_name: "perf" 42 | test: "density_light" 43 | 44 | base_cluster_bringup: 45 | n_pods_per_node: 2 46 | 47 | density_light: 48 | n_pods: 10 49 | 50 | density_heavy: 51 | n_startup: 10 52 | pods_vip_ratio: 2 53 | n_pods: 20 54 | 55 | cluster_density: 56 | 
n_startup: 6 57 | n_runs: 10 58 | 59 | netpol_multitenant: 60 | n_namespaces: 5 61 | ranges: 62 | - r1: 63 | start: 2 64 | n_pods: 2 65 | - r2: 66 | start: 3 67 | n_pods: 5 68 | - r3: 69 | start: 4 70 | n_pods: 10 71 | 72 | netpol_cross_ns: 73 | n_ns: 10 74 | pods_ns_ratio: 5 75 | 76 | netpol_small: 77 | n_ns: 2 78 | pods_ns_ratio: 16 79 | n_labels: 4 80 | 81 | netpol_large: 82 | n_ns: 2 83 | pods_ns_ratio: 16 84 | n_labels: 4 85 | 86 | service_route: 87 | n_lb: 16 88 | n_backends: 4 89 | -------------------------------------------------------------------------------- /test-scenarios/ovn-low-scale.yml: -------------------------------------------------------------------------------- 1 | global: 2 | log_cmds: False 3 | run_ipv4: True 4 | run_ipv6: True 5 | cms_name: ovn_kubernetes 6 | 7 | cluster: 8 | clustered_db: true 9 | log_txns_db: true 10 | monitor_all: true 11 | n_workers: 2 12 | n_relays: 3 13 | 14 | ext_cmd: 15 | - c0: 16 | iteration: 1 17 | node: "ovn-scale-*" 18 | cmd: "perf record -a -g -q -F 99 -o perf.data" 19 | pid_name: "ovn-controller" 20 | pid_opt: "-p" 21 | background_opt: True 22 | test: "density_light" 23 | - c1: 24 | iteration: 9 25 | node: "ovn-scale-*" 26 | cmd: "kill" 27 | pid_name: "perf" 28 | test: "density_light" 29 | - c2: 30 | iteration: 1 31 | node: "ovn-central*" 32 | cmd: "perf record -a -g -q -F 99 -o perf.data" 33 | pid_name: "ovn-northd" 34 | pid_opt: "-p" 35 | background_opt: True 36 | test: "density_light" 37 | - c3: 38 | iteration: 9 39 | node: "ovn-central*" 40 | cmd: "kill" 41 | pid_name: "perf" 42 | test: "density_light" 43 | 44 | base_cluster_bringup: 45 | n_pods_per_node: 2 46 | 47 | density_light: 48 | n_pods: 10 49 | 50 | density_heavy: 51 | n_startup: 10 52 | pods_vip_ratio: 2 53 | n_pods: 20 54 | 55 | cluster_density: 56 | n_startup: 6 57 | n_runs: 10 58 | 59 | netpol_multitenant: 60 | n_namespaces: 5 61 | ranges: 62 | - r1: 63 | start: 2 64 | n_pods: 2 65 | - r2: 66 | start: 3 67 | n_pods: 5 68 | - r3: 69 | start: 4 70 
class DensityLight(ExtCmd):
    """Density 'light' test: provision pods one at a time and ping them.

    Config section 'density_light':
      n_pods    -- total number of pods created across all clusters.
      n_startup -- pods bulk-provisioned (passive) during startup; the
                   remaining n_pods - n_startup are added one per
                   iteration in the measured phase.
    """

    def __init__(self, config, clusters, global_cfg):
        super().__init__(config, clusters)
        test_config = config.get('density_light', dict())
        self.config = DensityCfg(
            n_pods=test_config.get('n_pods', 0),
            n_startup=test_config.get('n_startup', 0),
            # Unused by this test; kept 0 to fill the shared
            # DensityCfg tuple layout.
            pods_vip_ratio=0,
        )

    def run(self, clusters, global_cfg):
        # All pods created by this test share a single namespace.
        ns = Namespace(clusters, 'ns_density_light', global_cfg)
        # Spread the startup pods across the availability zones.
        n_startup_per_cluster = distribute_n_tasks_per_clusters(
            self.config.n_startup, len(clusters)
        )

        # Startup phase: one iteration per cluster, bulk passive ports;
        # stats are reported only in brief form.
        with Context(
            clusters, 'density_light_startup', len(clusters), brief_report=True
        ) as ctx:
            for i in ctx:
                ports = clusters[i].provision_ports(
                    n_startup_per_cluster[i], passive=True
                )
                ns.add_ports(ports, i)

        # Measured phase: one pod per iteration, round-robin over clusters.
        # NOTE(review): assumes n_pods >= n_startup; a negative iteration
        # count makes this phase a no-op -- confirm that is intended.
        n_iterations = self.config.n_pods - self.config.n_startup
        with Context(
            clusters, 'density_light', n_iterations, test=self
        ) as ctx:
            for i in ctx:
                ovn = clusters[i % len(clusters)]
                ports = ovn.provision_ports(1)
                ns.add_ports(ports[0:1], i % len(clusters))
                ovn.ping_ports(ports)

        if not global_cfg.cleanup:
            return
        # Cleanup phase: tear down the namespace and everything in it.
        with Context(
            clusters, 'density_light_cleanup', brief_report=True
        ) as ctx:
            ns.unprovision()
class Context:
    """One named phase of a test, usable as a context manager and iterator.

    On entry it clears collected statistics and registers itself as the
    globally active context; on exit it waits for every cluster's
    northbound DB to sync and then reports the phase's statistics.
    Iterating yields indices 0..max_iterations-1 while recording each
    iteration's wall-clock duration and success/failure outcome.
    """

    def __init__(
        self,
        clusters: List,
        test_name: str,
        max_iterations: int = 1,
        brief_report: bool = False,
        test=None,
    ):
        # -1 so the first __next__() call yields iteration 0.
        self.iteration = -1
        self.test_name = test_name
        self.max_iterations = max_iterations
        self.brief_report = brief_report
        # perf_counter timestamp of the running iteration's start;
        # None until the first iteration begins.
        self.iteration_start = None
        self.failed = False
        # Optional test object whose exec_cmd() runs configured external
        # commands between iterations.
        self.test = test
        self.clusters = clusters

    def __enter__(self):
        """Clear stats and publish this context as the active one."""
        global active_context
        log.info(f'Entering context {self.test_name}')
        ovn_stats.clear()
        active_context = self
        return self

    def __exit__(self, type, value, traceback):
        """Wait for OVN to settle on every cluster, then report stats."""
        log.info('Waiting for the OVN state synchronization')
        for cluster in self.clusters:
            cluster.nbctl.sync(timeout=1800)
        ovn_stats.report(self.test_name, brief=self.brief_report)
        log.info(f'Exiting context {self.test_name}')

    def __iter__(self):
        return self

    def __next__(self):
        """Close out the previous iteration, then start the next one.

        Records the previous iteration's duration and outcome, runs any
        external commands registered for it, and raises StopIteration
        once max_iterations have been yielded.
        """
        now = time.perf_counter()
        if self.iteration_start:
            duration = now - self.iteration_start
            ovn_stats.add(ITERATION_STAT_NAME, duration, failed=self.failed)
            log.log(
                logging.WARNING if self.failed else logging.INFO,
                f'Context {self.test_name}, Iteration {self.iteration}, '
                f'Result: {"FAILURE" if self.failed else "SUCCESS"}',
            )
            self.failed = False
            if self.test:
                # exec external cmd
                self.test.exec_cmd(self.iteration, self.test_name)
        self.iteration_start = now
        if self.iteration < self.max_iterations - 1:
            self.iteration += 1
            log.info(f'Context {self.test_name}, Iteration {self.iteration}')
            return self.iteration
        raise StopIteration

    def fail(self):
        """Mark the current iteration as failed (reported on next step)."""
        self.failed = True
from datetime import datetime
import numpy as np
import sys

# Report, per logical port, the latency between the OVS port binding
# (from the test log) and ovn-controller setting the ovn-installed flag
# (from the ovn-controller log).
if len(sys.argv) < 3:
    print(f'Usage {sys.argv[0]} ' f'ovn-binding.log ovn-installed.log')
    # Bail out: without both log files there is nothing to compute.
    sys.exit(1)

with open(sys.argv[2], 'r') as installed_file:
    ovn_installed = installed_file.read().strip().splitlines()

with open(sys.argv[1], 'r') as binding_file:
    ovn_binding = binding_file.read().strip().splitlines()

# Binding timestamps keyed by port name.  Entries are popped as matching
# 'ovn-installed' records are found; whatever remains never got installed.
binding_times = dict()
for record in ovn_binding:
    date, time, port = record.split(' ')

    date = datetime.strptime(f'{date} {time}', '%Y-%m-%d %H:%M:%S,%f')
    binding_times[port] = date

latency_per_port = dict()
for record in ovn_installed:
    date, time, port = record.split(' ')

    if port in latency_per_port:
        # ovn-installed more than once, ignoring.
        continue

    binding_time = binding_times.pop(port)

    date = datetime.strptime(f'{date} {time}', '%Y-%m-%d %H:%M:%S.%fZ')
    latency_per_port[port] = date - binding_time

failures = len(binding_times)
latencies = []

for latency in latency_per_port.values():
    ms = int(latency.total_seconds() * 1000)
    latencies.append(ms)

print(
    '''
Latency between logical port binding, i.e. creation of the corresponding
OVS port on the worker node, and ovn-controller setting up ovn-installed
flag for that port according to test-log and ovn-controller logs.

Note:
This data can not be directly interpreted as OVN end-to-end latency,
because port bindings are always happening after logical ports were already
created in Northbound database. And for bulk creations, bindings performed
also in bulk after all creations are done.

Look for 'Not installed' below to find for which ports ovn-installed was
never set (at least, there is no evidence in logs).
'''
)

print('min :', min(latencies, default=0), 'ms')
print('max :', max(latencies, default=0), 'ms')
# Guard the aggregate stats: with no installed ports the original code
# crashed with ZeroDivisionError / np.percentile on an empty sequence.
print('avg :', sum(latencies) // len(latencies) if latencies else 0, 'ms')
print('95% :', int(np.percentile(latencies, 95)) if latencies else 0, 'ms')
print('total :', len(latency_per_port))
print('failures:', failures)
print()

# Ports left in binding_times were bound but never reported installed.
# (They are disjoint from latency_per_port, whose entries were popped
# above -- the old 'port in binding_times' check inside the latency loop
# could never fire, so failed ports were silently omitted.)
for port in binding_times:
    print(f'{port:<10}: Not installed')
for port, latency in latency_per_port.items():
    ms = int(latency.total_seconds() * 1000)
    print(f'{port:<10}: {ms}')
[c.worker_nodes for c in clusters] 21 | self.nodes = [ 22 | n 23 | for n in list(chain.from_iterable(worker_nodes)) 24 | if fnmatch(n.container, node) 25 | ] 26 | self.nodes.extend( 27 | [ 28 | n 29 | for n in list(chain.from_iterable(central_nodes)) 30 | if fnmatch(n.container, node) 31 | ] 32 | ) 33 | 34 | def is_valid(self): 35 | return ( 36 | self.iteration is not None 37 | and self.cmd 38 | and self.test 39 | and self.nodes 40 | ) 41 | 42 | def exec(self): 43 | return [self._node_exec(node) for node in self.nodes] 44 | 45 | def _node_exec(self, node): 46 | cmd = self.cmd 47 | 48 | if self.pid_name: 49 | stdout = StringIO() 50 | node.run(f'pidof -s {self.pid_name}', stdout=stdout) 51 | cmd += f' {self.pid_opt} {stdout.getvalue().strip()}' 52 | 53 | if self.background_opt: 54 | cmd += ' >/dev/null 2>&1 &' 55 | 56 | stdout = StringIO() 57 | node.run(cmd, stdout=stdout) 58 | return stdout.getvalue().strip() 59 | 60 | 61 | class ExtCmd: 62 | def __init__(self, config, clusters): 63 | self.cmd_map = defaultdict(list) 64 | for ext_cmd in config.get('ext_cmd', list()): 65 | cmd_unit = ExtCmdUnit(ext_cmd, clusters) 66 | if cmd_unit.is_valid(): 67 | self.cmd_map[(cmd_unit.iteration, cmd_unit.test)].append( 68 | cmd_unit 69 | ) 70 | 71 | def exec_cmd(self, iteration, test): 72 | ext_cmds = self.cmd_map.get((iteration, test)) 73 | if not ext_cmds: 74 | return 75 | 76 | return {ext_cmd: ext_cmd.exec() for ext_cmd in ext_cmds} 77 | -------------------------------------------------------------------------------- /utils/mine-db-poll-intervals.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | function mine_data_for_poll_intervals { 4 | res=$(echo "$1" \ 5 | | sed '/\(writes\|involuntary\)/s/$/\n\n/' \ 6 | | grep -A 2 -B 1 'Unreasonably' \ 7 | | grep -v '\--' | cut -c 45- | paste -d " " - - - - \ 8 | | sed 's/.*took \([0-9]*\)ms .*long \([0-9]*\)ms .* \([0-9]*\)ms system.* \([0-9]*\) writes.*/\2 \3 \4 compaction \1/' \ 
9 | | sed 's/.*long \([0-9]*\)ms .* \([0-9]*\)ms system.*/\1 \2/' \ 10 | | sort -h | grep -v compaction | cut -f 1 -d ' ' \ 11 | | datamash count 1 min 1 max 1 median 1 mean 1 perc 1 \ 12 | -t ' ' --format="%10.1f" ) 13 | if [ -z "$res" ]; then 14 | printf "%10.1f\n" 0 15 | else 16 | echo "$res" 17 | fi 18 | } 19 | 20 | function mine_data_for_compaction { 21 | res=$(echo "$1" \ 22 | | grep 'compaction took' | cut -c 43- \ 23 | | sed 's/.*Database compaction took \(.*\)ms/\1/' | sort -h \ 24 | | datamash count 1 min 1 max 1 median 1 mean 1 perc 1 \ 25 | -t ' ' --format="%10.1f" ) 26 | if [ -z "$res" ]; then 27 | printf "%10.1f\n" 0 28 | else 29 | echo "$res" 30 | fi 31 | } 32 | 33 | n_files=$# 34 | echo 35 | echo "Unreasonably long poll intervals that didn't involve database compaction:" 36 | echo 37 | echo "Note:" 38 | echo "It's not possible to exclude compactions under 1 second long, so these," 39 | echo "if any, are accounted in below statistics. Mistakes are also possible." 40 | echo 41 | echo "---------------------------------------------------------------------" 42 | echo " Count Min Max Median Mean 95 percentile" 43 | echo "---------------------------------------------------------------------" 44 | 45 | for file in "$@"; do 46 | mine_data_for_poll_intervals "$(cat $file)" 47 | done 48 | 49 | if test ${n_files} -gt 1; then 50 | echo "---------------------------------------------------------------------" 51 | mine_data_for_poll_intervals "$(cat $@)" 52 | fi 53 | echo 54 | 55 | echo "Database compaction:" 56 | echo 57 | echo "Note: Compactions under 1 second long are not counted." 
def monitor(suffix: str, out_file: str, exit_file: str) -> None:
    """Sample CPU and RSS of OVS/OVN-related processes until told to stop.

    Polls psutil for processes whose name or command line matches one of
    the prefixes in process_names, recording per-timestamp samples into
    'data'.  When 'exit_file' appears on disk (or on Ctrl-C) the samples
    are dumped as JSON to 'out_file' and the loop ends.
    """
    data: Dict = {}
    while True:
        try:
            # The exit file is the external stop signal; reuse the
            # KeyboardInterrupt path so both exits dump the data.
            if os.path.exists(exit_file):
                raise KeyboardInterrupt

            processes = set()
            for p in psutil.process_iter():
                if any(name in p.name() for name in process_names):
                    processes.add(p)
                elif p.name() != 'monitor' and any(
                    name in part
                    for part in p.cmdline()
                    for name in process_names
                ):
                    processes.add(p)

            if len(processes) == 0:
                time.sleep(0.5)
                continue

            tme = time.time()
            for p in processes:
                try:
                    # Prefer a .pid/.py argument as the display name so
                    # e.g. wrapper interpreters get a meaningful label.
                    name = p.name()
                    for arg in p.cmdline():
                        if arg.endswith('.pid') or arg.endswith('.py'):
                            name = arg.split('/')[-1].split('.')[0]
                            break

                    name = name + "|" + suffix + "|" + str(p.pid)

                    # cpu_percent(seconds) call will block
                    # for the amount of seconds specified.
                    cpu = p.cpu_percent(0.5)
                    mem = p.memory_info().rss
                except psutil.NoSuchProcess:
                    # Process went away. Skipping.
                    continue

                if not data.get(tme):
                    data[tme] = {}

                data[tme][name] = {'cpu': cpu, 'rss': mem}

        except KeyboardInterrupt:
            with open(out_file, "w") as f:
                json.dump(data, f, indent=4, sort_keys=True)
            break

        except Exception:
            # Ignoring all unexpected exceptions to avoid losing data.
            continue
| ./ovn_cluster.sh stop 35 | podman system prune -f 36 | 37 | - name: Ensure number of inotify instances for containers 38 | shell: | 39 | sysctl -w fs.inotify.max_user_instances=2048 40 | 41 | - name: Ensure the number of allowed SSH sessions 42 | shell: | 43 | sed -i '/^MaxSessions /d' /etc/ssh/sshd_config 44 | echo "MaxSessions 1024" >> /etc/ssh/sshd_config 45 | 46 | - name: Reload SSH service 47 | ansible.builtin.systemd: 48 | name: sshd 49 | state: reloaded 50 | when: ansible_os_family == "RedHat" 51 | 52 | - name: Reload SSH service 53 | ansible.builtin.systemd: 54 | name: ssh 55 | state: reloaded 56 | when: ansible_os_family == "Debian" 57 | 58 | - name: Ensure the neighbor cache size on central nodes 59 | when: ovn_central is defined 60 | shell: | 61 | sysctl -w net.ipv4.neigh.default.gc_thresh1=8192 62 | sysctl -w net.ipv4.neigh.default.gc_thresh2=32768 63 | sysctl -w net.ipv4.neigh.default.gc_thresh3=65536 64 | 65 | - name: Create ovs bridges for ovn fake multinode 66 | shell: | 67 | set -e 68 | ovs-vsctl --if-exists del-br br-ovn -- --if-exists del-br br-ovn-ext 69 | ovs-vsctl add-br br-ovn -- add-br br-ovn-ext 70 | 71 | - name: Add nics to br-ovn if configured 72 | when: internal_iface is defined 73 | shell: | 74 | ovs-vsctl add-port br-ovn {{ internal_iface }} 75 | 76 | - name: Add nics to br-ovn-ext if configured 77 | when: external_iface is defined 78 | shell: | 79 | ovs-vsctl add-port br-ovn-ext {{ external_iface }} 80 | 81 | - name: Deploy stats collection script 82 | copy: 83 | src: "{{ rundir }}/log-collector.sh" 84 | dest: /tmp/log-collector.sh 85 | mode: '0744' 86 | 87 | - name: Deploy perf script 88 | copy: 89 | src: "{{ rundir }}/perf.sh" 90 | dest: /tmp/perf.sh 91 | mode: '0744' 92 | 93 | - name: Deploy process monitor script 94 | copy: 95 | src: "{{ rundir }}/process-monitor.py" 96 | dest: /tmp/process-monitor.py 97 | mode: '0744' 98 | -------------------------------------------------------------------------------- 
/ovn-fake-multinode-utils/generate-hosts.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/python3 2 | 3 | from __future__ import print_function 4 | 5 | import helpers 6 | import yaml 7 | import sys 8 | from pathlib import Path 9 | from typing import Dict 10 | 11 | 12 | def usage(name): 13 | print( 14 | f""" 15 | {name} DEPLOYMENT ovn-fake-multinode-target github-repo branch 16 | where DEPLOYMENT is the YAML file defining the deployment. 17 | """, 18 | file=sys.stderr, 19 | ) 20 | 21 | 22 | def generate_node_string(host: str, **kwargs) -> None: 23 | args = ' '.join(f"{key}={value}" for key, value in kwargs.items()) 24 | print(f"{host} {args}") 25 | 26 | 27 | def generate_node(config: Dict, internal_iface: str, **kwargs) -> None: 28 | host: str = config['name'] 29 | internal_iface = config.get('internal-iface', internal_iface) 30 | generate_node_string( 31 | host, 32 | internal_iface=internal_iface, 33 | **kwargs, 34 | ) 35 | 36 | 37 | def generate_tester(config: Dict, internal_iface: str) -> None: 38 | ssh_key = config["ssh_key"] 39 | ssh_key = Path(ssh_key).resolve() 40 | generate_node( 41 | config, 42 | internal_iface, 43 | ovn_tester="true", 44 | ssh_key=str(ssh_key), 45 | ) 46 | 47 | 48 | def generate_nodes(nodes_config: Dict, internal_iface: str, **kwargs): 49 | for node_config in nodes_config: 50 | host, node_config = helpers.get_node_config(node_config) 51 | iface = node_config.get('internal-iface', internal_iface) 52 | generate_node_string( 53 | host, 54 | internal_iface=iface, 55 | **kwargs, 56 | ) 57 | 58 | 59 | def generate(input_file: str, target: str, repo: str, branch: str) -> None: 60 | with open(input_file, 'r') as yaml_file: 61 | config = yaml.safe_load(yaml_file) 62 | user = config.get('user', 'root') 63 | prefix = config.get('prefix', 'ovn-scale') 64 | tester_config = config['tester-node'] 65 | internal_iface = config['internal-iface'] 66 | 67 | print('[tester_hosts]') 68 | 
generate_tester(tester_config, internal_iface) 69 | print('\n[central_hosts]') 70 | generate_nodes( 71 | config['central-nodes'], internal_iface, ovn_central="true" 72 | ) 73 | print('\n[worker_hosts]') 74 | generate_nodes(config['worker-nodes'], internal_iface) 75 | print() 76 | 77 | print('[all:vars]') 78 | print('ansible_user=' + user) 79 | print('become=true') 80 | print('node_name=' + prefix) 81 | print('ovn_fake_multinode_target_path=' + target) 82 | print('ovn_fake_multinode_path=' + target + '/ovn-fake-multinode') 83 | print('ovn_fake_multinode_repo=' + repo) 84 | print('ovn_fake_multinode_branch=' + branch) 85 | print('rundir=' + target) 86 | 87 | 88 | def main(): 89 | if len(sys.argv) != 5: 90 | usage(sys.argv[0]) 91 | sys.exit(1) 92 | 93 | generate(sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4]) 94 | 95 | 96 | if __name__ == "__main__": 97 | main() 98 | -------------------------------------------------------------------------------- /.cirrus.yml: -------------------------------------------------------------------------------- 1 | low_scale_task: 2 | 3 | compute_engine_instance: 4 | matrix: 5 | - image_project: fedora-cloud 6 | image: family/fedora-cloud-38 7 | - image_project: ubuntu-os-cloud 8 | image: family/ubuntu-2404-lts-amd64 9 | platform: linux 10 | memory: 8G 11 | disk: 40 12 | 13 | env: 14 | DEPENDENCIES: git ansible podman 15 | FEDORA_DEP: ansible-collection-ansible-posix 16 | ansible-collection-ansible-utils 17 | CIRRUS_WORKING_DIR: /root/ovn-heater 18 | PHYS_DEPLOYMENT: ${CIRRUS_WORKING_DIR}/physical-deployments/ci.yml 19 | 20 | runtime_cache: 21 | folder: runtime-cache 22 | 23 | # Make sure we use all the disk available to us. 
Similar to the steps 24 | # described at: 25 | # https://cloud.google.com/compute/docs/disks/resize-persistent-disk 26 | # 27 | # Use "findmnt -n -o SOURCE /" and parse outputs of the form: 28 | # /dev/ 29 | # OR 30 | # /dev/[text] 31 | resize_disk_script: 32 | - d=$(findmnt -n -o SOURCE / | cut -f 1 -d '[' | cut -f 3 -d '/' | grep -oE '[a-zA-Z]+') 33 | - p=$(findmnt -n -o SOURCE / | cut -f 1 -d '[' | cut -f 3 -d '/' | grep -oE '[0-9]+') 34 | - t=$(df --output=fstype /root | grep -v Type) 35 | - growpart /dev/$d $p || true 36 | - '[ "$t" = "ext4" ] && resize2fs /dev/$d$p || true' 37 | - '[ "$t" = "xfs" ] && xfs_growfs -d /root || true' 38 | - '[ "$t" = "btrfs" ] && btrfs filesystem resize max /root || true' 39 | 40 | configure_ssh_script: 41 | - mkdir -p /root/.ssh/ 42 | - ssh-keygen -t rsa -N '' -q -f /root/.ssh/id_rsa 43 | - ssh-keyscan $(hostname) >> /root/.ssh/known_hosts 44 | - cat /root/.ssh/id_rsa.pub >> /root/.ssh/authorized_keys 45 | - chmod og-wx /root/.ssh/authorized_keys 46 | - ssh root@$(hostname) -v echo Hello 47 | 48 | install_dependencies_script: 49 | - 'if [ $(which dnf) ]; then 50 | dnf install -y ${DEPENDENCIES} ${FEDORA_DEP}; 51 | fi' 52 | - 'if [ $(which apt) ]; then 53 | apt update && apt install -y ${DEPENDENCIES}; 54 | fi' 55 | 56 | unpack_caches_script: 57 | - mkdir -p runtime runtime-cache 58 | - tar -xzf runtime-cache/runtime.tar.gz || true 59 | - podman load -i runtime/ovn-fake-multinode/ovn-multi-node-image.tar || true 60 | 61 | install_script: 62 | - 'sed -i "s//$(hostname)/g" ${PHYS_DEPLOYMENT}' 63 | # test with ubuntu-based ovn-fake-multinode if test runs on Ubuntu image 64 | - 'if [ $(which apt) ]; then 65 | export OS_BASE=ubuntu; 66 | export OS_IMAGE_OVERRIDE=ubuntu:rolling; 67 | fi; 68 | ./do.sh install' 69 | 70 | pack_caches_script: 71 | - rm -rf runtime-cache/* 72 | - tar -czf runtime-cache/runtime.tar.gz runtime 73 | 74 | upload_caches: 75 | - runtime 76 | 77 | test_ovn_kubernetes_script: 78 | - 'sed -i "s/^ log_cmds\: False/ 
log_cmds\: True/" 79 | test-scenarios/ovn-low-scale*.yml' 80 | - ./do.sh run test-scenarios/ovn-low-scale.yml low-scale 81 | - ./do.sh run test-scenarios/ovn-low-scale-ic.yml low-scale-ic 82 | 83 | test_openstack_script: 84 | - 'sed -i "s/^ log_cmds\: false/ log_cmds\: true/" 85 | test-scenarios/openstack-low-scale.yml' 86 | - ./do.sh run test-scenarios/openstack-low-scale.yml openstack-low-scale 87 | 88 | check_logs_script: 89 | - ./utils/logs-checker.sh 90 | 91 | always: 92 | test_logs_artifacts: 93 | path: test_results/** 94 | -------------------------------------------------------------------------------- /ovn-fake-multinode-utils/scripts/log-collector.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | host=$1 4 | node_name=$2 5 | limit=2 6 | 7 | mkdir /tmp/${host} 8 | pushd /tmp 9 | for c in $(podman ps --format "{{.Names}}" --filter "name=${node_name}"); do 10 | mkdir ${host}/$c 11 | podman exec $c ps -aux > ${host}/$c/ps 12 | podman exec $c bash -c 'touch /tmp/process-monitor.exit && sleep 5' 13 | podman cp $c:/var/log/ovn/ovn-controller.log ${host}/$c/ 14 | podman cp $c:/var/log/openvswitch/ovs-vswitchd.log ${host}/$c/ 15 | podman cp $c:/var/log/openvswitch/ovsdb-server.log ${host}/$c/ 16 | podman cp $c:/etc/openvswitch/conf.db ${host}/$c/ 17 | podman cp $c:/var/log/process-stats.json ${host}/$c/ 18 | done 19 | 20 | # Dump ovs groups just for latest ${limit} nodes 21 | for c in $(podman ps --format "{{.Names}}" --filter "name=${node_name}" --last "${limit}"); do 22 | podman exec $c bash -c 'ovs-ofctl dump-flows br-int > /tmp/open-flows.log' 23 | podman exec $c bash -c 'ovs-ofctl dump-groups br-int > /tmp/groups.log' 24 | podman cp $c:/tmp/open-flows.log ${host}/$c/ 25 | podman cp $c:/tmp/groups.log ${host}/$c/ 26 | 27 | # OF rules/group dumps can be very large text files. 28 | # Pre-archive them immediately. 
29 | pushd ${host}/$c 30 | tar cvfz OF.tgz open-flows.log groups.log 31 | rm -f open-flows.log groups.log 32 | popd 33 | done 34 | 35 | for c in $(podman ps --format "{{.Names}}" --filter "name=ovn-central"); do 36 | mkdir ${host}/$c 37 | podman exec $c ps -aux > ${host}/$c/ps-before-compaction 38 | podman exec $c ovs-appctl --timeout=30 -t /var/run/ovn/ovnsb_db.ctl ovsdb-server/compact 39 | podman exec $c ovs-appctl --timeout=30 -t /var/run/ovn/ovnnb_db.ctl ovsdb-server/compact 40 | podman exec $c ps -aux > ${host}/$c/ps-after-compaction 41 | podman exec $c bash -c 'touch /tmp/process-monitor.exit && sleep 5' 42 | podman cp $c:/var/log/ovn/ovn-controller.log ${host}/$c/ 43 | podman cp $c:/var/log/ovn/ovn-northd.log ${host}/$c/ 44 | podman cp $c:/var/log/ovn/ovsdb-server-nb.log ${host}/$c/ 45 | podman cp $c:/var/log/ovn/ovsdb-server-sb.log ${host}/$c/ 46 | podman cp $c:/etc/ovn/ovnnb_db.db ${host}/$c/ 47 | podman cp $c:/etc/ovn/ovnsb_db.db ${host}/$c/ 48 | podman cp $c:/var/log/openvswitch/ovs-vswitchd.log ${host}/$c/ 49 | podman cp $c:/var/log/openvswitch/ovsdb-server.log ${host}/$c/ 50 | podman cp $c:/var/log/openvswitch/ovn-nbctl.log ${host}/$c/ 51 | podman cp $c:/var/log/process-stats.json ${host}/$c/ 52 | done 53 | 54 | for c in $(podman ps --format "{{.Names}}" --filter "name=ovn-relay"); do 55 | mkdir ${host}/$c 56 | podman exec $c ps -aux > ${host}/$c/ps-before-compaction 57 | podman exec $c ovs-appctl --timeout=30 -t /var/run/ovn/ovnsb_db.ctl ovsdb-server/compact 58 | podman exec $c ps -aux > ${host}/$c/ps-after-compaction 59 | podman exec $c bash -c 'touch /tmp/process-monitor.exit && sleep 5' 60 | podman cp $c:/var/log/ovn/ovsdb-server-sb.log ${host}/$c/ 61 | podman cp $c:/var/log/process-stats.json ${host}/$c/ 62 | done 63 | 64 | for c in $(podman ps --format "{{.Names}}" --filter "name=ovn-tester"); do 65 | mkdir ${host}/$c 66 | podman exec $c bash -c 'touch /tmp/process-monitor.exit && sleep 5' 67 | podman exec $c bash -c "mkdir -p /htmls; cp -f 
from collections import namedtuple
from ovn_context import Context
from cms.ovn_kubernetes import Namespace
from ovn_ext_cmd import ExtCmd
from itertools import chain
import ovn_exceptions

# Network policy test parameters:
#   n_ns          - number of namespaces to provision.
#   n_labels      - number of distinct "labels" (port groups) to spread
#                   ports across.
#   pods_ns_ratio - number of pods (ports) provisioned per namespace.
NpCfg = namedtuple('NpCfg', ['n_ns', 'n_labels', 'pods_ns_ratio'])


class NetPol(ExtCmd):
    """Base class for network-policy scale tests.

    Provisions a set of namespaces with default-deny policies and then,
    per iteration, opens selective allow rules between "labelled" groups
    of ports and verifies connectivity with pings.
    """

    def __init__(self, name, config, clusters):
        """Read the test section named 'name' from 'config'.

        Raises OvnInvalidConfigException when the label count is not in
        the usable range (must be > 2 and smaller than the total number
        of provisioned ports so every label gets at least one port).
        """
        super().__init__(config, clusters)
        test_config = config.get(name, dict())
        self.config = NpCfg(
            n_ns=test_config.get('n_ns', 0),
            n_labels=test_config.get('n_labels', 0),
            pods_ns_ratio=test_config.get('pods_ns_ratio', 0),
        )
        n_ports = self.config.pods_ns_ratio * self.config.n_ns
        if self.config.n_labels >= n_ports or self.config.n_labels <= 2:
            raise ovn_exceptions.OvnInvalidConfigException()

        self.name = name
        # label index -> list of ports carrying that label.
        self.all_labels = dict()
        self.all_ns = []
        # per-namespace list of provisioned ports.
        self.ports = [[] for _ in range(self.config.n_ns)]

    def init(self, clusters, global_cfg):
        """Provision all namespaces/ports and assign labels round-robin.

        Namespaces are spread across availability zones (clusters) and
        each gets a default-deny policy (IPv4 only here — presumably v6
        deny is handled elsewhere; TODO confirm).
        """
        with Context(clusters, f'{self.name}_startup', brief_report=True) as _:
            for i in range(self.config.n_ns):
                az_index = i % len(clusters)
                ovn = clusters[az_index]
                self.ports[i] = ovn.provision_ports(self.config.pods_ns_ratio)
                ns = Namespace(clusters, f'NS_{self.name}_{i}', global_cfg)
                ns.add_ports(self.ports[i], az_index)
                ns.default_deny(4, az_index)
                self.all_ns.append(ns)
            # Distribute every provisioned port over the labels
            # round-robin: port k gets label k % n_labels.
            for i, port in enumerate(
                list(chain.from_iterable(self.ports))
            ):
                self.all_labels.setdefault(
                    i % self.config.n_labels, []
                ).append(port)

    def run(self, clusters, global_cfg, exclude=False):
        """One iteration per namespace: allow traffic between label pairs.

        For each label 'lbl' the destination group is the next label
        (lbl + 1) % n_labels; with exclude=True the destination is
        instead "all ports of the namespace NOT in either label".
        Connectivity is then verified with a ping between the first
        port of each group.
        """
        with Context(clusters, self.name, self.config.n_ns, test=self) as ctx:
            for i in ctx:
                ns = self.all_ns[i]
                for lbl in range(self.config.n_labels):
                    label = self.all_labels[lbl]
                    sub_ns_src = ns.create_sub_ns(label, global_cfg)

                    n = (lbl + 1) % self.config.n_labels
                    if exclude:
                        ex_label = label + self.all_labels[n]
                        nlabel = [
                            p for p in self.ports[i] if p not in ex_label
                        ]
                    else:
                        nlabel = self.all_labels[n]
                    sub_ns_dst = ns.create_sub_ns(nlabel, global_cfg)

                    if global_cfg.run_ipv4:
                        ns.allow_sub_namespace(sub_ns_src, sub_ns_dst, 4)
                    if global_cfg.run_ipv6:
                        ns.allow_sub_namespace(sub_ns_src, sub_ns_dst, 6)
                    # Ping from the first port of the source label to the
                    # first port of the destination group, per family.
                    worker = label[0].metadata
                    if label[0].ip and nlabel[0].ip:
                        worker.ping_port(clusters[0], label[0], nlabel[0].ip)
                    if label[0].ip6 and nlabel[0].ip6:
                        worker.ping_port(clusters[0], label[0], nlabel[0].ip6)

        if not global_cfg.cleanup:
            return
        with Context(
            clusters, f'{self.name}_cleanup', brief_report=True
        ) as ctx:
            for ns in self.all_ns:
                ns.unprovision()
from collections import namedtuple
from ovn_context import Context
from cms.ovn_kubernetes import Namespace
from ovn_ext_cmd import ExtCmd

import netaddr
import ovn_load_balancer as lb

# Test parameters: number of load balancers and number of backends
# behind each VIP.
ServiceRouteCfg = namedtuple('ServiceRouteCfg', ['n_lb', 'n_backends'])


DEFAULT_VIP_SUBNET = netaddr.IPNetwork('90.0.0.0/8')
DEFAULT_VIP_SUBNET6 = netaddr.IPNetwork('9::/32')
DEFAULT_VIP_PORT = 80
DEFAULT_BACKEND_PORT = 8080


class ServiceRoute(ExtCmd):
    """Scale test provisioning cluster-wide and per-node load balancers.

    Each iteration provisions a group of ports, exposes all but the
    first one as backends of a cluster LB VIP, and additionally creates
    one LB per worker node whose VIP is the node's external address.
    """

    def __init__(self, config, clusters, global_cfg):
        super().__init__(config, clusters)
        test_config = config.get('service_route', dict())
        self.config = ServiceRouteCfg(
            n_lb=test_config.get('n_lb', 16),
            n_backends=test_config.get('n_backends', 4),
        )
        # Infinite-ish generators of unique VIP addresses (v4/v6).
        self.vips = DEFAULT_VIP_SUBNET.iter_hosts()
        self.vips6 = DEFAULT_VIP_SUBNET6.iter_hosts()

    def provide_cluster_lb(self, name, cluster, vip, backends, version):
        """Create a cluster-wide LB with one VIP in front of 'backends'."""
        load_balancer = lb.OvnLoadBalancer(name, cluster.nbctl)
        cluster.provision_lb(load_balancer)

        load_balancer.add_vip(
            vip,
            DEFAULT_VIP_PORT,
            backends,
            DEFAULT_BACKEND_PORT,
            version,
        )

    def provide_node_load_balancer(
        self, name, cluster, node, backends, version
    ):
        """Create a per-node LB using the node's external IP as the VIP.

        The LB is attached to the node's gateway router and switch so
        external traffic to the node is balanced over 'backends'.
        """
        load_balancer = lb.OvnLoadBalancer(name, cluster.nbctl)
        vip = node.ext_rp.ip.ip6 if version == 6 else node.ext_rp.ip.ip4
        load_balancer.add_vip(
            vip, DEFAULT_VIP_PORT, backends, DEFAULT_BACKEND_PORT, version
        )
        load_balancer.add_to_routers([node.gw_router.name])
        load_balancer.add_to_switches([node.switch.name])

    def run(self, clusters, global_cfg):
        ns = Namespace(clusters, 'ns_service_route', global_cfg)
        with Context(
            clusters, 'service_route', self.config.n_lb, test=self
        ) as ctx:
            for i in ctx:
                # Spread iterations over availability zones round-robin.
                ovn = clusters[i % len(clusters)]
                # One extra port: ports[0] is unused as a backend.
                ports = ovn.provision_ports(self.config.n_backends + 1)
                ns.add_ports(ports, i % len(clusters))

                if ports[1].ip:
                    self.provide_cluster_lb(
                        f'slb-cluster-{i}',
                        ovn,
                        next(self.vips),
                        ports[1:],
                        4,
                    )
                if ports[1].ip6:
                    self.provide_cluster_lb(
                        f'slb6-cluster-{i}',
                        ovn,
                        next(self.vips6),
                        ports[1:],
                        6,
                    )

                for w in ovn.worker_nodes:
                    # NOTE(review): assumes a unique index per (i, node);
                    # verify w.id < n_lb holds, otherwise names collide.
                    index = i * self.config.n_lb + w.id
                    if ports[1].ip:
                        self.provide_node_load_balancer(
                            f'slb-{index}', ovn, w, ports[1:], 4
                        )
                    if ports[1].ip6:
                        self.provide_node_load_balancer(
                            f'slb6-{index}', ovn, w, ports[1:], 6
                        )

        if not global_cfg.cleanup:
            return
        with Context(
            clusters, 'service_route_cleanup', brief_report=True
        ) as ctx:
            ns.unprovision()
from collections import namedtuple
from ovn_context import Context
from cms.ovn_kubernetes import Namespace
from ovn_ext_cmd import ExtCmd
import ovn_exceptions


DENSITY_N_BUILD_PODS = 6
DENSITY_N_PODS = 4
DENSITY_N_TOT_PODS = DENSITY_N_BUILD_PODS + DENSITY_N_PODS

# In ClusterDensity.run_iteration() we assume at least 4 different pods
# can be used as backends.
assert DENSITY_N_PODS >= 4

# n_runs: total iterations; n_startup: iterations provisioned quickly
# ("passive", no pings / no build pods) before the measured phase.
ClusterDensityCfg = namedtuple('ClusterDensityCfg', ['n_runs', 'n_startup'])


class ClusterDensity(ExtCmd):
    """Kubernetes-style cluster-density scale test.

    Every iteration simulates one "project": short lived build pods,
    long lived test pods, and a namespace load balancer with VIPs
    backed by subsets of the test pods.
    """

    def __init__(self, config, clusters, global_cfg):
        """Raises OvnInvalidConfigException if n_startup > n_runs."""
        super().__init__(config, clusters)
        test_config = config.get('cluster_density', dict())
        self.config = ClusterDensityCfg(
            n_runs=test_config.get('n_runs', 0),
            n_startup=test_config.get('n_startup', 0),
        )
        if self.config.n_startup > self.config.n_runs:
            raise ovn_exceptions.OvnInvalidConfigException()

    def run_iteration(self, clusters, index, global_cfg, passive):
        """Provision one namespace; returns it for later cleanup.

        With passive=True (startup phase) build pods and pings are
        skipped so provisioning is cheap.
        """
        ns = Namespace(clusters, f'NS_density_{index}', global_cfg)
        az_index = index % len(clusters)
        ovn = clusters[az_index]
        # Create DENSITY_N_BUILD_PODS short lived "build" pods.
        if not passive:
            build_ports = ovn.provision_ports(DENSITY_N_BUILD_PODS, passive)
            ns.add_ports(build_ports, az_index)
            ovn.ping_ports(build_ports)

        # Add DENSITY_N_PODS test pods and provision them as backends
        # to the namespace load balancer.
        ports = ovn.provision_ports(DENSITY_N_PODS, passive)
        ns.add_ports(ports, az_index)
        ns.create_load_balancer(az_index)
        ovn.provision_lb(ns.load_balancer)
        if global_cfg.run_ipv4:
            ns.provision_vips_to_load_balancers(
                [ports[0:2], ports[2:3], ports[3:4]],
                4,
                az_index,
            )
        if global_cfg.run_ipv6:
            ns.provision_vips_to_load_balancers(
                [ports[0:2], ports[2:3], ports[3:4]],
                6,
                az_index,
            )

        # Ping the test pods and remove the short lived ones.
        if not passive:
            ovn.ping_ports(ports)
            ns.unprovision_ports(build_ports, az_index)
        return ns

    def run(self, clusters, global_cfg):
        all_ns = []
        # Fast, unmeasured provisioning of the first n_startup projects.
        with Context(
            clusters, 'cluster_density_startup', brief_report=True
        ) as ctx:
            for index in range(self.config.n_startup):
                all_ns.append(
                    self.run_iteration(
                        clusters, index, global_cfg, passive=True
                    )
                )

        # Measured phase: the remaining n_runs - n_startup projects.
        with Context(
            clusters,
            'cluster_density',
            self.config.n_runs - self.config.n_startup,
            test=self,
        ) as ctx:
            for i in ctx:
                index = self.config.n_startup + i
                all_ns.append(
                    self.run_iteration(
                        clusters, index, global_cfg, passive=False
                    )
                )

        if not global_cfg.cleanup:
            return
        with Context(
            clusters, 'cluster_density_cleanup', brief_report=True
        ) as ctx:
            for ns in all_ns:
                ns.unprovision()
import collections
import functools
import numpy
import ovn_context
import ovn_exceptions
import pandas as pd
import plotly.express as px
import time

# (function qualname, context iteration) -> list of (duration, failed).
timed_functions = collections.defaultdict(list)


def timeit(func):
    """Decorator that records the duration and success of each call.

    OvnTestException raised by 'func' is swallowed: the call is
    recorded as failed and None is returned instead of re-raising.
    """

    @functools.wraps(func)
    def _timeit(*args, **kwargs):
        start = time.perf_counter()
        failed = False
        value = None
        try:
            value = func(*args, **kwargs)
        except ovn_exceptions.OvnTestException:
            failed = True
        finally:
            duration = time.perf_counter() - start
            add(func.__qualname__, duration, failed)
        return value

    return _timeit


def clear():
    """Drop all recorded measurements."""
    timed_functions.clear()


def add(fname, duration, failed):
    """Record one sample for 'fname' in the currently active context.

    A failed sample also marks the active context as failed.
    """
    if failed:
        ovn_context.active_context.fail()
    iteration = ovn_context.active_context.iteration
    elem = (duration, failed)
    timed_functions[(fname, iteration)].append(elem)


def report(test_name, brief=False):
    """Write '<test_name>-report.html' summarizing all recorded samples.

    The report always contains a statistics table (one row per timed
    function); unless 'brief' is True it also embeds a per-iteration
    bar chart for every function.  No file is written when nothing was
    recorded.
    """
    all_stats = collections.defaultdict(list)
    fail_stats = collections.defaultdict(list)
    chart_stats = collections.defaultdict(list)
    headings = [
        'Min (s)',
        'Median (s)',
        '90%ile (s)',
        '99%ile (s)',
        'Max (s)',
        'Mean (s)',
        'Total (s)',
        'Count',
        'Failed',
    ]
    for (f, i), measurements in timed_functions.items():
        for d, r in measurements:
            all_stats[f].append(d)
            chart_stats[f].append([f'{i}', f, d])
            if r:
                fail_stats[f].append(i)

    # Nothing recorded: don't generate an empty report.
    if not all_stats:
        return

    all_avgs = []
    all_f = []
    for f, measurements in sorted(all_stats.items()):
        all_avgs.append(
            [
                numpy.min(measurements),
                numpy.median(measurements),
                numpy.percentile(measurements, 90),
                numpy.percentile(measurements, 99),
                numpy.max(measurements),
                numpy.mean(measurements),
                numpy.sum(measurements),
                len(measurements),
                len(fail_stats[f]),
            ]
        )
        all_f.append(f)

    df = pd.DataFrame(all_avgs, index=all_f, columns=headings)
    stats_html = df.to_html()

    with open(
        f'{test_name}-report.html', 'w', encoding='utf-8'
    ) as report_file:
        # NOTE(review): this empty write looks like a stripped '<html>'
        # opening tag in the extracted source — confirm against upstream.
        report_file.write('')
        report_file.write(stats_html)

        if brief:
            report_file.write('')
            return

        for f, values in sorted(chart_stats.items()):
            df = pd.DataFrame(
                values, columns=['Iteration', 'Counter', 'Value (s)']
            )
            chart = px.bar(
                df, x='Iteration', y='Value (s)', color='Counter', title=f
            )
            chart.update_traces(
                marker_color='#005cb8',
                opacity=1.0,
                marker_line_width=1.5,
                marker_line_color='#005cb8',
            )
            report_file.write(
                chart.to_html(
                    full_html=False,
                    include_plotlyjs='cdn',
                    default_width='90%',
                    default_height='90%',
                )
            )

        report_file.write('')
from collections import namedtuple

from randmac import RandMac
from ovn_utils import LSwitch, OvnIcNbctl
from ovn_context import Context
from ovn_ext_cmd import ExtCmd

# n_pods_per_node: pods (logical ports) provisioned on every worker.
ClusterBringupCfg = namedtuple('ClusterBringupCfg', ['n_pods_per_node'])


class BaseClusterBringup(ExtCmd):
    """Brings up the base OVN topology on every availability zone.

    With more than one cluster, an interconnect (IC) transit switch is
    created and each cluster's router is attached to it; cross-AZ
    connectivity is then verified with pings.
    """

    def __init__(self, config, clusters, global_cfg):
        super().__init__(config, clusters)
        test_config = config.get('base_cluster_bringup', dict())
        self.config = ClusterBringupCfg(
            n_pods_per_node=test_config.get('n_pods_per_node', 0),
        )
        # IC mode only makes sense with multiple clusters; the first
        # cluster is used as the reference AZ for connectivity checks.
        self.ic_cluster = clusters[0] if len(clusters) > 1 else None

    def create_transit_switch(self):
        """Create the interconnect transit switch 'ts' (IC mode only)."""
        if self.ic_cluster is None:
            return

        # OVN expects the probe interval in seconds here, config is ms.
        inactivity_probe = (
            self.ic_cluster.cluster_cfg.db_inactivity_probe // 1000
        )
        ic_remote = f'tcp:{self.ic_cluster.cluster_cfg.node_net.ip + 2}:6645'
        OvnIcNbctl(None, ic_remote, inactivity_probe).ts_add()

    def connect_transit_switch(self, cluster):
        """Attach 'cluster''s router to the transit switch.

        The first worker node of the cluster is made the gateway
        chassis for the router port and marked as interconn-capable.
        """
        if self.ic_cluster is None:
            return
        uuid = cluster.nbctl.ls_get_uuid('ts', 10)
        cluster.ts_switch = LSwitch(
            name='ts',
            cidr=cluster.cluster_cfg.ts_net.n4,
            cidr6=cluster.cluster_cfg.ts_net.n6,
            uuid=uuid,
        )
        rp = cluster.nbctl.lr_port_add(
            cluster.router,
            f'lr-cluster{cluster.az}-to-ts',
            RandMac(),
            cluster.cluster_cfg.ts_net.forward(cluster.az),
        )
        cluster.nbctl.ls_port_add(
            cluster.ts_switch, f'ts-to-lr-cluster{cluster.az}', rp
        )
        cluster.nbctl.lr_port_set_gw_chassis(
            rp, cluster.worker_nodes[0].container
        )
        cluster.worker_nodes[0].vsctl.set_global_external_id(
            'ovn-is-interconn', 'true'
        )

    def check_ic_connectivity(self, clusters):
        """Ping every other cluster's worker ports from the IC cluster."""
        if self.ic_cluster is None:
            return
        for cluster in clusters:
            if self.ic_cluster == cluster:
                continue
            for w in cluster.worker_nodes:
                port = w.lports[0]
                if port.ip:
                    self.ic_cluster.worker_nodes[0].run_ping(
                        self.ic_cluster,
                        self.ic_cluster.worker_nodes[0].lports[0].name,
                        port.ip,
                    )
                if port.ip6:
                    self.ic_cluster.worker_nodes[0].run_ping(
                        self.ic_cluster,
                        self.ic_cluster.worker_nodes[0].lports[0].name,
                        port.ip6,
                    )

    def run(self, clusters, global_cfg):
        self.create_transit_switch()

        for c, cluster in enumerate(clusters):
            # create ovn topology
            with Context(
                clusters, 'base_cluster_bringup', len(cluster.worker_nodes)
            ) as ctx:
                cluster.create_cluster_router(f'lr-cluster{c + 1}')
                cluster.create_cluster_join_switch(f'ls-join{c + 1}')
                cluster.create_cluster_load_balancer(
                    f'lb-cluster{c + 1}', global_cfg
                )
                self.connect_transit_switch(cluster)
                # One measured iteration per worker node: provision the
                # node, its pods, its load balancers, and verify pings.
                for i in ctx:
                    worker = cluster.worker_nodes[i]
                    worker.provision(cluster)
                    ports = worker.provision_ports(
                        cluster, self.config.n_pods_per_node
                    )
                    worker.provision_load_balancers(cluster, ports, global_cfg)
                    worker.ping_ports(cluster, ports)
                cluster.provision_lb_group(f'cluster-lb-group{c + 1}')

        # check ic connectivity
        self.check_ic_connectivity(clusters)
from collections import namedtuple
from ovn_context import Context
from cms.ovn_kubernetes import Namespace
from ovn_ext_cmd import ExtCmd
from ovn_utils import distribute_n_tasks_per_clusters
import ovn_load_balancer as lb
import ovn_exceptions
import netaddr


# By default simulate a deployment with 2 pods, one of which is the
# backend of a service.
DENSITY_PODS_VIP_RATIO = 2


# n_pods: total pods to create; n_startup: pods provisioned "passively"
# before the measured phase; pods_vip_ratio: pods created per VIP.
DensityCfg = namedtuple(
    'DensityCfg', ['n_pods', 'n_startup', 'pods_vip_ratio']
)

DEFAULT_VIP_SUBNET = netaddr.IPNetwork('100.0.0.0/8')
DEFAULT_VIP_SUBNET6 = netaddr.IPNetwork('100::/32')
DEFAULT_VIP_PORT = 80
DEFAULT_BACKEND_PORT = 8080


class DensityHeavy(ExtCmd):
    """Density test: many pods, each group fronted by a load balancer."""

    def __init__(self, config, clusters, global_cfg):
        """Raises OvnInvalidConfigException if n_startup > n_pods."""
        super().__init__(config, clusters)
        test_config = config.get('density_heavy', dict())
        pods_vip_ratio = test_config.get(
            'pods_vip_ratio', DENSITY_PODS_VIP_RATIO
        )
        self.config = DensityCfg(
            n_pods=test_config.get('n_pods', 0),
            n_startup=test_config.get('n_startup', 0),
            pods_vip_ratio=pods_vip_ratio,
        )
        if self.config.n_startup > self.config.n_pods:
            raise ovn_exceptions.OvnInvalidConfigException()
        self.lb_list = []
        # Generators of unique VIP addresses (v4/v6).
        self.vips = DEFAULT_VIP_SUBNET.iter_hosts()
        self.vips6 = DEFAULT_VIP_SUBNET6.iter_hosts()

    def create_lb(self, cluster, name, vip, backends, version):
        """Create one LB with a single VIP and remember it in lb_list."""
        load_balancer = lb.OvnLoadBalancer(f'lb_{name}', cluster.nbctl)
        cluster.provision_lb(load_balancer)

        load_balancer.add_vip(
            vip,
            DEFAULT_VIP_PORT,
            backends,
            DEFAULT_BACKEND_PORT,
            version,
        )
        self.lb_list.append(load_balancer)

    def run_iteration(self, clusters, ns, index, global_cfg, passive):
        """Provision pods_vip_ratio pods; the first becomes an LB backend."""
        ovn = clusters[index % len(clusters)]
        ports = ovn.provision_ports(self.config.pods_vip_ratio, passive)
        ns.add_ports(ports, index % len(clusters))
        backends = ports[0:1]
        if global_cfg.run_ipv4:
            name = f'density_heavy_{index}'
            self.create_lb(ovn, name, next(self.vips), backends, 4)
        if global_cfg.run_ipv6:
            name = f'density_heavy6_{index}'
            self.create_lb(ovn, name, next(self.vips6), backends, 6)
        if not passive:
            ovn.ping_ports(ports)

    def run(self, clusters, global_cfg):
        if self.config.pods_vip_ratio == 0:
            return

        ns = Namespace(clusters, 'ns_density_heavy', global_cfg)
        n_startup_per_cluster = distribute_n_tasks_per_clusters(
            self.config.n_startup, len(clusters)
        )

        # Passive startup phase, split evenly between clusters.
        with Context(
            clusters, 'density_heavy_startup', brief_report=True
        ) as ctx:
            for i in range(len(clusters)):
                for j in range(
                    0, n_startup_per_cluster[i], self.config.pods_vip_ratio
                ):
                    # NOTE(review): 'j' restarts at 0 for every cluster,
                    # and run_iteration picks clusters[j % len(clusters)]
                    # — confirm this distributes startup pods to cluster
                    # 'i' as intended.
                    self.run_iteration(
                        clusters, ns, j, global_cfg, passive=True
                    )

        # Measured phase for the remaining pods.
        with Context(
            clusters,
            'density_heavy',
            (self.config.n_pods - self.config.n_startup)
            / self.config.pods_vip_ratio,
            test=self,
        ) as ctx:
            for i in ctx:
                index = i * self.config.pods_vip_ratio + self.config.n_startup
                self.run_iteration(
                    clusters, ns, index, global_cfg, passive=False
                )

        if not global_cfg.cleanup:
            return
        with Context(
            clusters, 'density_heavy_cleanup', len(clusters), brief_report=True
        ) as ctx:
            for i in ctx:
                clusters[i].unprovision_vips()
            ns.unprovision()
import itertools
from typing import List, Dict, Optional

VALID_PROTOCOLS = ['tcp', 'udp', 'sctp']


class InvalidProtocol(Exception):
    """Raised when none of the requested protocols is valid."""

    def __init__(self, invalid_protocols):
        self.args = invalid_protocols

    def __str__(self):
        return f"Invalid Protocol: {self.args}"


class OvnLoadBalancer:
    """One logical load balancer, backed by one NB LB row per protocol."""

    def __init__(
        self,
        lb_name: str,
        nbctl,
        vips: Optional[Dict] = None,
        protocols: List = VALID_PROTOCOLS,
    ):
        '''
        Create load balancers with optional vips.
        lb_name: String used as basis for load balancer name.
        nbctl: Connection used for ovn-nbctl commands
        vips: Optional dictionary mapping VIPs to a list of backend IPs.
        protocols: List of protocols to use when creating Load Balancers.
        Raises InvalidProtocol if no requested protocol is supported.
        '''
        self.nbctl = nbctl
        self.protocols = [
            prot for prot in protocols if prot in VALID_PROTOCOLS
        ]
        if not self.protocols:
            raise InvalidProtocol(protocols)
        self.name = lb_name
        self.vips: Dict = {}
        self.lbs: List = []
        for protocol in self.protocols:
            self.lbs.append(self.nbctl.create_lb(self.name, protocol))
        if vips:
            self.add_vips(vips)

    def add_vip(
        self, vip: str, vport: int, backends: List, backend_port: int,
        version: int
    ) -> None:
        '''
        Add a single VIP fronting 'backends' (list of port objects with
        .ip/.ip6 attributes); version selects IPv4 (4) or IPv6 (6).
        '''
        self.add_vips(
            OvnLoadBalancer.get_vip_map(
                vip, vport, backends, backend_port, version
            )
        )

    def add_vips(self, vips: Dict) -> None:
        '''
        Add VIPs to a load balancer.
        vips: Dictionary with key being a VIP string, and value being a list of
        backend IP address strings. It's perfectly acceptable for the VIP to
        have no backends.
        '''
        # Update the NB database in batches to keep individual
        # transactions bounded.
        MAX_VIPS_IN_BATCH = 500
        for i in range(0, len(vips), MAX_VIPS_IN_BATCH):
            updated_vips = {}
            for vip, backends in itertools.islice(
                vips.items(), i, i + MAX_VIPS_IN_BATCH
            ):
                # Merge with backends previously registered for this VIP.
                cur_backends = self.vips.setdefault(vip, [])
                if backends:
                    cur_backends.extend(backends)
                updated_vips[vip] = cur_backends

            for lb in self.lbs:
                self.nbctl.lb_set_vips(lb, updated_vips)

    def clear_vips(self) -> None:
        '''
        Clear all VIPs from the load balancer.
        '''
        self.vips.clear()
        for lb in self.lbs:
            self.nbctl.lb_clear_vips(lb)

    def add_backends_to_vip(
        self, backends, vips: Optional[Dict] = None
    ) -> None:
        '''
        Add backends to existing load balancer VIPs.
        backends: A list of IP addresses to add as backends to VIPs.
        vips: An iterable of VIPs to which backends should be added. If this is
        'None' then the backends are added to all VIPs.
        '''
        for cur_vip, cur_backends in self.vips.items():
            if not vips or cur_vip in vips:
                cur_backends.extend(backends)

        for lb in self.lbs:
            self.nbctl.lb_set_vips(lb, self.vips)

    def add_to_routers(self, routers: List) -> None:
        '''Attach every per-protocol LB to the named logical routers.'''
        for lb in self.lbs:
            self.nbctl.lb_add_to_routers(lb, routers)

    def add_to_switches(self, switches: List) -> None:
        '''Attach every per-protocol LB to the named logical switches.'''
        for lb in self.lbs:
            self.nbctl.lb_add_to_switches(lb, switches)

    def remove_from_routers(self, routers: List) -> None:
        '''Detach every per-protocol LB from the named logical routers.'''
        for lb in self.lbs:
            self.nbctl.lb_remove_from_routers(lb, routers)

    def remove_from_switches(self, switches: List) -> None:
        '''Detach every per-protocol LB from the named logical switches.'''
        for lb in self.lbs:
            self.nbctl.lb_remove_from_switches(lb, switches)

    @staticmethod
    def get_vip_map(
        vip: str, vport: int, backends: List, backend_port: int, version: int
    ) -> Dict:
        '''
        Build a {vip:port -> [backend:port, ...]} mapping; IPv6
        addresses are bracketed per RFC 3986 host:port syntax.
        backends: list of port objects exposing .ip / .ip6 attributes.
        '''
        if version == 6:
            return {
                f'[{vip}]:{vport}': [
                    f'[{b.ip6}]:{backend_port}' for b in backends
                ]
            }
        else:
            return {
                f'{vip}:{vport}': [f'{b.ip}:{backend_port}' for b in backends]
            }
class OvnLoadBalancerGroup:
    """Wrapper around an OVN Load_Balancer_Group NB record.

    Instantiating the class immediately creates the group in the
    northbound database through the supplied nbctl connection.
    """

    def __init__(self, group_name: str, nbctl):
        self.nbctl = nbctl
        self.name = group_name
        # Create the NB Load_Balancer_Group record right away.
        self.lbg = nbctl.create_lbg(group_name)

    def add_lb(self, ovn_lb) -> None:
        """Attach every per-protocol LB of 'ovn_lb' to this group."""
        group = self.lbg
        attach = self.nbctl.lbg_add_lb
        for member in ovn_lb.lbs:
            attach(group, member)
This becomes the body of the commit 39 | message, following the subject. There is no need to duplicate the 40 | summary given in the subject. 41 | 42 | Please limit lines in the description to 79 characters in width. 43 | 44 | The description should include: 45 | 46 | - The rationale for the change. 47 | 48 | - Design description and rationale (but this might be better 49 | added as code comments). 50 | 51 | - Testing that you performed (or testing that should be done 52 | but you could not for whatever reason). 53 | 54 | - Tags (see below). 55 | 56 | There is no need to describe what the commit actually changed, if 57 | readers can see it for themselves. 58 | 59 | If the commit refers to a commit already in the ovn-heater 60 | repository, please include both the commit number and the subject of 61 | the commit, e.g. 'commit 7bbeecf09bf5 ("cluster_density: Skip build pods 62 | in the startup stage.")'. 63 | 64 | ## Tags 65 | 66 | The description ends with a series of tags, written one to a line as 67 | the last paragraph of the email. Each tag indicates some property of 68 | the commit in an easily machine-parseable manner. 69 | 70 | Examples of common tags follow. 71 | 72 | ``` 73 | Signed-off-by: Author Name 74 | ``` 75 | 76 | Informally, this indicates that Author Name is the author or 77 | submitter of a commit and has the authority to submit it under 78 | the terms of the license. The formal meaning is to agree to 79 | the Developer's Certificate of Origin (see below). 80 | 81 | If the author and submitter are different, each must sign off. 82 | If the commit has more than one author, all must sign off: 83 | 84 | ``` 85 | Signed-off-by: Author Name 86 | Signed-off-by: Submitter Name 87 | ``` 88 | 89 | Git can only record a single person as the author of a given patch. 
90 | In the rare event that a patch has multiple authors, one must be given 91 | the credit in Git and the others must be credited via `Co-authored-by:` 92 | tags (all co-authors must also sign off): 93 | 94 | ``` 95 | Co-authored-by: Author Name 96 | ``` 97 | 98 | ## Developer's Certificate of Origin 99 | 100 | To help track the author of a commit as well as the submission chain, 101 | and be clear that the developer has authority to submit a commit for 102 | inclusion in ovn-heater please sign off your work. The sign off 103 | certifies the following: 104 | 105 | Developer's Certificate of Origin 1.1 106 | 107 | By making a contribution to this project, I certify that: 108 | 109 | (a) The contribution was created in whole or in part by me and I 110 | have the right to submit it under the open source license 111 | indicated in the file; or 112 | 113 | (b) The contribution is based upon previous work that, to the best 114 | of my knowledge, is covered under an appropriate open source 115 | license and I have the right under that license to submit that 116 | work with modifications, whether created in whole or in part 117 | by me, under the same open source license (unless I am 118 | permitted to submit under a different license), as indicated 119 | in the file; or 120 | 121 | (c) The contribution was provided directly to me by some other 122 | person who certified (a), (b) or (c) and I have not modified 123 | it. 124 | 125 | (d) I understand and agree that this project and the contribution 126 | are public and that a record of the contribution (including all 127 | personal information I submit with it, including my sign-off) is 128 | maintained indefinitely and may be redistributed consistent with 129 | this project or the open source license(s) involved. 130 | 131 | See also https://developercertificate.org. 
132 | -------------------------------------------------------------------------------- /ovn-tester/ovn_sandbox.py: -------------------------------------------------------------------------------- 1 | import logging 2 | import paramiko 3 | import socket 4 | 5 | from io import StringIO 6 | from ovn_exceptions import SSHError 7 | 8 | log = logging.getLogger(__name__) 9 | 10 | DEFAULT_SANDBOX_TIMEOUT = 60 11 | 12 | 13 | class SSH: 14 | def __init__(self, hostname: str, cmd_log: bool): 15 | self.hostname = hostname 16 | self.ssh = paramiko.SSHClient() 17 | self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) 18 | self.ssh.connect(hostname) 19 | self.cmd_log = cmd_log 20 | 21 | @staticmethod 22 | def printable_result(out: str) -> str: 23 | if '\n' in out or '\r' in out: 24 | out = "---\n" + out 25 | return out 26 | 27 | def run(self, cmd="", stdout=None, raise_on_error: bool = False) -> None: 28 | if self.cmd_log: 29 | log.info(f'Logging command: ssh {self.hostname} "{cmd}"') 30 | 31 | ssh_stdin, ssh_stdout, ssh_stderr = self.ssh.exec_command(cmd) 32 | exit_status = ssh_stdout.channel.recv_exit_status() 33 | 34 | if exit_status != 0 and raise_on_error: 35 | out = self.printable_result(ssh_stderr.read().decode().strip()) 36 | if len(out): 37 | log.warning(out) 38 | raise SSHError( 39 | f'Command "{cmd}" failed with exit_status {exit_status}.' 
40 | ) 41 | 42 | if not ssh_stdout.channel.recv_ready(): 43 | return 44 | 45 | if stdout: 46 | stdout.write(ssh_stdout.read().decode('ascii')) 47 | else: 48 | out = self.printable_result(ssh_stdout.read().decode().strip()) 49 | if len(out): 50 | log.info(out) 51 | 52 | 53 | class PhysicalNode: 54 | def __init__(self, hostname: str, log_cmds: bool): 55 | self.ssh = SSH(hostname, log_cmds) 56 | 57 | def run(self, cmd="", stdout=None, raise_on_error: bool = False) -> None: 58 | self.ssh.run(cmd=cmd, stdout=stdout, raise_on_error=raise_on_error) 59 | 60 | 61 | class Sandbox: 62 | def __init__(self, phys_node, container): 63 | self.phys_node = phys_node 64 | self.container = container 65 | self.channel = None 66 | 67 | def ensure_channel(self) -> None: 68 | if self.channel: 69 | return 70 | 71 | self.channel = self.phys_node.ssh.ssh.invoke_shell( 72 | width=10000, height=10000 73 | ) 74 | if self.container: 75 | dcmd = 'podman exec -it ' + self.container + ' bash' 76 | self.channel.sendall(f"{dcmd}\n".encode()) 77 | 78 | stdout = StringIO() 79 | # Checking + consuming all the unwanted output from the shell. 80 | self.run(cmd="echo Hello", stdout=stdout, raise_on_error=True) 81 | 82 | def run( 83 | self, 84 | cmd: str = "", 85 | stdout=None, 86 | raise_on_error: bool = False, 87 | timeout: int = DEFAULT_SANDBOX_TIMEOUT, 88 | ) -> None: 89 | if self.phys_node.ssh.cmd_log: 90 | log.info(f'Logging command: ssh {self.container} "{cmd}"') 91 | 92 | self.ensure_channel() 93 | # Fail if command didn't finish after 'timeout' seconds. 94 | self.channel.settimeout(timeout) 95 | 96 | # Can't have ';' right after '&'. 97 | if not cmd.endswith('&'): 98 | cmd = cmd + ' ;' 99 | 100 | self.channel.sendall( 101 | f"echo '++++start'; " 102 | f"{cmd} echo $? 
; " 103 | f"echo '++++end' \n".encode() 104 | ) 105 | timed_out = False 106 | out = '' 107 | try: 108 | out = self.channel.recv(10240).decode() 109 | while '++++end' not in out.splitlines(): 110 | out = out + self.channel.recv(10240).decode() 111 | except (paramiko.buffered_pipe.PipeTimeout, socket.timeout): 112 | if '++++start' not in out.splitlines(): 113 | out = '++++start\n' + out 114 | out = out + '\n42\n++++end' 115 | timed_out = True 116 | log.warning(f'Command "{cmd}" timed out!') 117 | # Can't trust this shell anymore. 118 | self.channel.close() 119 | self.channel = None 120 | pass 121 | 122 | # Splitting and removing all lines with terminal control chars. 123 | out = out.splitlines() 124 | start = out.index('++++start') + 1 125 | end = out.index('++++end') - 1 126 | exit_status = int(out[end]) 127 | out = [s for s in out[start:end] if '\x1b' not in s] 128 | 129 | if self.phys_node.ssh.cmd_log or timed_out: 130 | log.info(f'Result: {out}, Exit status: {exit_status}') 131 | 132 | out = '\n'.join(out).strip() 133 | 134 | if exit_status != 0 and raise_on_error: 135 | if len(out): 136 | log.warning(SSH.printable_result(out)) 137 | raise SSHError( 138 | f'Command "{cmd}" failed with exit_status {exit_status}.' 
from collections import namedtuple
import netaddr
from ovn_context import Context
from cms.ovn_kubernetes import Namespace
from ovn_ext_cmd import ExtCmd


# A range of namespace indices: namespaces with index >= 'start' get
# 'n_pods' pods each (the range with the highest matching 'start' wins).
NsRange = namedtuple('NsRange', ['start', 'n_pods'])


# Parsed 'netpol_multitenant' test configuration.
NsMultitenantCfg = namedtuple(
    'NsMultitenantCfg',
    ['n_namespaces', 'ranges', 'n_external_ips1', 'n_external_ips2'],
)


class NetpolMultitenant(ExtCmd):
    """Multitenant network policy test.

    Provisions per-namespace pods, default-deny plus intra-namespace allow
    policies, and "allow from external IPs" policies of two different sizes
    (n_external_ips1 / n_external_ips2), spreading namespaces round-robin
    across availability zones.
    """

    def __init__(self, config, clusters, global_cfg):
        super().__init__(config, clusters)
        test_config = config.get('netpol_multitenant', dict())
        ranges = [
            NsRange(
                start=range_args.get('start', 0),
                n_pods=range_args.get('n_pods', 5),
            )
            for range_args in test_config.get('ranges', list())
        ]
        # Sort descending by 'start' so that in run() the first range with
        # start <= i is the one with the highest matching threshold.
        ranges.sort(key=lambda x: x.start, reverse=True)
        self.config = NsMultitenantCfg(
            n_namespaces=test_config.get('n_namespaces', 0),
            n_external_ips1=test_config.get('n_external_ips1', 3),
            n_external_ips2=test_config.get('n_external_ips2', 20),
            ranges=ranges,
        )

    def run(self, clusters, global_cfg):
        """
        Run a multitenant network policy test, for example:

        for i in range(n_namespaces):
            create address set AS_ns_i
            create port group PG_ns_i
            if i < 200:
                n_pods = 1  # 200 pods
            elif i < 480:
                n_pods = 5  # 1400 pods
            elif i < 495:
                n_pods = 20  # 300 pods
            else:
                n_pods = 100  # 500 pods
            create n_pods
            add n_pods to AS_ns_i
            add n_pods to PG_ns_i
            create acls:

            to-lport, ip.src == $AS_ns_i && outport == @PG_ns_i,
                allow-related
            to-lport, ip.src == {ip1, ip2, ip3} && outport == @PG_ns_i,
                allow-related
            to-lport, ip.src == {ip1, ..., ip20} && outport == @PG_ns_i,
                allow-related
        """
        # Build the "external" source-IP lists for each enabled IP family.
        if global_cfg.run_ipv4:
            external_ips1 = [
                netaddr.IPAddress('42.42.42.1') + i
                for i in range(self.config.n_external_ips1)
            ]
            external_ips2 = [
                netaddr.IPAddress('43.43.43.1') + i
                for i in range(self.config.n_external_ips2)
            ]
        if global_cfg.run_ipv6:
            external6_ips1 = [
                netaddr.IPAddress('42:42:42::1') + i
                for i in range(self.config.n_external_ips1)
            ]
            external6_ips2 = [
                netaddr.IPAddress('43:43:43::1') + i
                for i in range(self.config.n_external_ips2)
            ]

        all_ns = []
        with Context(
            clusters, 'netpol_multitenant', self.config.n_namespaces, test=self
        ) as ctx:
            for i in ctx:
                # Get the number of pods from the "highest" range that
                # includes i.
                ranges = self.config.ranges
                n_ports = next((r.n_pods for r in ranges if i >= r.start), 1)
                # Spread namespaces round-robin across availability zones.
                az_index = i % len(clusters)
                ovn = clusters[az_index]
                ns = Namespace(
                    clusters, f'ns_netpol_multitenant_{i}', global_cfg
                )
                # Provision ports one at a time so worker selection
                # round-robins per port.
                for _ in range(n_ports):
                    worker = ovn.select_worker_for_port()
                    for p in worker.provision_ports(ovn, 1):
                        ns.add_ports([p], az_index)
                ns.default_deny(4, az_index)
                if global_cfg.run_ipv4:
                    ns.allow_within_namespace(4, az_index)
                if global_cfg.run_ipv6:
                    ns.allow_within_namespace(6, az_index)
                ns.check_enforcing_internal(az_index)
                if global_cfg.run_ipv4:
                    ns.allow_from_external(external_ips1, az=az_index)
                    ns.allow_from_external(
                        external_ips2, include_ext_gw=True, az=az_index
                    )
                if global_cfg.run_ipv6:
                    ns.allow_from_external(
                        external6_ips1, family=6, az=az_index
                    )
                    ns.allow_from_external(
                        external6_ips2,
                        include_ext_gw=True,
                        family=6,
                        az=az_index,
                    )
                ns.check_enforcing_external(az_index)
                all_ns.append(ns)

        if not global_cfg.cleanup:
            return
        # Tear down everything provisioned above.
        with Context(
            clusters, 'netpol_multitenant_cleanup', brief_report=True
        ) as ctx:
            for ns in all_ns:
                ns.unprovision()
import argparse
import json
import logging
import os
import pandas as pd
import sys

from typing import Dict, List, Tuple


FORMAT = '%(asctime)s |%(levelname)s| %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.INFO, format=FORMAT)
log = logging.getLogger(__name__)


def read_file(filename: str) -> Dict:
    """Load one JSON file of recorded process statistics."""
    with open(filename, "r") as file:
        return json.load(file)


def aggregated(df: pd.DataFrame) -> Tuple[pd.DataFrame, int]:
    """Aggregate per-process samples into per-name sum/mean/max/min series.

    'df' is long-form with columns ['Time', 'Process', <value>] where
    Process is '<name>|<node>'.  Returns the aggregated long-form frame
    (same three columns) and the maximum of the summed OVN <value>.
    """
    column_names = list(df.columns)
    value_name = column_names[2]

    log.info(f'Pivot and interpolate {value_name} ...')
    # One column per process instance, aligned on a common time index;
    # gaps are filled by time-based interpolation.
    df = df.pivot_table(
        index='Time', columns='Process', values=value_name, aggfunc='mean'
    ).interpolate(method='time', limit_direction='both')

    result = pd.DataFrame(index=df.index)
    # Process names with the '|<node>' suffix stripped.
    processes = {p.split('|')[0] for p in df.columns}

    log.info(f'Aggregating {value_name} ...')
    for p in processes:
        df_filtered = df.filter(regex='^' + p)
        result[p + '|sum'] = df_filtered.sum(axis=1)
        result[p + '|mean'] = df_filtered.mean(axis=1)
        result[p + '|max'] = df_filtered.max(axis=1)
        result[p + '|min'] = df_filtered.min(axis=1)

    # Totals across all OVN/OVS processes on central/scale nodes.
    result['ovn|sum'] = df.filter(regex=r'^ovn.*\|ovn-(central|scale).*').sum(
        axis=1
    )
    ovn_max = result['ovn|sum'].astype('int').max()

    result['ovs|sum'] = df.filter(regex=r'^ovs.*\|ovn-(central|scale).*').sum(
        axis=1
    )

    # Back to long form ['Time', 'Process', <value>], sorted for plotting.
    result = result.astype('int').reset_index().melt(id_vars=['Time'])
    result.columns = column_names
    result = result.sort_values(['Process', 'Time'])

    return result, ovn_max


def resource_stats_generate(
    filename: str, data: Dict, aggregate: bool
) -> None:
    """Render RSS and CPU line charts for 'data' into HTML file 'filename'.

    'data' maps timestamp -> {process name -> {'rss': ..., 'cpu': ...}}.
    With 'aggregate' set, per-process-name aggregates produced by
    aggregated() are plotted instead of individual processes.
    """
    # Imported here so the aggregation helpers above remain usable without
    # plotly installed; charts are the only plotly consumer.
    import plotly.express as px

    rss: List[List] = []
    cpu: List[List] = []

    log.info('Preprocessing ...')
    for ts, time_slice in sorted(data.items()):
        tme = pd.Timestamp.fromtimestamp(float(ts)).round('1s')
        for name, res in time_slice.items():
            rss_mb = int(res['rss']) >> 20  # bytes -> MiB
            rss.append([tme, name, rss_mb])
            cpu.append([tme, name, float(res['cpu'])])

    log.info('Creating DataFrame ...')
    df_rss = pd.DataFrame(rss, columns=['Time', 'Process', 'RSS (MB)'])
    df_cpu = pd.DataFrame(cpu, columns=['Time', 'Process', 'CPU (%)'])

    if aggregate:
        df_rss, max_sum_rss = aggregated(df_rss)
        df_cpu, max_sum_cpu = aggregated(df_cpu)

    log.info('Creating charts ...')
    rss_chart = px.line(
        df_rss,
        x='Time',
        y='RSS (MB)',
        color='Process',
        title=('Aggregate ' if aggregate else '') + 'Resident Set Size',
    )
    cpu_chart = px.line(
        df_cpu,
        x='Time',
        y='CPU (%)',
        color='Process',
        title=('Aggregate ' if aggregate else '') + 'CPU usage',
    )

    log.info(f'Writing HTML to {filename} ...')
    with open(filename, 'w') as report_file:
        report_file.write('')
        if aggregate:
            # NOTE(review): the HTML markup of this summary table appears
            # to have been stripped in this copy of the file -- restore the
            # surrounding table tags before relying on the rendered output.
            report_file.write(
                f'''
Max(Sum(OVN RSS)) {max_sum_rss} MB
Max(Sum(OVN CPU)) {max_sum_cpu} %
'''
            )
        report_file.write(
            rss_chart.to_html(
                full_html=False,
                include_plotlyjs='cdn',
                default_width='90%',
                default_height='90%',
            )
        )
        report_file.write(
            cpu_chart.to_html(
                full_html=False,
                include_plotlyjs='cdn',
                default_width='90%',
                default_height='90%',
            )
        )
        report_file.write('')


if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='Generate resource usage charts.'
    )
    parser.add_argument(
        '--aggregate', action='store_true', help='generate aggregate charts'
    )
    parser.add_argument(
        '-o', '--output', required=True, help='file to write an HTML result'
    )
    parser.add_argument(
        'input_files',
        metavar='input-file',
        type=str,
        nargs='+',
        help='JSON file with recorded process statistics',
    )

    args = parser.parse_args()

    # Refuse to clobber an existing report.
    if os.path.isfile(args.output):
        log.fatal(f'Output file {args.output} already exists')
        sys.exit(2)

    log.info(f'Processing stats from {len(args.input_files)} files.')

    log.info('Reading ...')
    data: Dict = {}
    for f in args.input_files:
        d = read_file(f)
        data.update(d)

    resource_stats_generate(args.output, data, args.aggregate)
    log.info('Done.')
| """This contains all "global" level configuration options and their 19 | default values. If you want to add a new global level option then it needs 20 | to be listed here and have its type and default value specified.""" 21 | 22 | log_cmds: bool = False 23 | cleanup: bool = False 24 | run_ipv4: bool = True 25 | run_ipv6: bool = False 26 | cms_name: str = '' 27 | 28 | 29 | DEFAULT_N_VIPS = 2 30 | DEFAULT_VIP_PORT = 80 31 | 32 | 33 | def calculate_vips(subnet: str) -> Dict: 34 | vip_subnet = netaddr.IPNetwork(subnet) 35 | vip_gen = vip_subnet.iter_hosts() 36 | vip_range = range(0, DEFAULT_N_VIPS) 37 | prefix = '[' if vip_subnet.version == 6 else '' 38 | suffix = ']' if vip_subnet.version == 6 else '' 39 | return { 40 | f'{prefix}{next(vip_gen)}{suffix}:{DEFAULT_VIP_PORT}': None 41 | for _ in vip_range 42 | } 43 | 44 | 45 | DEFAULT_N_STATIC_VIPS = 65 46 | DEFAULT_N_STATIC_BACKENDS = 2 47 | DEFAULT_STATIC_BACKEND_SUBNET = netaddr.IPNetwork('6.0.0.0/8') 48 | DEFAULT_STATIC_BACKEND_SUBNET6 = netaddr.IPNetwork('6::/32') 49 | DEFAULT_BACKEND_PORT = 8080 50 | 51 | 52 | def calculate_static_vips(vip_subnet: str) -> Dict: 53 | vip_subnet = netaddr.IPNetwork(vip_subnet) 54 | if vip_subnet.version == 6: 55 | backend_subnet = DEFAULT_STATIC_BACKEND_SUBNET6 56 | else: 57 | backend_subnet = DEFAULT_STATIC_BACKEND_SUBNET 58 | 59 | vip_gen = vip_subnet.iter_hosts() 60 | vip_range = range(0, DEFAULT_N_STATIC_VIPS) 61 | 62 | backend_gen = backend_subnet.iter_hosts() 63 | backend_range = range(0, DEFAULT_N_STATIC_BACKENDS) 64 | 65 | prefix = '[' if vip_subnet.version == 6 else '' 66 | suffix = ']' if vip_subnet.version == 6 else '' 67 | 68 | # This assumes it's OK to use the same backend list for each 69 | # VIP. 
If we need to use different backends for each VIP, 70 | # then this will need to be updated 71 | backend_list = [ 72 | f'{prefix}{next(backend_gen)}{suffix}:{DEFAULT_BACKEND_PORT}' 73 | for _ in backend_range 74 | ] 75 | 76 | return { 77 | f'{prefix}{next(vip_gen)}{suffix}:{DEFAULT_VIP_PORT}': backend_list 78 | for _ in vip_range 79 | } 80 | 81 | 82 | @dataclass 83 | class ClusterConfig: 84 | """This contains all "cluster" level configuration options and their 85 | default values. If you want to add a new cluster level option then it needs 86 | to be listed here and have its type and default value specified. 87 | 88 | Fields with "None" as their default are calculated in the __post_init__ 89 | method.""" 90 | 91 | monitor_all: bool = True 92 | logical_dp_groups: bool = True 93 | clustered_db: bool = True 94 | log_txns_db: bool = False 95 | datapath_type: str = "system" 96 | raft_election_to: int = 16 97 | northd_probe_interval: int = 16000 98 | northd_threads: int = 4 99 | db_inactivity_probe: int = 60000 100 | node_net: str = "192.16.0.0/16" 101 | enable_ssl: bool = True 102 | node_timeout_s: int = 20 103 | internal_net: str = "16.0.0.0/16" 104 | internal_net6: str = "16::/64" 105 | external_net: str = "20.0.0.0/16" 106 | external_net6: str = "20::/64" 107 | gw_net: str = "30.0.0.0/16" 108 | gw_net6: str = "30::/64" 109 | ts_net: str = "40.0.0.0/16" 110 | ts_net6: str = "40::/64" 111 | cluster_net: str = "16.0.0.0/4" 112 | cluster_net6: str = "16::/32" 113 | n_workers: int = 2 114 | n_relays: int = 0 115 | n_az: int = 1 116 | vips: Dict = None 117 | vips6: Dict = None 118 | vip_subnet: str = "4.0.0.0/8" 119 | vip_subnet6: str = "4::/32" 120 | static_vips: Dict = None 121 | static_vips6: Dict = None 122 | use_ovsdb_etcd: bool = False 123 | 124 | def __post_init__(self, **kwargs): 125 | # Some defaults have to be calculated 126 | if self.vips is None: 127 | self.vips = calculate_vips(self.vip_subnet) 128 | 129 | if self.vips6 is None: 130 | self.vips6 = 
calculate_vips(self.vip_subnet6) 131 | 132 | if self.static_vips is None: 133 | self.static_vips = calculate_static_vips(self.vip_subnet) 134 | 135 | if self.static_vips6 is None: 136 | self.static_vips6 = calculate_static_vips(self.vip_subnet6) 137 | 138 | 139 | def translate_yaml(orig_yaml): 140 | global_cfg = GlobalConfig(**orig_yaml["global"]) 141 | cluster_cfg = ClusterConfig(**orig_yaml["cluster"]) 142 | 143 | dest_yaml = dict() 144 | dest_yaml["global"] = vars(global_cfg) 145 | dest_yaml["cluster"] = vars(cluster_cfg) 146 | 147 | for section, values in orig_yaml.items(): 148 | if section != "global" and section != "cluster": 149 | dest_yaml[section] = values 150 | 151 | return dest_yaml 152 | 153 | 154 | def write_yaml(dest_yaml, dest_yaml_file_name): 155 | with open(dest_yaml_file_name, "w") as dest_yaml_file: 156 | yaml.dump(dest_yaml, dest_yaml_file) 157 | 158 | 159 | def main(): 160 | orig_yaml_file_name = Path(sys.argv[1]) 161 | dest_yaml_file_name = Path(sys.argv[2]) 162 | 163 | orig_yaml = load_yaml(orig_yaml_file_name) 164 | dest_yaml = translate_yaml(orig_yaml) 165 | write_yaml(dest_yaml, dest_yaml_file_name) 166 | 167 | return 0 168 | 169 | 170 | if __name__ == "__main__": 171 | main() 172 | -------------------------------------------------------------------------------- /ovn-fake-multinode-utils/playbooks/bringup-cluster.yml: -------------------------------------------------------------------------------- 1 | - name: Bring up the tester 2 | hosts: tester_hosts 3 | tasks: 4 | - name: Start tester container 5 | ansible.builtin.shell: | 6 | podman run -dt --name=ovn-tester --hostname=ovn-tester \ 7 | --pids-limit -1 --privileged ovn/ovn-tester 8 | 9 | - name: Add tester container interfaces to OVS bridges 10 | environment: 11 | RUNC_CMD: podman 12 | ansible.builtin.shell: | 13 | ./ovs-docker add-port br-ovn eth1 ovn-tester \ 14 | --ipaddress={{ node_net|ansible.utils.ipaddr('1') }} 15 | ./ovs-docker add-port br-ovn-ext eth2 ovn-tester 16 | args: 17 | 
chdir: "{{ ovn_fake_multinode_target_path }}/ovn-fake-multinode" 18 | 19 | 20 | - name: Compute central facts 21 | hosts: central_hosts, worker_hosts 22 | tasks: 23 | - name: Compute central facts (standalone) 24 | when: clustered_db == "no" 25 | ansible.builtin.set_fact: 26 | n_ips: '{{ n_relays|int + 1 }}' 27 | 28 | - name: Compute central facts (clustered) 29 | when: clustered_db == "yes" 30 | ansible.builtin.set_fact: 31 | n_ips: '{{ n_relays|int + 3 }}' 32 | 33 | 34 | - name: Bring up central nodes 35 | hosts: central_hosts 36 | tasks: 37 | - name: Compute central facts (standalone) 38 | when: clustered_db == "no" 39 | ansible.builtin.set_fact: 40 | n_ips: '{{ n_relays|int + 1 }}' 41 | central_ic_id: 'ovn-central-az0' 42 | 43 | - name: Compute central facts (clustered) 44 | when: clustered_db == "yes" 45 | ansible.builtin.set_fact: 46 | n_ips: '{{ n_relays|int + 3 }}' 47 | central_ic_id: 'ovn-central-az0-1' 48 | 49 | - name: Determine node to run IC DBs 50 | ansible.builtin.set_fact: ic_db_node="{{ groups['central_hosts'] | first }}" 51 | 52 | - name: Start central containers 53 | environment: 54 | CENTRAL_COUNT: 1 55 | CHASSIS_COUNT: 0 56 | CENTRAL_NAME: 'ovn-central-az{{ item|int }}' 57 | CENTRAL_PREFIX: 'ovn-central-az{{ item|int }}' 58 | CREATE_FAKE_VMS: no 59 | ENABLE_ETCD: '{{ use_ovsdb_etcd }}' 60 | ENABLE_SSL: '{{ enable_ssl }}' 61 | GW_COUNT: 0 62 | IP_CIDR: "{{ node_net|ansible.utils.ipaddr('prefix') }}" 63 | IP_HOST: "{{ node_net|ansible.utils.ipaddr('network') }}" 64 | IP_START: "{{ node_net|ansible.utils.ipmath(2 + item|int * n_ips|int) }}" 65 | OVN_START_IC_DBS: '{{ (item|int == 0) | ansible.builtin.ternary("yes", "no") }}' 66 | CENTRAL_IC_ID: '{{ central_ic_id }}' 67 | CENTRAL_IC_IP: '{{ node_net|ansible.utils.ipmath(2) }}' 68 | OVN_DB_CLUSTER: '{{ clustered_db }}' 69 | OVN_DP_TYPE: '{{ datapath_type }}' 70 | OVN_MONITOR_ALL: '{{ monitor_all }}' 71 | RELAY_COUNT: '{{ n_relays }}' 72 | RUNC_CMD: podman 73 | ansible.builtin.shell: | 74 | 
./ovn_cluster.sh start 75 | args: 76 | chdir: "{{ ovn_fake_multinode_target_path }}/ovn-fake-multinode" 77 | loop: "{{ range(groups['central_hosts'].index(inventory_hostname), 78 | n_az|int, groups['central_hosts']|length) | list }}" 79 | 80 | 81 | - name: Bring up worker nodes 82 | hosts: worker_hosts 83 | tasks: 84 | - name: Start worker containers 85 | environment: 86 | CENTRAL_COUNT: 1 87 | CHASSIS_COUNT: 0 88 | CREATE_FAKE_VMS: no 89 | ENABLE_ETCD: '{{ use_ovsdb_etcd }}' 90 | ENABLE_SSL: '{{ enable_ssl }}' 91 | GW_COUNT: 0 92 | IP_CIDR: "{{ node_net|ansible.utils.ipaddr('prefix') }}" 93 | IP_HOST: "{{ node_net|ansible.utils.ipaddr('network') }}" 94 | IP_START: "{{ node_net|ansible.utils.ipmath(2 + n_az|int * n_ips|int + item|int) }}" 95 | OVN_DB_CLUSTER: '{{ clustered_db }}' 96 | OVN_DP_TYPE: '{{ datapath_type }}' 97 | OVN_MONITOR_ALL: '{{ monitor_all }}' 98 | RELAY_COUNT: '{{ n_relays }}' 99 | RUNC_CMD: podman 100 | ansible.builtin.shell: | 101 | ./ovn_cluster.sh add-chassis \ 102 | {{ node_name }}-{{ item|int }} tcp:0.0.0.1:6642 103 | args: 104 | chdir: "{{ ovn_fake_multinode_target_path }}/ovn-fake-multinode" 105 | loop: "{{ range(groups['worker_hosts'].index(inventory_hostname), 106 | n_workers|int, groups['worker_hosts']|length) | list }}" 107 | 108 | 109 | - name: Start process monitoring 110 | hosts: all 111 | tasks: 112 | - name: Enumerate containers 113 | ansible.builtin.shell: | 114 | podman ps --filter='name=ovn-' --noheading --format={{ '{{.Names}}' }} 115 | register: containers 116 | 117 | - name: Copy and start the process monitoring script in containers 118 | ansible.builtin.shell: | 119 | podman cp /tmp/process-monitor.py {{ item }}:/tmp/ 120 | podman exec {{ item }} bash -c " 121 | nohup python3 /tmp/process-monitor.py \\ 122 | -s {{ item }} \\ 123 | -o /var/log/process-stats.json \\ 124 | -x /tmp/process-monitor.exit & 125 | " 126 | loop: "{{ containers.stdout_lines }}" 127 | 128 | 129 | - name: Set up SSL remote for local ovsdb-server 130 | 
hosts: worker_hosts 131 | tasks: 132 | - name: Enumerate containers 133 | ansible.builtin.shell: | 134 | podman ps --filter='name={{ node_name }}-' \ 135 | --noheading --format={{ '{{.Names}}' }} 136 | register: containers 137 | 138 | - name: Set up SSL 139 | when: enable_ssl == "yes" 140 | ansible.builtin.shell: | 141 | # SSL files are installed by ovn-fake-multinode in these locations. 142 | podman exec {{ item }} bash -c " 143 | ovs-vsctl --id=@foo create SSL \\ 144 | private_key=/opt/ovn/ovn-privkey.pem \\ 145 | certificate=/opt/ovn/ovn-cert.pem \\ 146 | ca_cert=/opt/ovn/pki/switchca/cacert.pem \\ 147 | -- set Open_vSwitch . ssl=@foo 148 | " 149 | loop: "{{ containers.stdout_lines }}" 150 | 151 | - name: Add secure OVSDB remote 152 | when: enable_ssl == "yes" 153 | ansible.builtin.shell: | 154 | podman exec {{ item }} bash -c " 155 | ovs-appctl -t ovsdb-server ovsdb-server/add-remote pssl:6640 156 | " 157 | loop: "{{ containers.stdout_lines }}" 158 | 159 | - name: Add insecure OVSDB remote 160 | when: enable_ssl == "no" 161 | ansible.builtin.shell: | 162 | podman exec {{ item }} bash -c " 163 | ovs-appctl -t ovsdb-server ovsdb-server/add-remote ptcp:6640 164 | " 165 | loop: "{{ containers.stdout_lines }}" 166 | -------------------------------------------------------------------------------- /ovn-tester/ovn_tester.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import logging 4 | import sys 5 | import netaddr 6 | import yaml 7 | import importlib 8 | import ovn_exceptions 9 | import gc 10 | import time 11 | 12 | from collections import namedtuple 13 | from ovn_context import Context 14 | from ovn_sandbox import PhysicalNode 15 | from ovn_workload import ( 16 | BrExConfig, 17 | ClusterConfig, 18 | ) 19 | from ovn_utils import DualStackSubnet 20 | from ovs.stream import Stream 21 | from typing import List, Tuple, Dict, Callable 22 | 23 | 24 | GlobalCfg = namedtuple( 25 | 'GlobalCfg', 
['log_cmds', 'cleanup', 'run_ipv4', 'run_ipv6', 'cms_name'] 26 | ) 27 | 28 | 29 | def usage(name): 30 | print( 31 | f''' 32 | {name} PHYSICAL_DEPLOYMENT TEST_CONF 33 | where PHYSICAL_DEPLOYMENT is the YAML file defining the deployment. 34 | where TEST_CONF is the YAML file defining the test parameters. 35 | ''', 36 | file=sys.stderr, 37 | ) 38 | 39 | 40 | def read_physical_deployment( 41 | deployment: str, global_cfg: GlobalCfg 42 | ) -> Tuple[List[PhysicalNode], List[PhysicalNode]]: 43 | with open(deployment, 'r') as yaml_file: 44 | dep = yaml.safe_load(yaml_file) 45 | 46 | central_nodes = [ 47 | PhysicalNode(central, global_cfg.log_cmds) 48 | for central in dep['central-nodes'] 49 | ] 50 | worker_nodes = [ 51 | PhysicalNode(worker, global_cfg.log_cmds) 52 | for worker in dep['worker-nodes'] 53 | ] 54 | return central_nodes, worker_nodes 55 | 56 | 57 | # SSL files are installed by ovn-fake-multinode in these locations. 58 | SSL_KEY_FILE = "/opt/ovn/ovn-privkey.pem" 59 | SSL_CERT_FILE = "/opt/ovn/ovn-cert.pem" 60 | SSL_CACERT_FILE = "/opt/ovn/pki/switchca/cacert.pem" 61 | 62 | 63 | def read_config(config: Dict) -> Tuple[GlobalCfg, ClusterConfig, BrExConfig]: 64 | global_args = config.get('global', dict()) 65 | global_cfg = GlobalCfg(**global_args) 66 | 67 | cluster_args = config.get('cluster') 68 | cluster_cfg = ClusterConfig( 69 | monitor_all=cluster_args['monitor_all'], 70 | logical_dp_groups=cluster_args['logical_dp_groups'], 71 | clustered_db=cluster_args['clustered_db'], 72 | log_txns_db=cluster_args['log_txns_db'], 73 | datapath_type=cluster_args['datapath_type'], 74 | raft_election_to=cluster_args['raft_election_to'], 75 | node_net=netaddr.IPNetwork(cluster_args['node_net']), 76 | n_relays=cluster_args['n_relays'], 77 | n_az=cluster_args['n_az'], 78 | enable_ssl=cluster_args['enable_ssl'], 79 | northd_probe_interval=cluster_args['northd_probe_interval'], 80 | db_inactivity_probe=cluster_args['db_inactivity_probe'], 81 | 
node_timeout_s=cluster_args['node_timeout_s'], 82 | internal_net=DualStackSubnet( 83 | netaddr.IPNetwork(cluster_args['internal_net']) 84 | if global_cfg.run_ipv4 85 | else None, 86 | netaddr.IPNetwork(cluster_args['internal_net6']) 87 | if global_cfg.run_ipv6 88 | else None, 89 | ), 90 | external_net=DualStackSubnet( 91 | netaddr.IPNetwork(cluster_args['external_net']) 92 | if global_cfg.run_ipv4 93 | else None, 94 | netaddr.IPNetwork(cluster_args['external_net6']) 95 | if global_cfg.run_ipv6 96 | else None, 97 | ), 98 | gw_net=DualStackSubnet( 99 | netaddr.IPNetwork(cluster_args['gw_net']) 100 | if global_cfg.run_ipv4 101 | else None, 102 | netaddr.IPNetwork(cluster_args['gw_net6']) 103 | if global_cfg.run_ipv6 104 | else None, 105 | ), 106 | cluster_net=DualStackSubnet( 107 | netaddr.IPNetwork(cluster_args['cluster_net']) 108 | if global_cfg.run_ipv4 109 | else None, 110 | netaddr.IPNetwork(cluster_args['cluster_net6']) 111 | if global_cfg.run_ipv6 112 | else None, 113 | ), 114 | ts_net=DualStackSubnet( 115 | netaddr.IPNetwork(cluster_args['ts_net']) 116 | if global_cfg.run_ipv4 117 | else None, 118 | netaddr.IPNetwork(cluster_args['ts_net6']) 119 | if global_cfg.run_ipv6 120 | else None, 121 | ), 122 | n_workers=cluster_args['n_workers'], 123 | vips=cluster_args['vips'], 124 | vips6=cluster_args['vips6'], 125 | vip_subnet=cluster_args['vip_subnet'], 126 | static_vips=cluster_args['static_vips'], 127 | static_vips6=cluster_args['static_vips6'], 128 | use_ovsdb_etcd=cluster_args['use_ovsdb_etcd'], 129 | northd_threads=cluster_args['northd_threads'], 130 | ssl_private_key=SSL_KEY_FILE, 131 | ssl_cert=SSL_CERT_FILE, 132 | ssl_cacert=SSL_CACERT_FILE, 133 | ) 134 | 135 | brex_cfg = BrExConfig( 136 | physical_net=cluster_args.get('physical_net', 'providernet'), 137 | ) 138 | 139 | return global_cfg, cluster_cfg, brex_cfg 140 | 141 | 142 | def setup_logging(global_cfg: GlobalCfg) -> None: 143 | FORMAT = '%(asctime)s | %(name)-12s |%(levelname)s| %(message)s' 144 | 
logging.basicConfig(stream=sys.stdout, level=logging.INFO, format=FORMAT) 145 | logging.Formatter.converter = time.gmtime 146 | 147 | if gc.isenabled(): 148 | # If the garbage collector is enabled, it runs from time to time, and 149 | # interrupts ovn-tester to do so. If we are timing an operation, then 150 | # the gc can distort the amount of time something actually takes to 151 | # complete, resulting in graphs with spikes. 152 | # 153 | # Disabling the garbage collector runs the theoretical risk of leaking 154 | # a lot of memory, but in practical tests, this has not been a 155 | # problem. If gigantic-scale tests end up introducing memory issues, 156 | # then we may want to manually run the garbage collector between test 157 | # iterations or between test runs. 158 | gc.disable() 159 | gc.set_threshold(0) 160 | 161 | if not global_cfg.log_cmds: 162 | return 163 | 164 | modules = [ 165 | "ovsdbapp.backend.ovs_idl.transaction", 166 | ] 167 | for module_name in modules: 168 | logging.getLogger(module_name).setLevel(logging.DEBUG) 169 | 170 | 171 | RESERVED = [ 172 | 'global', 173 | 'cluster', 174 | 'ext_cmd', 175 | ] 176 | 177 | 178 | def load_cms(cms_name: str) -> Callable: 179 | mod = importlib.import_module(f'cms.{cms_name}') 180 | class_name = getattr(mod, 'OVN_HEATER_CMS_PLUGIN') 181 | cls = getattr(mod, class_name) 182 | return cls 183 | 184 | 185 | def configure_tests(yaml: Dict, clusters: List, global_cfg: GlobalCfg) -> List: 186 | tests = [] 187 | for section, cfg in yaml.items(): 188 | if section in RESERVED: 189 | continue 190 | 191 | mod = importlib.import_module( 192 | f'cms.{global_cfg.cms_name}.tests.{section}' 193 | ) 194 | class_name = ''.join(s.title() for s in section.split('_')) 195 | cls = getattr(mod, class_name) 196 | tests.append(cls(yaml, clusters, global_cfg)) 197 | return tests 198 | 199 | 200 | def set_ssl_keys(cluster_cfg: ClusterConfig) -> None: 201 | Stream.ssl_set_private_key_file(cluster_cfg.ssl_private_key) 202 | 
Stream.ssl_set_certificate_file(cluster_cfg.ssl_cert) 203 | Stream.ssl_set_ca_cert_file(cluster_cfg.ssl_cacert) 204 | 205 | 206 | if __name__ == '__main__': 207 | if len(sys.argv) != 3: 208 | usage(sys.argv[0]) 209 | sys.exit(1) 210 | 211 | with open(sys.argv[2], 'r') as yaml_file: 212 | config = yaml.safe_load(yaml_file) 213 | 214 | global_cfg, cluster_cfg, brex_cfg = read_config(config) 215 | 216 | setup_logging(global_cfg) 217 | 218 | if not global_cfg.cms_name or ( 219 | not global_cfg.run_ipv4 and not global_cfg.run_ipv6 220 | ): 221 | raise ovn_exceptions.OvnInvalidConfigException() 222 | 223 | cms_cls = load_cms(global_cfg.cms_name) 224 | centrals, workers = read_physical_deployment(sys.argv[1], global_cfg) 225 | clusters = [ 226 | cms_cls(cluster_cfg, centrals[i % len(centrals)], brex_cfg, i) 227 | for i in range(cluster_cfg.n_az) 228 | ] 229 | for c in clusters: 230 | c.add_cluster_worker_nodes(workers) 231 | 232 | tests = configure_tests(config, clusters, global_cfg) 233 | 234 | if cluster_cfg.enable_ssl: 235 | set_ssl_keys(cluster_cfg) 236 | 237 | with Context(clusters, 'prepare_test clusters'): 238 | for c in clusters: 239 | c.prepare_test() 240 | for test in tests: 241 | test.run(clusters, global_cfg) 242 | sys.exit(0) 243 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | 2 | Apache License 3 | Version 2.0, January 2004 4 | http://www.apache.org/licenses/ 5 | 6 | TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION 7 | 8 | 1. Definitions. 9 | 10 | "License" shall mean the terms and conditions for use, reproduction, 11 | and distribution as defined by Sections 1 through 9 of this document. 12 | 13 | "Licensor" shall mean the copyright owner or entity authorized by 14 | the copyright owner that is granting the License. 
15 | 16 | "Legal Entity" shall mean the union of the acting entity and all 17 | other entities that control, are controlled by, or are under common 18 | control with that entity. For the purposes of this definition, 19 | "control" means (i) the power, direct or indirect, to cause the 20 | direction or management of such entity, whether by contract or 21 | otherwise, or (ii) ownership of fifty percent (50%) or more of the 22 | outstanding shares, or (iii) beneficial ownership of such entity. 23 | 24 | "You" (or "Your") shall mean an individual or Legal Entity 25 | exercising permissions granted by this License. 26 | 27 | "Source" form shall mean the preferred form for making modifications, 28 | including but not limited to software source code, documentation 29 | source, and configuration files. 30 | 31 | "Object" form shall mean any form resulting from mechanical 32 | transformation or translation of a Source form, including but 33 | not limited to compiled object code, generated documentation, 34 | and conversions to other media types. 35 | 36 | "Work" shall mean the work of authorship, whether in Source or 37 | Object form, made available under the License, as indicated by a 38 | copyright notice that is included in or attached to the work 39 | (an example is provided in the Appendix below). 40 | 41 | "Derivative Works" shall mean any work, whether in Source or Object 42 | form, that is based on (or derived from) the Work and for which the 43 | editorial revisions, annotations, elaborations, or other modifications 44 | represent, as a whole, an original work of authorship. For the purposes 45 | of this License, Derivative Works shall not include works that remain 46 | separable from, or merely link (or bind by name) to the interfaces of, 47 | the Work and Derivative Works thereof. 
48 | 49 | "Contribution" shall mean any work of authorship, including 50 | the original version of the Work and any modifications or additions 51 | to that Work or Derivative Works thereof, that is intentionally 52 | submitted to Licensor for inclusion in the Work by the copyright owner 53 | or by an individual or Legal Entity authorized to submit on behalf of 54 | the copyright owner. For the purposes of this definition, "submitted" 55 | means any form of electronic, verbal, or written communication sent 56 | to the Licensor or its representatives, including but not limited to 57 | communication on electronic mailing lists, source code control systems, 58 | and issue tracking systems that are managed by, or on behalf of, the 59 | Licensor for the purpose of discussing and improving the Work, but 60 | excluding communication that is conspicuously marked or otherwise 61 | designated in writing by the copyright owner as "Not a Contribution." 62 | 63 | "Contributor" shall mean Licensor and any individual or Legal Entity 64 | on behalf of whom a Contribution has been received by Licensor and 65 | subsequently incorporated within the Work. 66 | 67 | 2. Grant of Copyright License. Subject to the terms and conditions of 68 | this License, each Contributor hereby grants to You a perpetual, 69 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 70 | copyright license to reproduce, prepare Derivative Works of, 71 | publicly display, publicly perform, sublicense, and distribute the 72 | Work and such Derivative Works in Source or Object form. 73 | 74 | 3. Grant of Patent License. 
Subject to the terms and conditions of 75 | this License, each Contributor hereby grants to You a perpetual, 76 | worldwide, non-exclusive, no-charge, royalty-free, irrevocable 77 | (except as stated in this section) patent license to make, have made, 78 | use, offer to sell, sell, import, and otherwise transfer the Work, 79 | where such license applies only to those patent claims licensable 80 | by such Contributor that are necessarily infringed by their 81 | Contribution(s) alone or by combination of their Contribution(s) 82 | with the Work to which such Contribution(s) was submitted. If You 83 | institute patent litigation against any entity (including a 84 | cross-claim or counterclaim in a lawsuit) alleging that the Work 85 | or a Contribution incorporated within the Work constitutes direct 86 | or contributory patent infringement, then any patent licenses 87 | granted to You under this License for that Work shall terminate 88 | as of the date such litigation is filed. 89 | 90 | 4. Redistribution. 
You may reproduce and distribute copies of the 91 | Work or Derivative Works thereof in any medium, with or without 92 | modifications, and in Source or Object form, provided that You 93 | meet the following conditions: 94 | 95 | (a) You must give any other recipients of the Work or 96 | Derivative Works a copy of this License; and 97 | 98 | (b) You must cause any modified files to carry prominent notices 99 | stating that You changed the files; and 100 | 101 | (c) You must retain, in the Source form of any Derivative Works 102 | that You distribute, all copyright, patent, trademark, and 103 | attribution notices from the Source form of the Work, 104 | excluding those notices that do not pertain to any part of 105 | the Derivative Works; and 106 | 107 | (d) If the Work includes a "NOTICE" text file as part of its 108 | distribution, then any Derivative Works that You distribute must 109 | include a readable copy of the attribution notices contained 110 | within such NOTICE file, excluding those notices that do not 111 | pertain to any part of the Derivative Works, in at least one 112 | of the following places: within a NOTICE text file distributed 113 | as part of the Derivative Works; within the Source form or 114 | documentation, if provided along with the Derivative Works; or, 115 | within a display generated by the Derivative Works, if and 116 | wherever such third-party notices normally appear. The contents 117 | of the NOTICE file are for informational purposes only and 118 | do not modify the License. You may add Your own attribution 119 | notices within Derivative Works that You distribute, alongside 120 | or as an addendum to the NOTICE text from the Work, provided 121 | that such additional attribution notices cannot be construed 122 | as modifying the License. 
123 | 124 | You may add Your own copyright statement to Your modifications and 125 | may provide additional or different license terms and conditions 126 | for use, reproduction, or distribution of Your modifications, or 127 | for any such Derivative Works as a whole, provided Your use, 128 | reproduction, and distribution of the Work otherwise complies with 129 | the conditions stated in this License. 130 | 131 | 5. Submission of Contributions. Unless You explicitly state otherwise, 132 | any Contribution intentionally submitted for inclusion in the Work 133 | by You to the Licensor shall be under the terms and conditions of 134 | this License, without any additional terms or conditions. 135 | Notwithstanding the above, nothing herein shall supersede or modify 136 | the terms of any separate license agreement you may have executed 137 | with Licensor regarding such Contributions. 138 | 139 | 6. Trademarks. This License does not grant permission to use the trade 140 | names, trademarks, service marks, or product names of the Licensor, 141 | except as required for reasonable and customary use in describing the 142 | origin of the Work and reproducing the content of the NOTICE file. 143 | 144 | 7. Disclaimer of Warranty. Unless required by applicable law or 145 | agreed to in writing, Licensor provides the Work (and each 146 | Contributor provides its Contributions) on an "AS IS" BASIS, 147 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or 148 | implied, including, without limitation, any warranties or conditions 149 | of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A 150 | PARTICULAR PURPOSE. You are solely responsible for determining the 151 | appropriateness of using or redistributing the Work and assume any 152 | risks associated with Your exercise of permissions under this License. 153 | 154 | 8. Limitation of Liability. 
In no event and under no legal theory, 155 | whether in tort (including negligence), contract, or otherwise, 156 | unless required by applicable law (such as deliberate and grossly 157 | negligent acts) or agreed to in writing, shall any Contributor be 158 | liable to You for damages, including any direct, indirect, special, 159 | incidental, or consequential damages of any character arising as a 160 | result of this License or out of the use or inability to use the 161 | Work (including but not limited to damages for loss of goodwill, 162 | work stoppage, computer failure or malfunction, or any and all 163 | other commercial damages or losses), even if such Contributor 164 | has been advised of the possibility of such damages. 165 | 166 | 9. Accepting Warranty or Additional Liability. While redistributing 167 | the Work or Derivative Works thereof, You may choose to offer, 168 | and charge a fee for, acceptance of support, warranty, indemnity, 169 | or other liability obligations and/or rights consistent with this 170 | License. However, in accepting such obligations, You may act only 171 | on Your own behalf and on Your sole responsibility, not on behalf 172 | of any other Contributor, and only if You agree to indemnify, 173 | defend, and hold each Contributor harmless for any liability 174 | incurred by, or claims asserted against, such Contributor by reason 175 | of your accepting any such warranty or additional liability. 176 | 177 | END OF TERMS AND CONDITIONS 178 | 179 | APPENDIX: How to apply the Apache License to your work. 180 | 181 | To apply the Apache License to your work, attach the following 182 | boilerplate notice, with the fields enclosed by brackets "[]" 183 | replaced with your own identifying information. (Don't include 184 | the brackets!) The text should be enclosed in the appropriate 185 | comment syntax for the file format. 
We also recommend that a
   file or class name and description of purpose be included on the
   same "printed page" as the copyright notice for easier
   identification within third-party archives.

   Copyright [yyyy] [name of copyright owner]

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# ovn-heater

Mega script to install/configure/run
a simulated OVN cluster deployed with
[ovn-fake-multinode](https://github.com/ovn-org/ovn-fake-multinode).

**NOTE**: This script is designed to be used on test machines only. It
performs disruptive changes to the machines it is run on (e.g., cleaning up
existing containers).

# Prerequisites

## Physical topology

* ORCHESTRATOR: One machine that needs to be able to SSH passwordless
  (preferably as `root`) to all other machines in the topology. Performs the
  following:
  - prepares the test environment: clone the specified versions of `OVS` and
    `OVN` and build the `ovn-fake-multinode` image to be used by the `OVN`
    nodes.
  - provisions all other `OVN` nodes with the required software packages
    and with the correct version of `ovn-fake-multinode` to run simulated/fake
    `OVN` chassis.
24 | 25 | * TESTER: One machine to run the `ovn-tester` container which runs the python 26 | ovn-tester code. Like the ORCHESTRATOR, the TESTER also needs to be able to 27 | SSH passwordless to all other machines in the topology. 28 | * OVN-CENTRAL: One machine to run the `ovn-central` container(s) which 29 | run `ovn-northd` and the `Northbound` and `Southbound` databases. 30 | * OVN-WORKER-NODE(s): Machines to run `ovn-netlab` container(s), each of 31 | which will simulate an `OVN` chassis. 32 | 33 | The initial provisioning for all the nodes is performed by the `do.sh install` 34 | command. The simulated `OVN` chassis containers and central container are 35 | spawned by the test scripts in `ovn-tester/`. 36 | 37 | **NOTE**: `ovn-fake-multinode` assumes that all nodes (OVN-CENTRAL, TESTER and 38 | OVN-WORKER-NODEs) have an additional Ethernet interface connected to a 39 | single L2 switch. This interface will be used for traffic to/from the 40 | `Northbound` and `Southbound` databases and for tunneled traffic. 41 | 42 | **NOTE**: there's no restriction regarding physical machine roles so for 43 | debugging issues the ORCHESTRATOR, TESTER, OVN-CENTRAL and OVN-WORKER-NODEs can 44 | all be the same physical machine in which case there's no need for the 45 | secondary Ethernet interface to exist. 46 | 47 | ## Sample physical topology: 48 | * ORCHESTRATOR: `host01.mydomain.com` 49 | * TESTER: `host02.mydomain.com` 50 | * OVN-CENTRAL: `host03.mydomain.com` 51 | * OVN-WORKER-NODEs: 52 | - `host04.mydomain.com` 53 | - `host05.mydomain.com` 54 | 55 | TESTER, OVN-CENTRAL and OVN-WORKER-NODEs all have Ethernet interface `eno1` 56 | connected to a physical switch in a separate VLAN, as untagged interfaces. 57 | 58 | **NOTE**: The hostnames specified in the physical topology are used by both 59 | the ORCHESTRATOR and by the `ovn-tester` container running in the TESTER. 
60 | Therefore, the values need to be resolvable by both of these entities and 61 | need to resolve to the same host. `localhost` will not work since this does 62 | not resolve to a unique host. 63 | 64 | ## Minimal requirements on the ORCHESTRATOR node (tested on Fedora 38 and Ubuntu 22.10) 65 | 66 | ### Install required packages: 67 | 68 | #### RPM-based 69 | ``` 70 | dnf install -y git ansible \ 71 | ansible-collection-ansible-posix ansible-collection-ansible-utils 72 | ``` 73 | 74 | #### DEB-based 75 | ``` 76 | sudo apt -y install ansible 77 | ``` 78 | 79 | # Installation 80 | 81 | All the following installation steps are run on ORCHESTRATOR. 82 | 83 | ## Ensure all nodes can be accessed passwordless via SSH by ORCHESTRATOR and TESTER 84 | 85 | On Fedora 33 RSA keys are not considered secure enough, an alternative is: 86 | 87 | ``` 88 | ssh-keygen -t ed25519 -a 64 -N '' -f ~/.ssh/id_ed25519 89 | ``` 90 | 91 | Then append `~/.ssh/id_ed25519.pub` to `~/.ssh/authorized_keys` on all physical 92 | nodes. 93 | 94 | ## Get the code: 95 | 96 | ``` 97 | cd 98 | git clone https://github.com/ovn-org/ovn-heater.git 99 | ``` 100 | 101 | ## Write the physical deployment description yaml file: 102 | 103 | A sample file written for the deployment described above is available at 104 | `physical-deployments/physical-deployment.yml`. 105 | 106 | The file should contain the following mandatory sections and fields: 107 | - `internal-iface`: the name of the Ethernet interface used by the underlay 108 | (DB and tunnel traffic). This can be overridden per node if needed. 109 | - `tester-node`: 110 | - `name`: the hostname (or IP) of the node that will run `ovn-tester` (the 111 | python code that performs the actual test) 112 | - `ssh_key`: An ssh private key to install in the TESTER that can be used 113 | to communicate with the other machines in the cluster. 
114 | Default: `~/.ssh/id_rsa` 115 | - `central-nodes`: 116 | - `name`: the hostnames (or IPs) of the nodes that will run `ovn-central` 117 | (`ovn-northd` and databases). 118 | - `worker-nodes`: 119 | - the list of worker node hostnames (or IPs). If needed, worker nodes can 120 | be further customized using the per-node optional fields described below. 121 | 122 | Global optional fields: 123 | - `user`: the username to be used when connecting from the tester node. 124 | Default: `root`. 125 | - `prefix`: a string (no constraints) that will be used to prefix container 126 | names for all containers that run `OVN` fake chassis. For example, 127 | `prefix: ovn-test` will generate container names of the form 128 | `ovn-test-X-Y` where `X` is the unique part of the worker hostname and `Y` 129 | is the worker node local index. Default: `ovn-scale`. 130 | - `max-containers`: the maximum number of containers allowed to run on one 131 | host. Default: 100. 132 | 133 | In case some of the physical machines in the setup have different 134 | capabilities (e.g, could host more containers, or use a different ethernet 135 | interface), the following per-node fields can be used to customize the 136 | deployment. Except for `fake-nodes` which is valid only in the context of 137 | worker nodes, all others are valid both for the `central-nodes` and also for 138 | `worker-nodes`: 139 | - `user`: the username to be used when connecting from the tester node. 140 | - `internal-iface`: the name of the Ethernet interface used for DB and 141 | tunnel traffic. This overrides the `internal-iface` global configuration. 142 | - `fake-nodes`: the maximum number of containers allowed to run on this 143 | host. If not specified, the value of `max-containers` from the global 144 | section is used instead. 
145 | 146 | ## Perform the installation step: 147 | 148 | This must be run on the ORCHESTRATOR node and generates a `runtime` directory, a 149 | `runtime/hosts` ansible inventory and installs all test components on 150 | all other nodes. 151 | 152 | ``` 153 | cd ~/ovn-heater 154 | ./do.sh install 155 | ``` 156 | 157 | This step will: 158 | - clone OVS, OVN and ovn-fake-multinode upstream main branches in the 159 | `runtime` directory. 160 | - build the `ovn/ovn-multi-node` container image which will be used by the 161 | fake nodes spawned during the tests. OVS/OVN binaries are built with 162 | `CFLAGS="-g -O2 -fno-omit-frame-pointer"`. More aggressive optimizations 163 | can be enabled by setting the `EXTRA_OPTIMIZE=yes` environment variable 164 | (`EXTRA_OPTIMIZE=yes ./do.sh install`). 165 | - push the container image to all other nodes and prepare the test environment. 166 | - build the `ovn/ovn-tester` container image which will be used by the TESTER 167 | node to run the ovn-tester application. 168 | - push the `ovn/ovn-tester` container image to the TESTER node. 
169 | 170 | To override the OVS, OVN or ovn-fake-multinode repos/branches use the 171 | following environment variables: 172 | - OVS_REPO, OVS_BRANCH 173 | - OVN_REPO, OVN_BRANCH 174 | - OVN_FAKE_MULTINODE_REPO, OVN_FAKE_MULTINODE_BRANCH 175 | 176 | For example, installing components with custom OVS/OVN code: 177 | 178 | ``` 179 | cd ~/ovn-heater 180 | OVS_REPO=https://github.com/dceara/ovs OVS_BRANCH=tmp-branch OVN_REPO=https://github.com/dceara/ovn OVN_BRANCH=tmp-branch-2 ./do.sh install 181 | ``` 182 | 183 | To override base image of ovn-fake-multinode, which is by default 184 | `fedora:latest`, you can use following environment variables: 185 | - OS_BASE 186 | - OS_IMAGE_OVERRIDE 187 | 188 | For example, to use latest Ubuntu image you can run: 189 | 190 | ``` 191 | cd ~/ovn-heater 192 | OS_BASE=ubuntu OS_IMAGE_OVERRIDE=ubuntu:rolling ./do.sh install 193 | ``` 194 | 195 | ## Perform a reinstallation (e.g., new OVS/OVN versions are needed): 196 | 197 | For OVS, OVN or ovn-fake-multinode code changes to be reflected the 198 | `ovn/ovn-multi-node` container image must be rebuilt. The simplest 199 | way to achieve that is to remove the current `runtime` directory and 200 | reinstall: 201 | 202 | ``` 203 | cd ~/ovn-heater 204 | rm -rf runtime 205 | OVS_REPO=... OVS_BRANCH=... OVN_REPO=... OVN_BRANCH=... 
./do.sh install 206 | ``` 207 | 208 | ## Perform a reinstallation (e.g., install OVS/OVN from rpm packages): 209 | 210 | ``` 211 | cd ~/ovn-heater 212 | rm -rf runtime 213 | ``` 214 | 215 | Run the installation with rpm packages parameters specified: 216 | 217 | ``` 218 | cd ~/ovn-heater 219 | RPM_SELINUX=$rpm_url_openvswitch-selinux-extra-policy RPM_OVS=$rpm_url_openvswitch RPM_OVN_COMMON=$rpm_url_ovn RPM_OVN_HOST=$rpm_url_ovn-host RPM_OVN_CENTRAL=$rpm_url_ovn-central ./do.sh install 220 | ``` 221 | 222 | ## Update Tester code 223 | 224 | To update code in Tester container run: 225 | 226 | ``` 227 | cd ~/ovn-heater 228 | ./do.sh refresh-tester 229 | ``` 230 | 231 | This is handy if you are just making changes to the code inside `ovn-tester` 232 | package, and you don't need to rebuild `OVN`/`OVS` packages or 233 | `fake-multinode` image. 234 | 235 | ## Regenerate the ansible inventory: 236 | 237 | If the physical topology has changed then update 238 | `physical-deployment/physical-deployment.yml` to reflect the new physical 239 | deployment. 240 | 241 | Then generate the new ansible inventory: 242 | 243 | ``` 244 | cd ~/ovn-heater 245 | ./do.sh generate 246 | ``` 247 | 248 | # Running tests: 249 | 250 | Testing steps are executed on ORCHESTRATOR node. 251 | 252 | ## Scenario definitions 253 | 254 | Scenarios are defined in `ovn-tester/ovn_tester.py` and are configurable 255 | through YAML files. Sample scenario configurations are available in 256 | `test-scenarios/*.yml`. 257 | 258 | ## Scenario execution 259 | 260 | ``` 261 | cd ~/ovn-heater 262 | ./do.sh run 263 | ``` 264 | 265 | This executes `` on the physical deployment (specifically on the 266 | `ovn-tester` container on the TESTER). Current scenarios also cleanup the 267 | environment, i.e., remove all containers from all physical nodes. 
**NOTE**: If the environment needs to be explicitly cleaned up, we can also
execute before running the scenario:

```
cd ~/ovn-heater
./do.sh init
```

The results will be stored in `test_results/`. The results
consist of:
- a `config` file where remote urls and SHA/branch-name of all test components
  (ovn-fake-multinode, ovs, ovn) are stored.
- an `installer-log` where the output of the `./do.sh install` command is
  stored.
- html reports
- a copy of the `hosts` ansible inventory used for the test.
- OVN container logs (i.e., ovn-northd, ovn-controller, ovs-vswitchd,
  ovsdb-server logs).
- physical nodes journal files.
- perf sampling results if enabled

## Example: run 20 nodes "density light"

```
cd ~/ovn-heater
./do.sh run test-scenarios/ocp-20-density-light.yml test-20-density-light
```

This test consists of two stages:
- bring up a base cluster having 20 worker nodes (`n_workers`) and 10 simulated
  pods/vms (`n_pods_per_node`) on each of the nodes.
- provision 4000 initial pods (`n_startup`) distributed across the 20 workers.
- provision the remaining 1000 pods (up to `n_pods`) and measure the time it
  takes for each of them to become reachable.

Results will be stored in `~ovn-heater/test_results/test-20-density-light*/`:
- `config`: remote urls and SHA/branch-names of components used by the test.
- `hosts`: the autogenerated ansible host inventory.
- `logs`: the OVN container logs and journal files from each physical node.
- `*html`: the html reports for each of the scenarios run.
308 | 309 | ## Example: run 20 nodes "density heavy" 310 | 311 | ``` 312 | cd ~/ovn-heater 313 | ./do.sh run test-scenarios/ocp-20-density-heavy.yml test-20-density-heavy 314 | ``` 315 | 316 | This test consists of two stages: 317 | - bring up a base cluster having 20 worker nodes (`n_workers`) and 10 simulated 318 | pods/vms (`n_pods_per_node`) on each of the nodes. 319 | - provision 4000 initial pods (`n_startup`) distributed across the 20 workers. 320 | - for every other pod (`pods_vip_ratio`), provision a load balancer VIP using 321 | the pod as backend. 322 | - provision the remaining 1000 pods (up to `n_pods`) and 500 VIPs and measure 323 | the time it takes for each of them to become reachable. 324 | 325 | Results will be stored in `~ovn-heater/test_results/test-20-density-heavy*/`: 326 | - `config`: remote urls and SHA/branch-names of components used by the test. 327 | - `hosts`: the autogenerated ansible host inventory. 328 | - `logs`: the OVN container logs and journal files from each physical node. 329 | - `*html`: the html reports for each of the scenarios run. 330 | 331 | ## Example: run 20 nodes "cluster density" 332 | 333 | ``` 334 | cd ~/ovn-heater 335 | ./do.sh run test-scenarios/ocp-20-cluster-density.yml test-20-cluster-density 336 | ``` 337 | 338 | This test consists of two stages: 339 | - bring up a base cluster having 20 worker nodes (`n_workers`) and 10 simulated 340 | pods/vms (`n_pods_per_node`) on each of the nodes. 341 | - run 500 iterations (`n_runs`) each of which: 342 | - provisions 6 short-lived pods (removed at the end of the iteration) 343 | - provisions 4 long-lived pods (survive the end of the iteration) 344 | - creates a VIP with 2 backends (2 of the long-lived pods) 345 | - creates two VIPs with one backend each (the remaining 2 long-lived pods) 346 | - for the last 100 iterations (`n_runs` - `n_startup`) measure the time it 347 | takes for the pods to become reachable. 
348 | 349 | Results will be stored in `~ovn-heater/test_results/test-20-cluster-density*/`: 350 | - `config`: remote urls and SHA/branch-names of components used by the test. 351 | - `hosts`: the autogenerated ansible host inventory. 352 | - `logs`: the OVN container logs and journal files from each physical node. 353 | - `*html`: the html reports for each of the scenarios run. 354 | 355 | ## Scenario execution with DBs in standalone mode 356 | 357 | By default tests configure NB/SB ovsdb-servers to run in clustered mode 358 | (RAFT). If instead tests should be run in standalone mode then the test 359 | scenarios must be adapted by setting `clustered_db: false` in the `cluster` 360 | section of the test scenario YAML file. 361 | 362 | ## Scenario execution with ovsdb-etcd in standalone node 363 | 364 | This test requires ovn-fake-multinode, etcd and ovsdb-etcd 365 | 366 | to build and run with ETCD 367 | 368 | ``` 369 | USE_OVSDB_ETCD=yes ./do.sh install 370 | 371 | cd ~/ovn-heater 372 | ./do.sh run test-scenarios/ovn-etcd-low-scale.yml etcd-test-low-scale 373 | ``` 374 | 375 | The following fields are important for ovn-fake-node to detect and run ovsdb-etcd 376 | ``` 377 | enable_ssl: False 378 | use_ovsdb_etcd: true 379 | ``` 380 | 381 | # Contributing to ovn-heater 382 | 383 | Please check out our [contributing guidelines](./CONTRIBUTING.md) for 384 | instructions about contributing patches to ovn-heater. Please open 385 | [GitHub issues](https://github.com/ovn-org/ovn-heater/issues) for 386 | reporting any potential bugs or for requesting new ovn-heater 387 | features. 
--------------------------------------------------------------------------------
/ovn-tester/ovn_workload.py:
--------------------------------------------------------------------------------
import logging
import ovn_exceptions
import ovn_sandbox
import ovn_stats
import ovn_utils
import time
import netaddr
from collections import namedtuple
from collections import defaultdict
from datetime import datetime
from typing import List, Optional

log = logging.getLogger(__name__)


# Immutable bag of cluster-wide settings read from the test-scenario YAML.
ClusterConfig = namedtuple(
    'ClusterConfig',
    [
        'monitor_all',
        'logical_dp_groups',
        'clustered_db',
        'log_txns_db',
        'datapath_type',
        'raft_election_to',
        'northd_probe_interval',
        'northd_threads',
        'db_inactivity_probe',
        'node_net',
        'enable_ssl',
        'node_timeout_s',
        'internal_net',
        'external_net',
        'gw_net',
        'ts_net',
        'cluster_net',
        'n_workers',
        'n_relays',
        'n_az',
        'vips',
        'vips6',
        'vip_subnet',
        'static_vips',
        'static_vips6',
        'use_ovsdb_etcd',
        'ssl_private_key',
        'ssl_cert',
        'ssl_cacert',
    ],
)


# Configuration for the external bridge (br-ex) physical network mapping.
BrExConfig = namedtuple('BrExConfig', ['physical_net'])


class Node(ovn_sandbox.Sandbox):
    """Base class for the containers that make up the simulated cluster.

    Wraps an ovn_sandbox.Sandbox with the container's management IP and the
    protocol (e.g. tcp/ssl) used to build DB connection strings.
    """

    def __init__(self, phys_node, container: str, mgmt_ip: str, protocol: str):
        super().__init__(phys_node, container)
        self.container = container
        self.mgmt_ip = netaddr.IPAddress(mgmt_ip)
        self.protocol = protocol


class CentralNode(Node):
    """A node hosting the NB/SB databases and ovn-northd."""

    def __init__(self, phys_node, container: str, mgmt_ip: str, protocol: str):
        super().__init__(phys_node, container, mgmt_ip, protocol)

    def start(
        self, cluster_cfg: ClusterConfig, update_election_timeout: bool = False
    ):
        """Apply DB/northd tuning; optionally bump the RAFT election timer
        (only meaningful when the DBs run in clustered mode)."""
        log.info('Configuring central node')
        if cluster_cfg.clustered_db and update_election_timeout:
self.set_raft_election_timeout(cluster_cfg.raft_election_to)
        self.enable_trim_on_compaction()
        self.set_northd_threads(cluster_cfg.northd_threads)
        if cluster_cfg.log_txns_db:
            self.enable_txns_db_logging()

    def set_northd_threads(self, n_threads: int):
        """Set the number of ovn-northd parallel-build worker threads."""
        log.info(f'Configuring northd to use {n_threads} threads')
        self.phys_node.run(
            f'podman exec {self.container} ovn-appctl -t '
            f'ovn-northd parallel-build/set-n-threads '
            f'{n_threads}'
        )

    def set_raft_election_timeout(self, timeout_s: int):
        # The election timer is raised gradually, in 1000ms steps with a 1s
        # pause between them, up to the requested value (presumably because
        # ovsdb-server only accepts incremental timer changes -- TODO
        # confirm against ovsdb-server(1) cluster/change-election-timer).
        for timeout in range(1000, (timeout_s + 1) * 1000, 1000):
            log.info(f'Setting RAFT election timeout to {timeout}ms')
            self.run(
                cmd=f'ovs-appctl -t '
                f'/run/ovn/ovnnb_db.ctl cluster/change-election-timer '
                f'OVN_Northbound {timeout}'
            )
            self.run(
                cmd=f'ovs-appctl -t '
                f'/run/ovn/ovnsb_db.ctl cluster/change-election-timer '
                f'OVN_Southbound {timeout}'
            )
            time.sleep(1)

    def enable_trim_on_compaction(self):
        """Enable memory trim-on-compaction for both NB and SB DBs."""
        log.info('Setting DB trim-on-compaction')
        self.phys_node.run(
            f'podman exec {self.container} ovs-appctl -t '
            f'/run/ovn/ovnnb_db.ctl '
            f'ovsdb-server/memory-trim-on-compaction on'
        )
        self.phys_node.run(
            f'podman exec {self.container} ovs-appctl -t '
            f'/run/ovn/ovnsb_db.ctl '
            f'ovsdb-server/memory-trim-on-compaction on'
        )

    def enable_txns_db_logging(self):
        """Log NB Logical_Switch_Port and SB Port_Binding transactions,
        with transaction log rate limiting disabled."""
        log.info('Enable DB txn logging')
        self.run(
            cmd='ovs-appctl -t /run/ovn/ovnnb_db.ctl '
            'ovsdb-server/tlog-set OVN_Northbound:Logical_Switch_Port on'
        )
        self.run(
            cmd='ovs-appctl -t /run/ovn/ovnnb_db.ctl '
            'vlog/disable-rate-limit transaction'
        )
        self.run(
            cmd='ovs-appctl -t /run/ovn/ovnsb_db.ctl '
            'ovsdb-server/tlog-set OVN_Southbound:Port_Binding on'
        )
        self.run(
            cmd='ovs-appctl -t /run/ovn/ovnsb_db.ctl '
'vlog/disable-rate-limit transaction'
        )

    def get_connection_string(self, port: int):
        """Return a DB connection string, e.g. 'ssl:10.0.0.1:6641'."""
        return f'{self.protocol}:{self.mgmt_ip}:{port}'


class RelayNode(Node):
    """A node running an ovsdb relay; only the SB DB is configured here."""

    def __init__(self, phys_node, container: str, mgmt_ip: str, protocol: str):
        super().__init__(phys_node, container, mgmt_ip, protocol)

    def start(self):
        log.info(f'Configuring relay node {self.container}')
        self.enable_trim_on_compaction()

    def get_connection_string(self, port: int):
        return f'{self.protocol}:{self.mgmt_ip}:{port}'

    def enable_trim_on_compaction(self):
        """Enable memory trim-on-compaction on the relayed SB DB."""
        log.info('Setting DB trim-on-compaction')
        self.phys_node.run(
            f'podman exec {self.container} ovs-appctl -t '
            f'/run/ovn/ovnsb_db.ctl '
            f'ovsdb-server/memory-trim-on-compaction on'
        )


class ChassisNode(Node):
    """A worker node simulating an OVN chassis."""

    def __init__(
        self,
        phys_node,
        container: str,
        mgmt_ip: str,
        protocol: str,
    ):
        super().__init__(phys_node, container, mgmt_ip, protocol)
        self.switch = None      # node logical switch, set by subclasses
        self.gw_router = None   # gateway router, set by subclasses
        self.ext_switch = None  # external switch, set by subclasses
        self.lports = []        # logical ports currently bound on this node
        self.next_lport_index = 0
        self.vsctl: Optional[ovn_utils.OvsVsctl] = None

    def start(self, cluster_cfg: ClusterConfig):
        # ovs-vsctl wrapper talking to this chassis' local OVSDB on port
        # 6640; db_inactivity_probe is converted from ms to s -- presumably
        # the wrapper takes seconds, TODO confirm in ovn_utils.
        self.vsctl = ovn_utils.OvsVsctl(
            self,
            self.get_connection_string(6640),
            cluster_cfg.db_inactivity_probe // 1000,
        )

    @ovn_stats.timeit
    def connect(self, remote: str):
        """Point this chassis' ovn-remote at the (SB) DB endpoint(s)."""
        log.info(
            f'Connecting worker {self.container}: ' f'ovn-remote = {remote}'
        )
        self.vsctl.set_global_external_id('ovn-remote', f'{remote}')

    def configure_localnet(self, physical_net: str):
        """Map the physical network name to the br-ex bridge."""
        log.info(f'Creating localnet on {self.container}')
        self.vsctl.set_global_external_id(
            'ovn-bridge-mappings', f'{physical_net}:br-ex'
        )

    @ovn_stats.timeit
    def wait(self, sbctl, timeout_s: int):
for _ in range(timeout_s * 10): 196 | if sbctl.chassis_bound(self.container): 197 | return 198 | time.sleep(0.1) 199 | raise ovn_exceptions.OvnChassisTimeoutException() 200 | 201 | @ovn_stats.timeit 202 | def unprovision_port(self, cluster, port: ovn_utils.LSPort): 203 | cluster.nbctl.ls_port_del(port) 204 | self.unbind_port(port) 205 | self.lports.remove(port) 206 | 207 | @ovn_stats.timeit 208 | def bind_port( 209 | self, port: ovn_utils.LSPort, mtu_request: Optional[int] = None 210 | ): 211 | log.info(f'Binding lport {port.name} on {self.container}') 212 | self.vsctl.add_port( 213 | port, 214 | 'br-int', 215 | internal=True, 216 | ifaceid=port.name, 217 | mtu_request=mtu_request, 218 | ) 219 | # Skip creating a netns for "passive" ports, we won't be sending 220 | # traffic on those. 221 | if not port.passive: 222 | self.vsctl.bind_vm_port(port) 223 | 224 | @ovn_stats.timeit 225 | def unbind_port(self, port: ovn_utils.LSPort): 226 | if not port.passive: 227 | self.vsctl.unbind_vm_port(port) 228 | self.vsctl.del_port(port) 229 | 230 | def provision_ports( 231 | self, cluster, n_ports: int, passive: bool = False 232 | ) -> List[ovn_utils.LSPort]: 233 | ports = [self.provision_port(cluster, passive) for i in range(n_ports)] 234 | for port in ports: 235 | self.bind_port(port) 236 | return ports 237 | 238 | def run_ping(self, cluster, src: str, dest: str): 239 | log.info(f'Pinging from {src} to {dest}') 240 | 241 | # FIXME 242 | # iputils is inconsistent when working with sub-second timeouts. 243 | # The behavior of ping's "-W" option changed a couple of times already. 244 | # https://github.com/iputils/iputils/issues/290 245 | # Until that's stable use "timeout 0.1s" instead. 
246 | cmd = f'ip netns exec {src} timeout 0.1s ping -q -c 1 {dest}' 247 | start_time = datetime.now() 248 | while True: 249 | try: 250 | self.run(cmd=cmd, raise_on_error=True) 251 | break 252 | except ovn_exceptions.SSHError: 253 | pass 254 | 255 | duration = (datetime.now() - start_time).seconds 256 | if duration > cluster.cluster_cfg.node_timeout_s: 257 | log.error( 258 | f'Timeout waiting for {src} ' f'to be able to ping {dest}' 259 | ) 260 | raise ovn_exceptions.OvnPingTimeoutException() 261 | 262 | @ovn_stats.timeit 263 | def ping_port(self, cluster, port: ovn_utils.LSPort, dest: str): 264 | self.run_ping(cluster, port.name, dest) 265 | 266 | def ping_ports(self, cluster, ports: List[ovn_utils.LSPort]): 267 | for port in ports: 268 | if port.ip: 269 | self.ping_port(cluster, port, dest=port.ext_gw) 270 | if port.ip6: 271 | self.ping_port(cluster, port, dest=port.ext_gw6) 272 | 273 | def get_connection_string(self, port: int): 274 | return f"{self.protocol}:{self.mgmt_ip}:{port}" 275 | 276 | def configure(self, physical_net: str): 277 | raise NotImplementedError 278 | 279 | @ovn_stats.timeit 280 | def provision(self, cluster): 281 | raise NotImplementedError 282 | 283 | @ovn_stats.timeit 284 | def provision_port(self, cluster, passive=False): 285 | raise NotImplementedError 286 | 287 | @ovn_stats.timeit 288 | def ping_external(self, cluster, port): 289 | raise NotImplementedError 290 | 291 | 292 | class Cluster: 293 | def __init__( 294 | self, 295 | cluster_cfg: ClusterConfig, 296 | central, 297 | brex_cfg: BrExConfig, 298 | az: int, 299 | ): 300 | # In clustered mode use the first node for provisioning. 
301 | self.worker_nodes = [] 302 | self.cluster_cfg = cluster_cfg 303 | self.brex_cfg = brex_cfg 304 | self.nbctl: Optional[ovn_utils.OvnNbctl] = None 305 | self.sbctl: Optional[ovn_utils.OvnSbctl] = None 306 | self.az = az 307 | 308 | protocol = "ssl" if cluster_cfg.enable_ssl else "tcp" 309 | db_containers = ( 310 | [ 311 | f'ovn-central-az{self.az}-1', 312 | f'ovn-central-az{self.az}-2', 313 | f'ovn-central-az{self.az}-3', 314 | ] 315 | if cluster_cfg.clustered_db 316 | else [f'ovn-central-az{self.az}-1'] 317 | ) 318 | 319 | mgmt_ip = ( 320 | cluster_cfg.node_net.ip 321 | + 2 322 | + self.az * (len(db_containers) + cluster_cfg.n_relays) 323 | ) 324 | self.central_nodes = [ 325 | CentralNode(central, c, mgmt_ip + i, protocol) 326 | for i, c in enumerate(db_containers) 327 | ] 328 | 329 | mgmt_ip += len(db_containers) 330 | self.relay_nodes = [ 331 | RelayNode( 332 | central, 333 | f'ovn-relay-az{self.az}-{i + 1}', 334 | mgmt_ip + i, 335 | protocol, 336 | ) 337 | for i in range(cluster_cfg.n_relays) 338 | ] 339 | 340 | def add_cluster_worker_nodes(self, workers): 341 | raise NotImplementedError 342 | 343 | def add_workers(self, worker_nodes): 344 | self.worker_nodes.extend(worker_nodes) 345 | 346 | def prepare_test(self): 347 | self.start() 348 | 349 | def start(self): 350 | for c in self.central_nodes: 351 | c.start( 352 | self.cluster_cfg, 353 | update_election_timeout=(c is self.central_nodes[0]), 354 | ) 355 | nb_conn = self.get_nb_connection_string() 356 | inactivity_probe = self.cluster_cfg.db_inactivity_probe // 1000 357 | self.nbctl = ovn_utils.OvnNbctl( 358 | self.central_nodes[0], nb_conn, inactivity_probe 359 | ) 360 | 361 | sb_conn = self.get_sb_connection_string() 362 | self.sbctl = ovn_utils.OvnSbctl( 363 | self.central_nodes[0], sb_conn, inactivity_probe 364 | ) 365 | 366 | # ovn-ic configuration: enable route learning/advertising to allow 367 | # automatic pinging between cluster_net subnets in different AZs. 
368 | # This is required for IC connectivity checks. 369 | self.nbctl.set_global('ic-route-learn', 'true') 370 | self.nbctl.set_global('ic-route-adv', 'true') 371 | 372 | for r in self.relay_nodes: 373 | r.start() 374 | 375 | for w in self.worker_nodes: 376 | w.start(self.cluster_cfg) 377 | w.configure(self.brex_cfg.physical_net) 378 | 379 | self.nbctl.set_global( 380 | 'use_logical_dp_groups', self.cluster_cfg.logical_dp_groups 381 | ) 382 | self.nbctl.set_global( 383 | 'northd_probe_interval', self.cluster_cfg.northd_probe_interval 384 | ) 385 | self.nbctl.set_global_name(f'az{self.az}') 386 | self.nbctl.set_inactivity_probe(self.cluster_cfg.db_inactivity_probe) 387 | self.sbctl.set_inactivity_probe(self.cluster_cfg.db_inactivity_probe) 388 | 389 | def get_nb_connection_string(self): 390 | return ','.join( 391 | [db.get_connection_string(6641) for db in self.central_nodes] 392 | ) 393 | 394 | def get_sb_connection_string(self): 395 | return ','.join( 396 | [db.get_connection_string(6642) for db in self.central_nodes] 397 | ) 398 | 399 | def get_relay_connection_string(self): 400 | if len(self.relay_nodes) > 0: 401 | return ','.join( 402 | [db.get_connection_string(6642) for db in self.relay_nodes] 403 | ) 404 | return self.get_sb_connection_string() 405 | 406 | def provision_ports(self, n_ports, passive=False): 407 | return [ 408 | self.select_worker_for_port().provision_ports(self, 1, passive)[0] 409 | for _ in range(n_ports) 410 | ] 411 | 412 | def unprovision_ports(self, ports: List[ovn_utils.LSPort]): 413 | for port in ports: 414 | worker = port.metadata 415 | worker.unprovision_port(self, port) 416 | 417 | def ping_ports(self, ports: List[ovn_utils.LSPort]): 418 | ports_per_worker = defaultdict(list) 419 | for p in ports: 420 | ports_per_worker[p.metadata].append(p) 421 | for w, ports in ports_per_worker.items(): 422 | w.ping_ports(self, ports) 423 | 424 | @ovn_stats.timeit 425 | def mesh_ping_ports(self, ports: List[ovn_utils.LSPort]) -> None: 426 | 
"""Perform full-mesh ping test between ports.""" 427 | all_ips = [port.ip for port in ports] 428 | 429 | for port in ports: 430 | chassis: Optional[ChassisNode] = port.metadata 431 | if chassis is None: 432 | log.error( 433 | f"Port {port.name} is missing 'metadata' attribute. " 434 | f"Can't perform ping." 435 | ) 436 | continue 437 | 438 | for dest_ip in all_ips: 439 | if dest_ip == port.ip: 440 | continue 441 | chassis.ping_port(self, port, dest_ip) 442 | 443 | def select_worker_for_port(self): 444 | self.last_selected_worker += 1 445 | self.last_selected_worker %= len(self.worker_nodes) 446 | return self.worker_nodes[self.last_selected_worker] 447 | -------------------------------------------------------------------------------- /do.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | set -o errexit -o pipefail 4 | 5 | topdir=$(pwd) 6 | rundir_name=runtime 7 | rundir=${topdir}/${rundir_name} 8 | deployment_dir=${topdir}/physical-deployments 9 | result_dir=${topdir}/test_results 10 | 11 | phys_deployment="${PHYS_DEPLOYMENT:-${deployment_dir}/physical-deployment.yml}" 12 | ovn_heater_venv=venv 13 | 14 | clustered_db=${CLUSTERED_DB:-True} 15 | 16 | ovn_fmn_utils=${topdir}/ovn-fake-multinode-utils 17 | ovn_fmn_playbooks=${ovn_fmn_utils}/playbooks 18 | ovn_fmn_generate=${ovn_fmn_utils}/generate-hosts.py 19 | ovn_fmn_get=${ovn_fmn_utils}/get-config-value.py 20 | ovn_fmn_ip=${rundir}/ovn-fake-multinode/ip_gen.py 21 | ovn_fmn_translate=${ovn_fmn_utils}/translate_yaml.py 22 | hosts_file=${rundir}/hosts 23 | installer_log_file=${rundir}/installer-log 24 | log_collector_file=${rundir}/log-collector.sh 25 | log_perf_file=${rundir}/perf.sh 26 | process_monitor_file=${rundir}/process-monitor.py 27 | 28 | ovn_tester=${topdir}/ovn-tester 29 | 30 | EXTRA_OPTIMIZE=${EXTRA_OPTIMIZE:-no} 31 | USE_OVSDB_ETCD=${USE_OVSDB_ETCD:-no} 32 | 33 | # We want values from both the `ID` and `ID_LIKE` fields to ensure successful 34 | # 
# categorization. The shell will happily accept both spaces and newlines as
# separators:
# https://pubs.opengroup.org/onlinepubs/9699919799/utilities/V3_chap02.html#tag_18_06_05
DISTRO_IDS=$(awk -F= '/^ID/{print$2}' /etc/os-release | tr -d '"')

DISTRO_VERSION_ID=$(awk -F= '/^VERSION_ID/{print$2}' /etc/os-release | tr -d '"')

# Succeed if the local distro is rpm-based (CentOS/RHEL/Fedora).
function is_rpm_based() {
    for id in $DISTRO_IDS; do
        case $id in
            centos* | rhel* | fedora*)
                true
                return
                ;;
        esac
    done
    false
}

# Succeed if the local distro is CentOS or RHEL.
function is_rhel() {
    for id in $DISTRO_IDS; do
        case $id in
            centos* | rhel*)
                true
                return
                ;;
        esac
    done
    false
}

# Succeed if the local distro is Fedora.
function is_fedora() {
    for id in $DISTRO_IDS; do
        case $id in
            fedora*)
                true
                return
                ;;
        esac
    done
    false
}

# Succeed if the local distro is deb-based (Debian/Ubuntu).
function is_deb_based() {
    for id in $DISTRO_IDS; do
        case $id in
            debian* | ubuntu*)
                true
                return
                ;;
        esac
    done
    false
}

# Print an error message and abort.
function die() {
    # Quote "$1": the original unquoted expansion word-split multi-word
    # messages and collapsed their internal whitespace.
    echo "$1"
    exit 1
}

function die_distro() {
    die "Unable to determine distro type, rpm- and deb-based are supported."
}

# Generate the ansible hosts file and copy helper scripts into rundir.
function generate() {
    # Make sure rundir exists.
    mkdir -p ${rundir}

    PYTHONPATH=${topdir}/utils ${ovn_fmn_generate} ${phys_deployment} ${rundir} ${ovn_fmn_repo} ${ovn_fmn_branch} > ${hosts_file}
    cp ${ovn_fmn_utils}/process-monitor.py ${process_monitor_file}
    cp ${ovn_fmn_utils}/scripts/log-collector.sh ${log_collector_file}
    cp ${ovn_fmn_utils}/scripts/perf.sh ${log_perf_file}
}

function install_deps_local_rpm() {
    echo "-- Installing local dependencies"
    yum install redhat-lsb-core datamash \
        python3-netaddr python3 python3-devel \
        podman \
        --skip-broken -y
}

function install_deps_local_deb() {
    echo "-- Installing local dependencies"
    apt -y install datamash podman \
        python3-netaddr python3 python3-all-dev python3-venv
}

function install_deps_remote() {
    echo "-- Installing dependencies on all nodes"
    ansible-playbook ${ovn_fmn_playbooks}/install-dependencies.yml \
        -i ${hosts_file}
}

# Create (if missing) and refresh the local python virtualenv.
function install_venv() {
    pushd ${rundir}
    if [ ! -f ${ovn_heater_venv}/bin/activate ]; then
        rm -rf ${ovn_heater_venv}
        python3 -m venv ${ovn_heater_venv}
    fi
    source ${ovn_heater_venv}/bin/activate
    python3 -m ensurepip --upgrade
    python3 -m pip install --upgrade pip
    python3 -m pip install -r ${topdir}/utils/requirements.txt
    deactivate
    popd
}

# Clone (or reuse) a git component under rundir.
# Returns 0 if an up-to-date checkout already exists, 1 if it was (re)cloned.
function clone_component() {
    local comp_name=$1
    local comp_repo=$2
    local comp_branch=$3

    pushd ${rundir}
    local comp_exists="0"
    if [ -d ${comp_name} ]; then
        pushd ${comp_name}
        local remote=$(git config --get remote.origin.url)
        if [ "${remote}" = "${comp_repo}" ]; then
            git fetch origin

            # comp_branch may name a tag rather than a branch.
            if $(git show-ref --verify refs/tags/${comp_branch} &> /dev/null); then
                local branch_diff=$(git diff ${comp_branch} HEAD --stat | wc -l)
            else
                local branch_diff=$(git diff origin/${comp_branch} HEAD --stat | wc -l)
            fi
            if [ "${branch_diff}" = "0" ]; then
                comp_exists="1"
            fi
        fi
        popd
    fi

    if [ ${comp_exists} = "1" ]; then
        echo "-- Component ${comp_name} already installed"
        # Balance the pushd above: the original returned from both
        # branches before its popd, leaking a directory-stack entry on
        # every call and leaving the unbalanced trailing popd unreachable.
        popd
        return 0
    else
        rm -rf ${comp_name}
        echo "-- Cloning ${comp_name} from ${comp_repo} at revision ${comp_branch}"
        git clone ${comp_repo} ${comp_name} --branch ${comp_branch} --single-branch --depth 1
        popd
        return 1
    fi
}

# OVS/OVN env vars
ovs_repo="${OVS_REPO:-https://github.com/openvswitch/ovs.git}"
ovs_branch="${OVS_BRANCH:-main}"
ovn_repo="${OVN_REPO:-https://github.com/ovn-org/ovn.git}"
ovn_branch="${OVN_BRANCH:-main}"

# ovn-fake-multinode env vars
ovn_fmn_repo="${OVN_FAKE_MULTINODE_REPO:-https://github.com/ovn-org/ovn-fake-multinode.git}"
ovn_fmn_branch="${OVN_FAKE_MULTINODE_BRANCH:-main}"

OS_BASE="${OS_BASE:-fedora}"
OS_IMAGE_OVERRIDE="${OS_IMAGE_OVERRIDE}"
OS_IMAGE_DEFAULT="registry.fedoraproject.org/fedora:latest" 192 | 193 | function install_ovn_fake_multinode() { 194 | echo "-- Cloning ${ovn_fmn_repo} on all nodes, revision ${ovn_fmn_branch}" 195 | 196 | local rebuild_needed=0 197 | 198 | # Clone repo locally 199 | clone_component ovn-fake-multinode ${ovn_fmn_repo} ${ovn_fmn_branch} || rebuild_needed=1 200 | 201 | # Copy repo to all hosts. 202 | ansible-playbook ${ovn_fmn_playbooks}/install-fake-multinode.yml -i ${hosts_file} \ 203 | --extra-vars="ovn_fake_multinode_local_path=${rundir}/ovn-fake-multinode" 204 | 205 | if [ -n "$RPM_OVS" ] 206 | then 207 | [ -d ovs ] || { rm -rf ovs; mkdir ovs; } 208 | rebuild_needed=1 209 | else 210 | clone_component ovs ${ovs_repo} ${ovs_branch} || rebuild_needed=1 211 | fi 212 | 213 | if [ -n "$RPM_OVN_COMMON" ] 214 | then 215 | [ -d ovn ] || { rm -rf ovn; mkdir ovn; } 216 | rebuild_needed=1 217 | else 218 | clone_component ovn ${ovn_repo} ${ovn_branch} || rebuild_needed=1 219 | fi 220 | 221 | 222 | pushd ${rundir}/ovn-fake-multinode 223 | 224 | [ -n "$RPM_OVS" ] && wget $RPM_OVS 225 | [ -n "$RPM_SELINUX" ] && wget $RPM_SELINUX 226 | if [ -n "$RPM_OVN_COMMON" ] 227 | then 228 | wget $RPM_OVN_COMMON 229 | rpm_v=`basename $RPM_OVN_COMMON | awk -F '-' '{print $1}'` 230 | rpm_b=`basename $RPM_OVN_COMMON | sed 's/^'"$rpm_v"'\(.*\)/\1/'` 231 | RPM_OVN_CENTRAL=${RPM_OVN_CENTRAL:-"$(dirname $RPM_OVN_COMMON)/$rpm_v-central$rpm_b"} 232 | RPM_OVN_HOST=${RPM_OVN_HOST:-"$(dirname $RPM_OVN_COMMON)/$rpm_v-host$rpm_b"} 233 | [ -n "$RPM_OVN_CENTRAL" ] && wget $RPM_OVN_CENTRAL 234 | [ -n "$RPM_OVN_HOST" ] && wget $RPM_OVN_HOST 235 | fi 236 | 237 | podman images | grep -q 'ovn/ovn-multi-node' || rebuild_needed=1 238 | 239 | if [ ${rebuild_needed} -eq 1 ]; then 240 | if [ -z "${OS_IMAGE_OVERRIDE}" ]; then 241 | if is_fedora; then 242 | os_image="fedora:${DISTRO_VERSION_ID}" 243 | elif is_rhel; then 244 | [[ "${DISTRO_VERSION_ID}" =~ 7\..* ]] && 
os_image="registry.access.redhat.com/ubi7/ubi:${DISTRO_VERSION_ID}" 245 | [[ "${DISTRO_VERSION_ID}" =~ 8\..* ]] && os_image="registry.access.redhat.com/ubi8/ubi:${DISTRO_VERSION_ID}" 246 | [[ "${DISTRO_VERSION_ID}" =~ 9\..* ]] && os_image="registry.access.redhat.com/ubi9/ubi:${DISTRO_VERSION_ID}" 247 | else 248 | os_image=${OS_IMAGE_DEFAULT} 249 | fi 250 | else 251 | os_image=${OS_IMAGE_OVERRIDE} 252 | fi 253 | 254 | # Build images locally. 255 | OS_IMAGE=$os_image OS_BASE=${OS_BASE} OVS_SRC_PATH=${rundir}/ovs OVN_SRC_PATH=${rundir}/ovn \ 256 | EXTRA_OPTIMIZE=${EXTRA_OPTIMIZE} USE_OVSDB_ETCD=${USE_OVSDB_ETCD} \ 257 | RUNC_CMD=podman ./ovn_cluster.sh build 258 | fi 259 | 260 | popd 261 | } 262 | 263 | function install_ovn_tester() { 264 | ssh_key=$(${ovn_fmn_get} ${phys_deployment} tester-node ssh_key) 265 | # We need to copy the files into a known directory within the container 266 | # runtime context directory. Otherwise, podman can't find the files we 267 | # reference. 268 | cp ${ssh_key} . 269 | ssh_key_file=${rundir_name}/$(basename ${ssh_key}) 270 | podman build -t ovn/ovn-tester --build-arg SSH_KEY=${ssh_key_file} -f ${topdir}/Dockerfile ${topdir} 271 | } 272 | 273 | # Prepare OVS bridges and cleanup containers. 274 | function init_ovn_fake_multinode() { 275 | echo "-- Initializing ovn-fake-multinode cluster on all nodes" 276 | ansible-playbook ${ovn_fmn_playbooks}/deploy-minimal.yml -i ${hosts_file} 277 | } 278 | 279 | # Pull image on all nodes. 280 | function pull_ovn_fake_multinode() { 281 | echo "-- Saving the ovn/ovn-multi-node image and pulling it on all nodes." 
282 | 283 | pushd ${rundir}/ovn-fake-multinode 284 | rm -f ovn-multi-node-image.tar 285 | podman save --format oci-archive -o ovn-multi-node-image.tar \ 286 | ovn/ovn-multi-node:latest 287 | ansible-playbook ${ovn_fmn_playbooks}/pull-fake-multinode.yml -i ${hosts_file} 288 | popd 289 | } 290 | 291 | function pull_ovn_tester() { 292 | echo "-- Saving the ovn/ovn-tester image and pulling it on the tester." 293 | 294 | rm -f ovn-tester-image.tar 295 | podman save --format oci-archive -o ovn-tester-image.tar \ 296 | ovn/ovn-tester:latest 297 | ansible-playbook ${ovn_fmn_playbooks}/pull-ovn-tester.yml -i ${hosts_file} 298 | } 299 | 300 | function install() { 301 | pushd ${rundir} 302 | if is_rpm_based 303 | then 304 | install_deps_local_rpm 305 | elif is_deb_based 306 | then 307 | install_deps_local_deb 308 | else 309 | die_distro 310 | fi 311 | install_deps_remote 312 | install_venv 313 | install_ovn_fake_multinode 314 | init_ovn_fake_multinode 315 | pull_ovn_fake_multinode 316 | install_ovn_tester 317 | pull_ovn_tester 318 | popd 319 | } 320 | 321 | function refresh_tester() { 322 | pushd ${rundir} 323 | 324 | install_ovn_tester 325 | pull_ovn_tester 326 | 327 | popd 328 | } 329 | 330 | function translate_yaml() { 331 | local test_file=$1 332 | 333 | pushd ${rundir} > /dev/null 334 | source ${ovn_heater_venv}/bin/activate 335 | translated_test_file=${rundir}/test-scenario.yml 336 | ${ovn_fmn_translate} ${test_file} ${translated_test_file} 337 | deactivate 338 | popd > /dev/null 339 | 340 | echo ${translated_test_file} 341 | } 342 | 343 | function record_test_config() { 344 | local out_dir=$1 345 | local out_file=${out_dir}/config 346 | local out_installer_log_file=${out_dir}/installer-log 347 | local out_hosts_file=${out_dir}/hosts 348 | 349 | echo "-- Storing installer log in ${install_log_file}" 350 | cp ${installer_log_file} ${out_installer_log_file} 351 | 352 | echo "-- Storing hosts file in ${out_hosts_file}" 353 | cp ${hosts_file} ${out_hosts_file} 354 | 355 | 
echo "-- Storing test components versions in ${out_file}" 356 | > ${out_file} 357 | 358 | components=("ovn-fake-multinode" "ovs" "ovn") 359 | for d in "${components[@]}"; do 360 | pushd ${rundir}/$d 361 | local origin=$(git config --get remote.origin.url) 362 | local sha=$(git rev-parse HEAD) 363 | local sha_name=$(git rev-parse --abbrev-ref HEAD) 364 | echo "$d (${origin}): ${sha} (${sha_name})" >> ${out_file} 365 | popd 366 | done 367 | } 368 | 369 | function mine_data() { 370 | out_dir=$1 371 | tester_host=$2 372 | 373 | echo "-- Mining data from logs in: ${out_dir}" 374 | 375 | pushd ${out_dir} 376 | 377 | mkdir -p mined-data 378 | for p in ovn-northd ovn-controller ovn-nbctl; do 379 | logs=$(find ${out_dir}/logs -name ${p}.log) 380 | ${topdir}/utils/mine-poll-intervals.sh ${logs} > mined-data/${p} 381 | done 382 | for p in ovsdb-server-sb ovsdb-server-nb; do 383 | logs=$(find ${out_dir}/logs -name ${p}.log) 384 | ${topdir}/utils/mine-db-poll-intervals.sh ${logs} > mined-data/${p} 385 | done 386 | 387 | cat ${out_dir}/test-log | grep 'Binding lport' \ 388 | | cut -d ' ' -f 1,2,8 > mined-data/ovn-binding.log 389 | 390 | logs=$(find ${out_dir}/logs -name ovn-controller.log) 391 | grep ovn-installed ${logs} | cut -d ':' -f 2- | tr '|' ' ' \ 392 | | cut -d ' ' -f 1,7 | tr 'T' ' ' | sort > mined-data/ovn-installed.log 393 | 394 | source ${rundir}/${ovn_heater_venv}/bin/activate 395 | python3 ${topdir}/utils/latency.py \ 396 | ./mined-data/ovn-binding.log ./mined-data/ovn-installed.log \ 397 | > mined-data/binding-to-ovn-installed-latency 398 | 399 | rm -rf ./mined-data/ovn-binding.log ./mined-data/ovn-installed.log 400 | 401 | logs=$(find ${out_dir}/logs -iname 'ps*') 402 | grep died ${logs} | sed 's/.*\/\(ovn-.*\)/\1/' > mined-data/crashes 403 | [ -s mined-data/crashes ] || rm -f mined-data/crashes 404 | 405 | # Collecting stats for the tester and central components from the first 406 | # 3 availability zones to avoid bloating the report. 
407 | resource_usage_logs=$(find ${out_dir}/logs -name process-stats.json \ 408 | | grep -E 'ovn-tester|ovn-central-az[0-2]-') 409 | python3 ${topdir}/utils/process-stats.py \ 410 | -o resource-usage-report-central.html ${resource_usage_logs} 411 | 412 | # Collecting stats only for 3 workers to avoid bloating the report. 413 | resource_usage_logs=$(find ${out_dir}/logs -name process-stats.json \ 414 | | grep ovn-scale | head -3) 415 | python3 ${topdir}/utils/process-stats.py \ 416 | -o resource-usage-report-worker.html ${resource_usage_logs} 417 | 418 | # Preparing reports for aggregate resource usage. 419 | resource_usage_logs=$(find ${out_dir}/logs -name process-stats.json) 420 | python3 ${topdir}/utils/process-stats.py --aggregate \ 421 | -o resource-usage-report-aggregate.html ${resource_usage_logs} 422 | 423 | deactivate 424 | 425 | popd 426 | } 427 | 428 | function get_cluster_var() { 429 | local test_file=$1 430 | local var_name=$2 431 | 432 | var=$(${ovn_fmn_get} ${test_file} cluster ${var_name}) 433 | 434 | if [ "${var_name}" == "clustered_db" ]; then 435 | if [ "${var}" == "True" ]; then 436 | echo -n "n_central=3 " 437 | else 438 | echo -n "n_central=1 " 439 | fi 440 | fi 441 | 442 | if [ "${var}" == "True" ]; then 443 | echo "${var_name}=yes"; 444 | elif [ "${var}" == "False" ]; then 445 | echo "${var_name}=no" 446 | else 447 | echo "${var_name}=${var}" 448 | fi 449 | } 450 | 451 | function run_test() { 452 | local test_file=$1 453 | local out_dir=$2 454 | shift; shift 455 | 456 | # Make sure results dir exists. 457 | mkdir -p ${out_dir}/logs 458 | 459 | # Record SHAs of all components. 460 | record_test_config ${out_dir} 461 | 462 | # Perform a fast cleanup by doing a minimal redeploy. 
463 | init_ovn_fake_multinode 464 | 465 | cluster_vars="" 466 | for var in enable_ssl clustered_db monitor_all use_ovsdb_etcd \ 467 | node_net datapath_type n_relays n_workers n_az; do 468 | cluster_vars="${cluster_vars} $(get_cluster_var ${test_file} ${var})" 469 | done 470 | echo "-- Cluster vars: ${cluster_vars}" 471 | 472 | if ! ansible-playbook ${ovn_fmn_playbooks}/bringup-cluster.yml \ 473 | -i ${hosts_file} --extra-vars "${cluster_vars}" ; then 474 | die "-- Failed to bring up fake cluster!" 475 | fi 476 | 477 | if ! ansible-playbook ${ovn_fmn_playbooks}/configure-tester.yml -i ${hosts_file} \ 478 | --extra-vars "test_file=${test_file} phys_deployment=${phys_deployment}" ; then 479 | die "-- Failed to set up test!" 480 | fi 481 | 482 | tester_host=$(${ovn_fmn_get} ${phys_deployment} tester-node name) 483 | if ! ssh root@${tester_host} podman exec \ 484 | ovn-tester python3 -u /ovn-tester/ovn_tester.py \ 485 | /physical-deployment.yml /test-scenario.yml ; 486 | then 487 | echo "-- Failed to run test. Check logs at: ${out_dir}/test-log" 488 | fi 489 | 490 | echo "-- Collecting logs to: ${out_dir}" 491 | ansible-playbook ${ovn_fmn_playbooks}/collect-logs.yml -i ${hosts_file} \ 492 | --extra-vars "results_dir=${out_dir}/logs" 493 | 494 | pushd ${out_dir}/logs 495 | for f in *.tgz; do 496 | tar xvfz $f 497 | done 498 | # Prior to containerization of ovn-tester, HTML files written by ovn-tester 499 | # were written directly to ${out_dir}. To make things easier for tools, we 500 | # copy the HTML files back to this original location. 501 | cp ${tester_host}/ovn-tester/*.html ${out_dir} || true 502 | 503 | # Once we successfully ran the test and collected its logs, the post 504 | # processing (e.g., data mining) can run in a subshell with errexit 505 | # disabled. We don't want the whole thing to error out if the post 506 | # processing fails. 
507 | ( 508 | set +o errexit 509 | mine_data ${out_dir} ${tester_host} 510 | ) 511 | } 512 | 513 | function usage() { 514 | die "Usage: $0 install|generate|init|refresh-tester|run " 515 | } 516 | 517 | do_lockfile=/tmp/do.sh.lock 518 | 519 | function take_lock() { 520 | exec 42>${do_lockfile} || die "Failed setting FD for ${do_lockfile}" 521 | flock -n 42 || die "Error: ovn-heater ($1) already running" 522 | } 523 | 524 | case "${1:-"usage"}" in 525 | "install") 526 | ;& 527 | "generate") 528 | ;& 529 | "init") 530 | ;& 531 | "refresh-tester") 532 | ;& 533 | "run") 534 | take_lock $0 535 | trap "rm -f ${do_lockfile}" EXIT 536 | ;; 537 | esac 538 | 539 | case "${1:-"usage"}" in 540 | "install") 541 | generate 542 | 543 | # Store current environment variables. 544 | ( 545 | echo "Environment:" 546 | echo "============" 547 | env 548 | echo 549 | ) > ${installer_log_file} 550 | 551 | # Run installer and store logs. 552 | ( 553 | echo "Installer logs:" 554 | echo "===============" 555 | ) >> ${installer_log_file} 556 | install 2>&1 | tee -a ${installer_log_file} 557 | ;; 558 | "generate") 559 | generate 560 | ;; 561 | "init") 562 | init_ovn_fake_multinode 563 | pull_ovn_fake_multinode 564 | ;; 565 | "refresh-tester") 566 | refresh_tester 567 | ;; 568 | "run") 569 | cmd=$0 570 | shift 571 | test_file=$1 572 | out_dir=$2 573 | if [ -z "${test_file}" ]; then 574 | echo "Please supply a test scenario as argument!" 575 | usage ${cmd} 576 | fi 577 | if [ -z "${out_dir}" ]; then 578 | echo "Please supply an output results directory!" 579 | usage ${cmd} 580 | fi 581 | test_file=$(pwd)/${test_file} 582 | tstamp=$(date "+%Y%m%d-%H%M%S") 583 | out_dir=${result_dir}/${out_dir}-${tstamp} 584 | 585 | if [ ! -f ${test_file} ]; then 586 | echo "Test scenario ${test_file} does not exist!" 587 | usage ${cmd} 588 | fi 589 | if [ -d ${out_dir} ]; then 590 | echo "Results directory ${out_dir} already exists!" 
591 | usage ${cmd} 592 | fi 593 | shift; shift 594 | 595 | test_file=$(translate_yaml ${test_file}) 596 | # Run the new test. 597 | mkdir -p ${out_dir} 598 | run_test ${test_file} ${out_dir} 2>&1 | tee ${out_dir}/test-log 599 | ;; 600 | *) 601 | usage $0 602 | ;; 603 | esac 604 | 605 | exit 0 606 | -------------------------------------------------------------------------------- /copyright: -------------------------------------------------------------------------------- 1 | Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ 2 | Upstream-Name: ovn-heater 3 | Source: https://github.com/ovn-org/ovn-heater 4 | 5 | Upstream Authors: 6 | 7 | Ales Musil amusil@redhat.com 8 | Ben Pfaff blp@ovn.org 9 | Dan Williams dcbw@redhat.com 10 | Dumitru Ceara dceara@redhat.com 11 | Frode Nordahl frode.nordahl@canonical.com 12 | Ilya Maximets i.maximets@ovn.org 13 | Jianlin Shi jishi@redhat.com 14 | Lorenzo Bianconi lorenzo.bianconi@redhat.com 15 | Mark Michelson mmichels@redhat.com 16 | Martin Kalcok martin.kalcok@canonical.com 17 | Mohamed S. Mahmoud mmahmoud@redhat.com 18 | Numan Siddique nusiddiq@redhat.com 19 | 20 | Files: * 21 | Copyright: (c) 2020 VMware, Inc. 22 | (c) 2020-2023 RedHat, Inc. 23 | (c) 2023 Canonical 24 | License: Apache-2.0 25 | 26 | Files: logo.png 27 | Copyright: (c) 2015 Ben Pfaff 28 | (c) 2008 Inkwina 29 | (c) 2023 Frode Nordahl 30 | License: CC BY-SA 4.0 31 | 32 | License: Apache-2.0 33 | Licensed under the Apache License, Version 2.0 (the "License"); 34 | you may not use this file except in compliance with the License. 35 | You may obtain a copy of the License at 36 | . 37 | http://www.apache.org/licenses/LICENSE-2.0 38 | . 39 | Unless required by applicable law or agreed to in writing, software 40 | distributed under the License is distributed on an "AS IS" BASIS, 41 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
42 | See the License for the specific language governing permissions and 43 | limitations under the License. 44 | . 45 | On Debian-based systems the full text of the Apache version 2.0 license 46 | can be found in `/usr/share/common-licenses/Apache-2.0'. 47 | 48 | License: CC BY-SA 4.0 49 | Attribution-ShareAlike 4.0 International 50 | . 51 | ======================================================================= 52 | . 53 | Creative Commons Corporation ("Creative Commons") is not a law firm and 54 | does not provide legal services or legal advice. Distribution of 55 | Creative Commons public licenses does not create a lawyer-client or 56 | other relationship. Creative Commons makes its licenses and related 57 | information available on an "as-is" basis. Creative Commons gives no 58 | warranties regarding its licenses, any material licensed under their 59 | terms and conditions, or any related information. Creative Commons 60 | disclaims all liability for damages resulting from their use to the 61 | fullest extent possible. 62 | . 63 | Using Creative Commons Public Licenses 64 | . 65 | Creative Commons public licenses provide a standard set of terms and 66 | conditions that creators and other rights holders may use to share 67 | original works of authorship and other material subject to copyright 68 | and certain other rights specified in the public license below. The 69 | following considerations are for informational purposes only, are not 70 | exhaustive, and do not form part of our licenses. 71 | . 72 | Considerations for licensors: Our public licenses are 73 | intended for use by those authorized to give the public 74 | permission to use material in ways otherwise restricted by 75 | copyright and certain other rights. Our licenses are 76 | irrevocable. Licensors should read and understand the terms 77 | and conditions of the license they choose before applying it. 
78 | Licensors should also secure all rights necessary before 79 | applying our licenses so that the public can reuse the 80 | material as expected. Licensors should clearly mark any 81 | material not subject to the license. This includes other CC- 82 | licensed material, or material used under an exception or 83 | limitation to copyright. More considerations for licensors: 84 | wiki.creativecommons.org/Considerations_for_licensors 85 | . 86 | Considerations for the public: By using one of our public 87 | licenses, a licensor grants the public permission to use the 88 | licensed material under specified terms and conditions. If 89 | the licensor's permission is not necessary for any reason--for 90 | example, because of any applicable exception or limitation to 91 | copyright--then that use is not regulated by the license. Our 92 | licenses grant only permissions under copyright and certain 93 | other rights that a licensor has authority to grant. Use of 94 | the licensed material may still be restricted for other 95 | reasons, including because others have copyright or other 96 | rights in the material. A licensor may make special requests, 97 | such as asking that all changes be marked or described. 98 | Although not required by our licenses, you are encouraged to 99 | respect those requests where reasonable. More considerations 100 | for the public: 101 | wiki.creativecommons.org/Considerations_for_licensees 102 | . 103 | ======================================================================= 104 | . 105 | Creative Commons Attribution-ShareAlike 4.0 International Public 106 | License 107 | . 108 | By exercising the Licensed Rights (defined below), You accept and agree 109 | to be bound by the terms and conditions of this Creative Commons 110 | Attribution-ShareAlike 4.0 International Public License ("Public 111 | License"). 
To the extent this Public License may be interpreted as a 112 | contract, You are granted the Licensed Rights in consideration of Your 113 | acceptance of these terms and conditions, and the Licensor grants You 114 | such rights in consideration of benefits the Licensor receives from 115 | making the Licensed Material available under these terms and 116 | conditions. 117 | . 118 | . 119 | Section 1 -- Definitions. 120 | . 121 | a. Adapted Material means material subject to Copyright and Similar 122 | Rights that is derived from or based upon the Licensed Material 123 | and in which the Licensed Material is translated, altered, 124 | arranged, transformed, or otherwise modified in a manner requiring 125 | permission under the Copyright and Similar Rights held by the 126 | Licensor. For purposes of this Public License, where the Licensed 127 | Material is a musical work, performance, or sound recording, 128 | Adapted Material is always produced where the Licensed Material is 129 | synched in timed relation with a moving image. 130 | . 131 | b. Adapter's License means the license You apply to Your Copyright 132 | and Similar Rights in Your contributions to Adapted Material in 133 | accordance with the terms and conditions of this Public License. 134 | . 135 | c. BY-SA Compatible License means a license listed at 136 | creativecommons.org/compatiblelicenses, approved by Creative 137 | Commons as essentially the equivalent of this Public License. 138 | . 139 | d. Copyright and Similar Rights means copyright and/or similar rights 140 | closely related to copyright including, without limitation, 141 | performance, broadcast, sound recording, and Sui Generis Database 142 | Rights, without regard to how the rights are labeled or 143 | categorized. For purposes of this Public License, the rights 144 | specified in Section 2(b)(1)-(2) are not Copyright and Similar 145 | Rights. 146 | . 147 | e. 
Effective Technological Measures means those measures that, in the 148 | absence of proper authority, may not be circumvented under laws 149 | fulfilling obligations under Article 11 of the WIPO Copyright 150 | Treaty adopted on December 20, 1996, and/or similar international 151 | agreements. 152 | . 153 | f. Exceptions and Limitations means fair use, fair dealing, and/or 154 | any other exception or limitation to Copyright and Similar Rights 155 | that applies to Your use of the Licensed Material. 156 | . 157 | g. License Elements means the license attributes listed in the name 158 | of a Creative Commons Public License. The License Elements of this 159 | Public License are Attribution and ShareAlike. 160 | . 161 | h. Licensed Material means the artistic or literary work, database, 162 | or other material to which the Licensor applied this Public 163 | License. 164 | . 165 | i. Licensed Rights means the rights granted to You subject to the 166 | terms and conditions of this Public License, which are limited to 167 | all Copyright and Similar Rights that apply to Your use of the 168 | Licensed Material and that the Licensor has authority to license. 169 | . 170 | j. Licensor means the individual(s) or entity(ies) granting rights 171 | under this Public License. 172 | . 173 | k. Share means to provide material to the public by any means or 174 | process that requires permission under the Licensed Rights, such 175 | as reproduction, public display, public performance, distribution, 176 | dissemination, communication, or importation, and to make material 177 | available to the public including in ways that members of the 178 | public may access the material from a place and at a time 179 | individually chosen by them. 180 | . 181 | l. 
Sui Generis Database Rights means rights other than copyright 182 | resulting from Directive 96/9/EC of the European Parliament and of 183 | the Council of 11 March 1996 on the legal protection of databases, 184 | as amended and/or succeeded, as well as other essentially 185 | equivalent rights anywhere in the world. 186 | . 187 | m. You means the individual or entity exercising the Licensed Rights 188 | under this Public License. Your has a corresponding meaning. 189 | . 190 | . 191 | Section 2 -- Scope. 192 | . 193 | a. License grant. 194 | . 195 | 1. Subject to the terms and conditions of this Public License, 196 | the Licensor hereby grants You a worldwide, royalty-free, 197 | non-sublicensable, non-exclusive, irrevocable license to 198 | exercise the Licensed Rights in the Licensed Material to: 199 | . 200 | a. reproduce and Share the Licensed Material, in whole or 201 | in part; and 202 | . 203 | b. produce, reproduce, and Share Adapted Material. 204 | . 205 | 2. Exceptions and Limitations. For the avoidance of doubt, where 206 | Exceptions and Limitations apply to Your use, this Public 207 | License does not apply, and You do not need to comply with 208 | its terms and conditions. 209 | . 210 | 3. Term. The term of this Public License is specified in Section 211 | 6(a). 212 | . 213 | 4. Media and formats; technical modifications allowed. The 214 | Licensor authorizes You to exercise the Licensed Rights in 215 | all media and formats whether now known or hereafter created, 216 | and to make technical modifications necessary to do so. The 217 | Licensor waives and/or agrees not to assert any right or 218 | authority to forbid You from making technical modifications 219 | necessary to exercise the Licensed Rights, including 220 | technical modifications necessary to circumvent Effective 221 | Technological Measures. For purposes of this Public License, 222 | simply making modifications authorized by this Section 2(a) 223 | (4) never produces Adapted Material. 
224 | . 225 | 5. Downstream recipients. 226 | . 227 | a. Offer from the Licensor -- Licensed Material. Every 228 | recipient of the Licensed Material automatically 229 | receives an offer from the Licensor to exercise the 230 | Licensed Rights under the terms and conditions of this 231 | Public License. 232 | . 233 | b. Additional offer from the Licensor -- Adapted Material. 234 | Every recipient of Adapted Material from You 235 | automatically receives an offer from the Licensor to 236 | exercise the Licensed Rights in the Adapted Material 237 | under the conditions of the Adapter's License You apply. 238 | . 239 | c. No downstream restrictions. You may not offer or impose 240 | any additional or different terms or conditions on, or 241 | apply any Effective Technological Measures to, the 242 | Licensed Material if doing so restricts exercise of the 243 | Licensed Rights by any recipient of the Licensed 244 | Material. 245 | . 246 | 6. No endorsement. Nothing in this Public License constitutes or 247 | may be construed as permission to assert or imply that You 248 | are, or that Your use of the Licensed Material is, connected 249 | with, or sponsored, endorsed, or granted official status by, 250 | the Licensor or others designated to receive attribution as 251 | provided in Section 3(a)(1)(A)(i). 252 | . 253 | b. Other rights. 254 | . 255 | 1. Moral rights, such as the right of integrity, are not 256 | licensed under this Public License, nor are publicity, 257 | privacy, and/or other similar personality rights; however, to 258 | the extent possible, the Licensor waives and/or agrees not to 259 | assert any such rights held by the Licensor to the limited 260 | extent necessary to allow You to exercise the Licensed 261 | Rights, but not otherwise. 262 | . 263 | 2. Patent and trademark rights are not licensed under this 264 | Public License. 265 | . 266 | 3. 
To the extent possible, the Licensor waives any right to 267 | collect royalties from You for the exercise of the Licensed 268 | Rights, whether directly or through a collecting society 269 | under any voluntary or waivable statutory or compulsory 270 | licensing scheme. In all other cases the Licensor expressly 271 | reserves any right to collect such royalties. 272 | . 273 | . 274 | Section 3 -- License Conditions. 275 | . 276 | Your exercise of the Licensed Rights is expressly made subject to the 277 | following conditions. 278 | . 279 | a. Attribution. 280 | . 281 | 1. If You Share the Licensed Material (including in modified 282 | form), You must: 283 | . 284 | a. retain the following if it is supplied by the Licensor 285 | with the Licensed Material: 286 | . 287 | i. identification of the creator(s) of the Licensed 288 | Material and any others designated to receive 289 | attribution, in any reasonable manner requested by 290 | the Licensor (including by pseudonym if 291 | designated); 292 | . 293 | ii. a copyright notice; 294 | . 295 | iii. a notice that refers to this Public License; 296 | . 297 | iv. a notice that refers to the disclaimer of 298 | warranties; 299 | . 300 | v. a URI or hyperlink to the Licensed Material to the 301 | extent reasonably practicable; 302 | . 303 | b. indicate if You modified the Licensed Material and 304 | retain an indication of any previous modifications; and 305 | . 306 | c. indicate the Licensed Material is licensed under this 307 | Public License, and include the text of, or the URI or 308 | hyperlink to, this Public License. 309 | . 310 | 2. You may satisfy the conditions in Section 3(a)(1) in any 311 | reasonable manner based on the medium, means, and context in 312 | which You Share the Licensed Material. For example, it may be 313 | reasonable to satisfy the conditions by providing a URI or 314 | hyperlink to a resource that includes the required 315 | information. 316 | . 317 | 3. 
If requested by the Licensor, You must remove any of the 318 | information required by Section 3(a)(1)(A) to the extent 319 | reasonably practicable. 320 | . 321 | b. ShareAlike. 322 | . 323 | In addition to the conditions in Section 3(a), if You Share 324 | Adapted Material You produce, the following conditions also apply. 325 | . 326 | 1. The Adapter's License You apply must be a Creative Commons 327 | license with the same License Elements, this version or 328 | later, or a BY-SA Compatible License. 329 | . 330 | 2. You must include the text of, or the URI or hyperlink to, the 331 | Adapter's License You apply. You may satisfy this condition 332 | in any reasonable manner based on the medium, means, and 333 | context in which You Share Adapted Material. 334 | . 335 | 3. You may not offer or impose any additional or different terms 336 | or conditions on, or apply any Effective Technological 337 | Measures to, Adapted Material that restrict exercise of the 338 | rights granted under the Adapter's License You apply. 339 | . 340 | . 341 | Section 4 -- Sui Generis Database Rights. 342 | . 343 | Where the Licensed Rights include Sui Generis Database Rights that 344 | apply to Your use of the Licensed Material: 345 | . 346 | a. for the avoidance of doubt, Section 2(a)(1) grants You the right 347 | to extract, reuse, reproduce, and Share all or a substantial 348 | portion of the contents of the database; 349 | . 350 | b. if You include all or a substantial portion of the database 351 | contents in a database in which You have Sui Generis Database 352 | Rights, then the database in which You have Sui Generis Database 353 | Rights (but not its individual contents) is Adapted Material, 354 | including for purposes of Section 3(b); and 355 | . 356 | c. You must comply with the conditions in Section 3(a) if You Share 357 | all or a substantial portion of the contents of the database. 358 | . 
359 | For the avoidance of doubt, this Section 4 supplements and does not 360 | replace Your obligations under this Public License where the Licensed 361 | Rights include other Copyright and Similar Rights. 362 | . 363 | . 364 | Section 5 -- Disclaimer of Warranties and Limitation of Liability. 365 | . 366 | a. UNLESS OTHERWISE SEPARATELY UNDERTAKEN BY THE LICENSOR, TO THE 367 | EXTENT POSSIBLE, THE LICENSOR OFFERS THE LICENSED MATERIAL AS-IS 368 | AND AS-AVAILABLE, AND MAKES NO REPRESENTATIONS OR WARRANTIES OF 369 | ANY KIND CONCERNING THE LICENSED MATERIAL, WHETHER EXPRESS, 370 | IMPLIED, STATUTORY, OR OTHER. THIS INCLUDES, WITHOUT LIMITATION, 371 | WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR 372 | PURPOSE, NON-INFRINGEMENT, ABSENCE OF LATENT OR OTHER DEFECTS, 373 | ACCURACY, OR THE PRESENCE OR ABSENCE OF ERRORS, WHETHER OR NOT 374 | KNOWN OR DISCOVERABLE. WHERE DISCLAIMERS OF WARRANTIES ARE NOT 375 | ALLOWED IN FULL OR IN PART, THIS DISCLAIMER MAY NOT APPLY TO YOU. 376 | . 377 | b. TO THE EXTENT POSSIBLE, IN NO EVENT WILL THE LICENSOR BE LIABLE 378 | TO YOU ON ANY LEGAL THEORY (INCLUDING, WITHOUT LIMITATION, 379 | NEGLIGENCE) OR OTHERWISE FOR ANY DIRECT, SPECIAL, INDIRECT, 380 | INCIDENTAL, CONSEQUENTIAL, PUNITIVE, EXEMPLARY, OR OTHER LOSSES, 381 | COSTS, EXPENSES, OR DAMAGES ARISING OUT OF THIS PUBLIC LICENSE OR 382 | USE OF THE LICENSED MATERIAL, EVEN IF THE LICENSOR HAS BEEN 383 | ADVISED OF THE POSSIBILITY OF SUCH LOSSES, COSTS, EXPENSES, OR 384 | DAMAGES. WHERE A LIMITATION OF LIABILITY IS NOT ALLOWED IN FULL OR 385 | IN PART, THIS LIMITATION MAY NOT APPLY TO YOU. 386 | . 387 | c. The disclaimer of warranties and limitation of liability provided 388 | above shall be interpreted in a manner that, to the extent 389 | possible, most closely approximates an absolute disclaimer and 390 | waiver of all liability. 391 | . 392 | . 393 | Section 6 -- Term and Termination. 394 | . 395 | a. 
This Public License applies for the term of the Copyright and 396 | Similar Rights licensed here. However, if You fail to comply with 397 | this Public License, then Your rights under this Public License 398 | terminate automatically. 399 | . 400 | b. Where Your right to use the Licensed Material has terminated under 401 | Section 6(a), it reinstates: 402 | . 403 | 1. automatically as of the date the violation is cured, provided 404 | it is cured within 30 days of Your discovery of the 405 | violation; or 406 | . 407 | 2. upon express reinstatement by the Licensor. 408 | . 409 | For the avoidance of doubt, this Section 6(b) does not affect any 410 | right the Licensor may have to seek remedies for Your violations 411 | of this Public License. 412 | . 413 | c. For the avoidance of doubt, the Licensor may also offer the 414 | Licensed Material under separate terms or conditions or stop 415 | distributing the Licensed Material at any time; however, doing so 416 | will not terminate this Public License. 417 | . 418 | d. Sections 1, 5, 6, 7, and 8 survive termination of this Public 419 | License. 420 | . 421 | . 422 | Section 7 -- Other Terms and Conditions. 423 | . 424 | a. The Licensor shall not be bound by any additional or different 425 | terms or conditions communicated by You unless expressly agreed. 426 | . 427 | b. Any arrangements, understandings, or agreements regarding the 428 | Licensed Material not stated herein are separate from and 429 | independent of the terms and conditions of this Public License. 430 | . 431 | . 432 | Section 8 -- Interpretation. 433 | . 434 | a. For the avoidance of doubt, this Public License does not, and 435 | shall not be interpreted to, reduce, limit, restrict, or impose 436 | conditions on any use of the Licensed Material that could lawfully 437 | be made without permission under this Public License. 438 | . 439 | b. 
To the extent possible, if any provision of this Public License is 440 | deemed unenforceable, it shall be automatically reformed to the 441 | minimum extent necessary to make it enforceable. If the provision 442 | cannot be reformed, it shall be severed from this Public License 443 | without affecting the enforceability of the remaining terms and 444 | conditions. 445 | . 446 | c. No term or condition of this Public License will be waived and no 447 | failure to comply consented to unless expressly agreed to by the 448 | Licensor. 449 | . 450 | d. Nothing in this Public License constitutes or may be interpreted 451 | as a limitation upon, or waiver of, any privileges and immunities 452 | that apply to the Licensor or You, including from the legal 453 | processes of any jurisdiction or authority. 454 | . 455 | . 456 | ======================================================================= 457 | . 458 | Creative Commons is not a party to its public 459 | licenses. Notwithstanding, Creative Commons may elect to apply one of 460 | its public licenses to material it publishes and in those instances 461 | will be considered the “Licensor.” The text of the Creative Commons 462 | public licenses is dedicated to the public domain under the CC0 Public 463 | Domain Dedication. Except for the limited purpose of indicating that 464 | material is shared under a Creative Commons public license or as 465 | otherwise permitted by the Creative Commons policies published at 466 | creativecommons.org/policies, Creative Commons does not authorize the 467 | use of the trademark "Creative Commons" or any other trademark or logo 468 | of Creative Commons without its prior written consent including, 469 | without limitation, in connection with any unauthorized modifications 470 | to any of its public licenses or any other arrangements, 471 | understandings, or agreements concerning use of licensed material. 
For 472 | the avoidance of doubt, this paragraph does not form part of the 473 | public licenses. 474 | . 475 | Creative Commons may be contacted at creativecommons.org. 476 | --------------------------------------------------------------------------------