├── src
├── __init__.py
├── core
│ ├── __init__.py
│ ├── test
│ │ └── __init__.py
│ ├── network_factory.py
│ ├── mesh_topology.py
│ ├── testbed_mgr.py
│ ├── network_mgr.py
│ └── topology.py
├── testbed
│ ├── __init__.py
│ ├── config.py
│ └── linux_host.py
├── tools
│ ├── __init__.py
│ ├── test
│ │ ├── __init__.py
│ │ └── util_unittest.py
│ ├── util.py
│ ├── tc_rules.sh
│ ├── extract_data.py
│ └── psutil_monitor.py
├── containernet
│ ├── __init__.py
│ ├── requirements.txt
│ └── containernet_host.py
├── data_storage
│ ├── __init__.py
│ └── storage.py
├── interfaces
│ ├── __init__.py
│ ├── routing.py
│ ├── host.py
│ ├── network_mgr.py
│ └── network.py
├── protosuites
│ ├── __init__.py
│ ├── bats
│ │ ├── __init__.py
│ │ ├── bats_brtp_proxy.py
│ │ ├── bats_btp.py
│ │ └── bats_brtp.py
│ ├── proto_info.py
│ ├── cs_protocol.py
│ ├── proto.py
│ ├── std_protocol.py
│ └── noop_protocol.py
├── testsuites
│ ├── __init__.py
│ ├── test_ping.py
│ ├── test_sshping.py
│ ├── test_iperf_bats.py
│ ├── test_rtt.py
│ ├── test_scp.py
│ ├── test_regular.py
│ ├── test_iperf.py
│ └── test_competition.py
├── data_analyzer
│ ├── __init__.py
│ ├── analyzer.py
│ ├── analyzer_factory.py
│ ├── scp_analyzer.py
│ ├── first_rtt_analyzer.py
│ ├── sshping_analyzer.py
│ └── rtt_analyzer.py
├── config
│ ├── rootfs
│ │ ├── etc
│ │ │ ├── cfg
│ │ │ │ ├── oasis.cfg
│ │ │ │ └── licence
│ │ │ └── bats-protocol
│ │ │ │ ├── server.cert
│ │ │ │ └── server.key
│ │ └── usr
│ │ │ ├── bin
│ │ │ ├── sshping
│ │ │ ├── bats_cli
│ │ │ ├── bats_protocol
│ │ │ ├── kernel_tc.sh
│ │ │ ├── olsrd2_static
│ │ │ ├── secnetperf
│ │ │ ├── tcp_endpoint
│ │ │ ├── client_linux_amd64
│ │ │ └── server_linux_amd64
│ │ │ └── sbin
│ │ │ └── init_node.sh
│ ├── testbed
│ │ └── predefined.testbed.yaml
│ ├── keys
│ │ ├── id_rsa.pub
│ │ └── id_rsa
│ ├── protocol-scp-test.yaml
│ ├── containernet-docker-official
│ │ └── Dockerfile
│ ├── protocol-multi-hops-test.yaml
│ ├── protocol-first-rtt-test.yaml
│ ├── tcp-rtt-loss-test.yaml
│ ├── bats-protocol-rtt-loss-test.yaml
│ ├── protocol-sshping-test.yaml
│ ├── predefined.node_config.yaml
│ ├── protocol-multi-hops-test-olsr.yaml
│ ├── protocol-udp-test.yaml
│ ├── nested-containernet-config.yaml
│ ├── protocol-docker-azure
│ │ └── Dockerfile
│ ├── 4-hops-linear-network.json
│ ├── protocol-flow-competition-test.yaml
│ ├── protocol-single-hop-test.yaml
│ ├── protocol-performance-comparison.yaml
│ ├── predefined.protocols.yaml
│ ├── mesh-network.json
│ ├── mesh-network-no-loss.json
│ └── predefined.topology.yaml
├── var
│ └── global_var.py
├── routing
│ ├── openr_routing.py
│ ├── routing_factory.py
│ ├── static_routing.py
│ ├── static_routing_bfs.py
│ └── olsr_routing.py
└── start.py
├── bats
└── imgs
│ ├── arch.png
│ └── bats-brtp.png
├── .github
├── scripts
│ ├── README.md
│ └── pre-commit
└── workflows
│ ├── .github.ci.yml
│ └── .github.oasis-ci.yml
├── .vscode
├── extensions.json
└── settings.json
├── Dockerfile.ubuntu-generic-lttng
├── Dockerfile.containernet
├── Dockerfile.ubuntu-generic
├── .gitignore
├── .gitattributes
├── .pylintrc
├── docs
├── tc-strategy.md
├── flow_competition_test.md
├── protocols_and_tools.md
└── imgs
│ └── complex-top.svg
├── .gitlab-ci.yml
└── README.md
/src/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/src/core/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/src/testbed/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/src/tools/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/src/containernet/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/src/core/test/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/src/data_storage/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/src/interfaces/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/src/protosuites/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/src/testsuites/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/src/tools/test/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/src/data_analyzer/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/src/protosuites/bats/__init__.py:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/src/config/rootfs/etc/cfg/oasis.cfg:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/bats/imgs/arch.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/n-hop/oasis/HEAD/bats/imgs/arch.png
--------------------------------------------------------------------------------
/bats/imgs/bats-brtp.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/n-hop/oasis/HEAD/bats/imgs/bats-brtp.png
--------------------------------------------------------------------------------
/src/containernet/requirements.txt:
--------------------------------------------------------------------------------
1 | # dependencies for containernet
2 | PyYAML==6.0.1
3 | dataclasses>=0.6
4 | matplotlib>=3.9.2
5 | psutil>=7.0.0
--------------------------------------------------------------------------------
/.github/scripts/README.md:
--------------------------------------------------------------------------------
1 | ## Script tools
2 |
3 | ### 1. pre-commit hook
4 |
5 | ```bash
6 | cp scripts/pre-commit .git/hooks/pre-commit
7 | ```
8 |
--------------------------------------------------------------------------------
/src/config/rootfs/usr/bin/sshping:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:1c8c1cc2808e28c0725ea5df69b87852f1fce74577eef5a3b99b631116ddb218
3 | size 104296
4 |
--------------------------------------------------------------------------------
/src/config/rootfs/usr/bin/bats_cli:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:f71d209985a8b6fea2a075697af1257abe05737f256b00e12b5f6322f40be332
3 | size 10690270
4 |
--------------------------------------------------------------------------------
/src/config/rootfs/usr/bin/bats_protocol:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:4757abc57788f0344d69426ed94f04779cc79e5057e630feaf72333b896b937d
3 | size 1509070
4 |
--------------------------------------------------------------------------------
/src/config/rootfs/usr/bin/kernel_tc.sh:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:8db18477c5d458cc83907f220df1aed1b994cf73f94f3a02f741af2e90a2f5dc
3 | size 2385
4 |
--------------------------------------------------------------------------------
/src/config/rootfs/usr/bin/olsrd2_static:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:be208f1d6d30c9d26bf4ed22c081e5c3a38ecb2f12f73cc2e88452feec1f7ecb
3 | size 2250456
4 |
--------------------------------------------------------------------------------
/src/config/rootfs/usr/bin/secnetperf:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:cea45d4e4deb4fa1d284b879f1dbfa87f76d4673d1e27a06f5fd0edaf8501caf
3 | size 4382592
4 |
--------------------------------------------------------------------------------
/src/config/rootfs/usr/bin/tcp_endpoint:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:eaf9db969972be5af27bec5142329939b8f0c34957aa22a45815e2a987933bde
3 | size 174432
4 |
--------------------------------------------------------------------------------
/src/config/rootfs/etc/cfg/licence:
--------------------------------------------------------------------------------
1 | [application]
2 | Company=n-hop
3 | Device_id=ID20240101001
4 | Expired_date=2026-01-01
5 | Product_name=Oasis
6 | Software_version=v3.5.8(6b3753d)
7 |
--------------------------------------------------------------------------------
/src/config/rootfs/usr/bin/client_linux_amd64:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:8a88aedce7ce960195f3a56c671e8290fa7ff1d5fd709736c0002d009a2abdb1
3 | size 11083776
4 |
--------------------------------------------------------------------------------
/src/config/rootfs/usr/bin/server_linux_amd64:
--------------------------------------------------------------------------------
1 | version https://git-lfs.github.com/spec/v1
2 | oid sha256:0c4354a77f6fb49fcc6e7299695d07e8fcd87b076b97037f35cc853f3727a2eb
3 | size 11014144
4 |
--------------------------------------------------------------------------------
/src/var/global_var.py:
--------------------------------------------------------------------------------
# Path at which the oasis workspace is mapped inside each container/node.
g_root_path = '/root/oasis/'
# Once the workspace is mapped, the prebuilt root filesystem content
# (binaries, keys, init scripts under src/config/rootfs) is found here.
g_oasis_root_fs = '/root/oasis/src/config/rootfs/'
5 |
--------------------------------------------------------------------------------
/.vscode/extensions.json:
--------------------------------------------------------------------------------
1 | {
2 | "recommendations": [
3 | "redhat.vscode-yaml",
4 | "ms-python.autopep8",
5 | "streetsidesoftware.code-spell-checker",
6 | "ms-python.pylint",
7 | "yzhang.markdown-all-in-one"
8 | ]
9 | }
10 |
--------------------------------------------------------------------------------
/src/routing/openr_routing.py:
--------------------------------------------------------------------------------
1 | from interfaces.routing import IRoutingStrategy
2 |
class OpenrRouting(IRoutingStrategy):
    """Routing strategy for Open/R.

    Placeholder: route programming is not implemented yet, so
    `setup_routes` is a no-op.
    """

    def setup_routes(self, network: 'INetwork'):  # type: ignore
        # Nothing to configure for now.
        return None
9 |
--------------------------------------------------------------------------------
/src/core/network_factory.py:
--------------------------------------------------------------------------------
1 | from interfaces.network_mgr import (INetworkManager, NetworkType)
2 | from .network_mgr import NetworkManager
3 | from .testbed_mgr import TestbedManager
4 |
5 |
def create_network_mgr(type: NetworkType) -> INetworkManager:
    """Build the network manager matching `type`.

    A testbed type yields a TestbedManager; everything else (including
    the containernet type and any unknown value) falls back to the
    containernet NetworkManager.
    """
    if type == NetworkType.testbed:
        return TestbedManager()
    return NetworkManager()
12 |
--------------------------------------------------------------------------------
/.github/scripts/pre-commit:
--------------------------------------------------------------------------------
#!/bin/sh

# Patterns that look like committed secrets.
patterns="(AWS_SECRET_ACCESS_KEY|API_KEY|PASSWORD|SECRET|KEY|TOKEN)"

# Scan the contents of staged files for secret-like patterns.
# --diff-filter=ACM limits the list to added/copied/modified files so grep
# never sees a path deleted by this commit; -z / -0 keep filenames with
# spaces intact; xargs -r skips grep entirely when nothing is staged
# (plain xargs would still run grep with no file arguments, making it
# read stdin and hang the hook).
if git diff --cached --name-only -z --diff-filter=ACM | xargs -0 -r grep -E -i "$patterns"; then
    cat <<\EOF
########################## WARNING ##########################
Error: Attempt to commit secrets to the repository. Operation is aborted!
EOF
    exit 1
fi
--------------------------------------------------------------------------------
/src/data_storage/storage.py:
--------------------------------------------------------------------------------
1 | from abc import ABC, abstractmethod
2 | from dataclasses import dataclass
3 |
4 |
@dataclass
class StorageConfig:
    """Configuration for a data storage backend."""
    name: str  # name of the storage instance
8 |
9 |
class IDataStorage(ABC):
    """Abstract interface for persisting and retrieving data."""

    def __init__(self, config: StorageConfig):
        super().__init__()
        self.config = config  # settings shared by store()/load()

    @abstractmethod
    def store(self):
        """Persist data to the backing store. Must be overridden."""
        pass

    @abstractmethod
    def load(self):
        """Read data back from the backing store. Must be overridden."""
        pass
22 |
--------------------------------------------------------------------------------
/src/config/testbed/predefined.testbed.yaml:
--------------------------------------------------------------------------------
1 | testbed_nhop_shenzhen:
2 | - user: nhop
3 | ip: 10.53.1.189
4 | arch: arm64 # arm32, arm64, amd64
5 | authorized_key: src/config/keys/id_rsa
6 | intf: [eth0]
7 | - user: nhop
8 | ip: 10.53.1.218
9 | arch: arm64
10 | authorized_key: src/config/keys/id_rsa
11 | intf: [eth0]
12 | - user: nhop
13 | ip: 10.53.1.186
14 | arch: arm64
15 | authorized_key: src/config/keys/id_rsa
16 | intf: [eth0]
17 |
--------------------------------------------------------------------------------
/src/interfaces/routing.py:
--------------------------------------------------------------------------------
1 | from abc import ABC, abstractmethod
2 | from enum import IntEnum
3 |
4 |
class RouteType(IntEnum):
    """Routing strategies Oasis can configure (see routing_factory)."""
    static = 0  # StaticRouting
    olsr = 1  # OLSRRouting daemon-based routing
    openr = 2  # OpenrRouting (Open/R)
    static_bfs = 3  # StaticRoutingBfs
10 |
11 |
class IRoutingStrategy(ABC):
    """Strategy interface for configuring routing on a network."""

    @abstractmethod
    def setup_routes(self, network):
        """Configure routing for `network`. Must be overridden."""
        pass

    def teardown_routes(self, network):
        """Undo route configuration; default is a no-op."""
        pass

    def routing_type(self):
        """Return the concrete strategy's class name, e.g. 'StaticRouting'."""
        return self.__class__.__name__
22 |
--------------------------------------------------------------------------------
/src/testbed/config.py:
--------------------------------------------------------------------------------
1 | from dataclasses import dataclass, field
2 | from typing import Optional, List
3 |
4 |
@dataclass
class HostConfig:
    """Configuration for the baremetal host.

    Mirrors one host entry of `predefined.testbed.yaml`.
    """
    user: str  # username of the host
    ip: str  # IP address of the host
    arch: str  # hardware architecture of the host (e.g. arm32, arm64, amd64)
    authorized_key: Optional[str] = None  # path to the authorized ssh key.
    # Interfaces of the host. A plain `= None` default replaces the
    # redundant `field(default=None)` — for an immutable default the two
    # are equivalent, and this is the idiomatic form.
    intf: Optional[List[str]] = None
14 |
--------------------------------------------------------------------------------
/src/config/keys/id_rsa.pub:
--------------------------------------------------------------------------------
1 | ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABgQCEIB4SVEtDDzbi5+XJ5gTHp+DNaptloAloGV1O278y3IeoPUzzbSUDrGwWUZ2DzopBCMj6Wk4qBUdgyE6Kkt56SpZU5eeSts9XDNU1dxl/vvD+VAv0Id+p0AVqRFv7Pfr0TPaMVFB3UFM0c5rdKhHoLkPW/99eJepZsXXepnTZk1abYOqiMMPifHEWor9IhSig6SS4CyKIlOnC5DXArK7bkVtg1+SKk7VLh76G/K3vDuL69t7GnK8l7AdXGsl2KguveWTiQqhxibJSTGd0PJpuJSfnJz/c1W44aEgGTDOwrC8zNxrWjbjQhnT6CfMYs6Lf2ZBZLMDlPsoBCEyvfXf5VzA+WCajQ4HdX6BDbx3f225dEh+U5uoz8r8f+9HJp4jCKJqX4MCnoMwkZRW/IOuPBepgOFJqaOglxC5K79FGVuW2wEkTqn9k1pF37UC4S8+Ux52ucNdrroi+vI28tsQUgsrIfeTUlVqPwaRFfj58fUjIM+vAGelylfHO06OHGGE= oasis@n-hop
2 |
--------------------------------------------------------------------------------
/src/config/protocol-scp-test.yaml:
--------------------------------------------------------------------------------
1 | # test case yaml file is only visible to the `run_test.py`
2 | containernet:
3 | node_config:
4 | config_name: default
5 | config_file: predefined.node_config.yaml
6 | init_script: init_node.sh
7 |
8 | tests:
9 | test10:
10 | description: "Test scp with multiple files"
11 | topology:
12 | config_name: linear_network_1
13 | config_file: predefined.topology.yaml
14 | target_protocols: [tcp-bbr]
15 | route: static_route
16 | test_tools:
17 | scp:
18 | client_host: 0
19 | server_host: 1
20 |
--------------------------------------------------------------------------------
/Dockerfile.ubuntu-generic-lttng:
--------------------------------------------------------------------------------
1 | FROM gitlab.app.n-hop.com:5005/dependencies/oasis/ubuntu-generic:latest
2 | ARG DEBIAN_FRONTEND=noninteractive
3 |
4 | RUN set -eux; \
5 | apt-get update; \
6 | apt-get upgrade --yes; \
7 | apt-get install software-properties-common --yes; \
8 | apt-add-repository --yes ppa:lttng/stable-2.13; \
9 | apt-get update; \
10 | apt-get install liblttng-ust-dev babeltrace lttng-tools --yes;
11 |
12 |
13 | ## Cleanup
14 | RUN set -eux; \
15 | rm -rf /var/lib/apt/lists/*; \
16 | rm -rf /tmp/*;
17 |
18 | WORKDIR /root
19 |
--------------------------------------------------------------------------------
/src/config/containernet-docker-official/Dockerfile:
--------------------------------------------------------------------------------
FROM containernet/containernet:latest

ARG DEBIAN_FRONTEND=noninteractive

RUN apt-get update
RUN python3 --version
RUN python3 -m pip install --upgrade pip
# Install tools needed for development.
# Version specifiers must be quoted: an unquoted `psutil>=7.0.0` is parsed
# by the shell as an output redirection (`psutil` plus `>` to a file named
# `=7.0.0`), which silently installed the packages unpinned.
RUN python3 -m pip install 'PyYAML==6.0.1' 'psutil>=7.0.0' 'dataclasses>=0.6' 'matplotlib>=3.9.2'
# -i https://pypi.tuna.tsinghua.edu.cn/simple

# override the entry point of containernet w/ absolute path to avoid
# path not found err when changing workdir
ENTRYPOINT [ "/containernet/util/docker/entrypoint.sh" ]

WORKDIR /root
17 |
--------------------------------------------------------------------------------
/Dockerfile.containernet:
--------------------------------------------------------------------------------
1 | FROM gitlab.app.n-hop.com:5005/dependencies/containernet:latest
2 | ARG DEBIAN_FRONTEND=noninteractive
3 | ARG TZ="Etc/UTC"
4 |
5 | RUN python3 -m pip install --upgrade pip
6 |
7 | ## Pre-install containernet dependencies
8 | COPY src/containernet/requirements.txt /tmp/oasis_requirements.txt
9 |
10 | RUN pip3 install -r /tmp/oasis_requirements.txt
11 |
12 | # Cleanup
13 | RUN rm -rf /var/lib/apt/lists/*
14 |
15 | # override the entry point of containernet w/ absolute path to avoid
16 | # path not found err when changing workdir
17 | ENTRYPOINT [ "/containernet/util/docker/entrypoint.sh" ]
18 |
19 | WORKDIR /root
20 |
--------------------------------------------------------------------------------
/src/config/protocol-multi-hops-test.yaml:
--------------------------------------------------------------------------------
1 | # test case yaml file is only visible to the `run_test.py`
2 | containernet:
3 | node_config:
4 | config_name: default
5 | config_file: predefined.node_config.yaml
6 |
7 | tests:
8 | test2:
9 | description: "Test protocol throughput/rtt on multiple hops"
10 | topology:
11 | config_name: linear_network_2
12 | config_file: predefined.topology.yaml
13 | target_protocols: [brtp_proxy] # btp, brtp, kcp, tcp-bbr
14 | route: static_route
15 | test_tools:
16 | iperf:
17 | interval: 1
18 | interval_num: 10
19 | client_host: 0
20 | server_host: 2
21 |
--------------------------------------------------------------------------------
/src/config/protocol-first-rtt-test.yaml:
--------------------------------------------------------------------------------
1 | # test case yaml file is only visible to the `run_test.py`
2 | containernet:
3 | node_config:
4 | config_name: default
5 | config_file: predefined.node_config.yaml
6 |
7 | tests:
8 | test_first_rtt:
9 | description: "Test protocol first rtt on single hop"
10 | topology:
11 | config_name: linear_network_1
12 | config_file: predefined.topology.yaml
13 | target_protocols: [btp, brtp, brtp_proxy, kcp, tcp-bbr]
14 | route: static_route
15 | test_tools:
16 | rtt:
17 | interval: 0.01
18 | packet_count: 1
19 | packet_size: 20
20 | client_host: 0
21 | server_host: 1
22 |
--------------------------------------------------------------------------------
/src/protosuites/bats/bats_brtp_proxy.py:
--------------------------------------------------------------------------------
1 | from interfaces.network import INetwork
2 | from protosuites.bats.bats_protocol import BATSProtocol
3 |
4 |
class BRTPProxy(BATSProtocol):
    """BATS protocol BRTP-proxy mode.
    # The Iperf3 default port 5201 is set to exclude_port on the ini, for TCP proxy we use 5202
    """

    def get_tun_ip(self, network: 'INetwork', host_id: int) -> str:
        """Return host_id's OLSR loopback-label IP; "" for any other routing."""
        routing_name = network.get_routing_strategy().routing_type()
        if routing_name != 'OLSRRouting':
            return ""
        target = network.get_hosts()[host_id]
        return self._get_ip_from_host(target, 'lo label lo:olsr')
17 |
--------------------------------------------------------------------------------
/src/config/tcp-rtt-loss-test.yaml:
--------------------------------------------------------------------------------
1 | # test case yaml file is only visible to the `run_test.py`
2 | containernet:
3 | node_config:
4 | config_name: default
5 | config_file: predefined.node_config.yaml
6 |
7 | tests:
8 | test2000:
9 | description: "Test tcp with different RTT and loss rate"
10 | topology:
11 | config_name: linear_network_rtt_loss
12 | config_file: predefined.topology.yaml
13 | target_protocols: [tcp-bbr]
14 | route: static_route
15 | test_tools:
16 | iperf:
17 | interval: 1
18 | interval_num: 10
19 | packet_type: tcp
20 | client_host: 0
21 | server_host: 1
22 | execution_mode: parallel # parallel or serial
23 |
--------------------------------------------------------------------------------
/src/config/bats-protocol-rtt-loss-test.yaml:
--------------------------------------------------------------------------------
1 | # test case yaml file is only visible to the `run_test.py`
2 | containernet:
3 | node_config:
4 | config_name: default
5 | config_file: predefined.node_config.yaml
6 |
7 | tests:
8 | test1000:
9 | description: "Test BATS protocol with different RTT and loss rate"
10 | topology:
11 | config_name: linear_network_rtt_loss
12 | config_file: predefined.topology.yaml
13 | target_protocols: [brtp_proxy]
14 | route: static_route
15 | test_tools:
16 | iperf:
17 | interval: 1
18 | interval_num: 10
19 | packet_type: tcp
20 | client_host: 0
21 | server_host: 1
22 | execution_mode: parallel # parallel or serial
23 |
--------------------------------------------------------------------------------
/src/data_analyzer/analyzer.py:
--------------------------------------------------------------------------------
1 | from abc import ABC, abstractmethod
2 | from dataclasses import dataclass
3 | from typing import List, Optional
4 |
5 |
@dataclass
class AnalyzerConfig:
    """Settings shared by all data analyzers."""
    input: List[str]  # A series of input files
    output: str  # The output visualization diagram file
    subtitle: str  # The subtitle of the diagram
    data_type: Optional[str] = None  # tcp, udp, bats, etc.
12 |
13 |
class IDataAnalyzer(ABC):
    """Abstract base for log analyzers: process inputs, then visualize."""

    def __init__(self, config: AnalyzerConfig):
        super().__init__()
        self.config = config  # input files / output diagram settings

    @abstractmethod
    def analyze(self):
        """Process the configured input files. Must be overridden."""
        pass

    @abstractmethod
    def visualize(self):
        """Produce the output diagram from the analysis. Must be overridden."""
        pass
26 |
--------------------------------------------------------------------------------
/src/protosuites/bats/bats_btp.py:
--------------------------------------------------------------------------------
1 | from protosuites.bats.bats_protocol import BATSProtocol
2 | from interfaces.network import INetwork
3 |
4 |
class BTP(BATSProtocol):
    """BATS protocol BTP-TUN mode
    """

    def get_tun_ip(self, network: 'INetwork', host_id: int) -> str:
        """Map the active routing strategy to the BTP TUN device IP."""
        # TUN interface name keyed by routing strategy class name.
        tun_by_routing = {
            'StaticRouting': 'tun_session',
            'OLSRRouting': 'olsr_tun_BTP',
        }
        routing_name = network.get_routing_strategy().routing_type()
        if routing_name not in tun_by_routing:
            return ""
        target = network.get_hosts()[host_id]
        return self._get_ip_from_host(target, tun_by_routing[routing_name])
18 |
--------------------------------------------------------------------------------
/src/protosuites/bats/bats_brtp.py:
--------------------------------------------------------------------------------
1 | from protosuites.bats.bats_protocol import BATSProtocol
2 | from interfaces.network import INetwork
3 |
4 |
class BRTP(BATSProtocol):
    """BATS protocol BRTP-TUN mode
    """

    def get_tun_ip(self, network: 'INetwork', host_id: int) -> str:
        """Resolve the BRTP TUN IP of host_id under the current routing."""
        routing_name = network.get_routing_strategy().routing_type()
        iface = None
        if routing_name == 'StaticRouting':
            iface = 'tun_session'
        elif routing_name == 'OLSRRouting':
            iface = 'olsr_tun_BRTP'
        if iface is None:
            # Unsupported routing strategy for BRTP-TUN.
            return ""
        return self._get_ip_from_host(network.get_hosts()[host_id], iface)
18 |
--------------------------------------------------------------------------------
/src/config/protocol-sshping-test.yaml:
--------------------------------------------------------------------------------
1 | # test case yaml file is only visible to the `run_test.py`
2 | containernet:
3 | node_config:
4 | config_name: default
5 | config_file: predefined.node_config.yaml
6 | init_script: init_node.sh
7 |
8 | tests:
9 | test1:
10 | description: "Test sshping"
11 | topology:
12 | config_name: linear_network_1
13 | config_file: predefined.topology.yaml
14 | target_protocols: [tcp-bbr]
15 | route: static_route
16 | test_tools:
17 | sshping: # SSHPingTest https://github.com/spook/sshping
18 | args:
19 | - "-i /root/.ssh/id_rsa -H root@%s"
20 | client_host: 0
21 | server_host: 1 # test flow from 0 to 1
22 | interval: 1
23 | interval_num: 10
24 |
--------------------------------------------------------------------------------
/src/config/predefined.node_config.yaml:
--------------------------------------------------------------------------------
1 | node_config:
2 | - name: n_hop
3 | img: gitlab.app.n-hop.com:5005/dependencies/oasis/ubuntu-generic:latest
4 | vols:
5 | - "/var/run/docker.sock:/var/run/docker.sock"
6 | - "/nix/store:/nix/store:ro"
7 | - "/usr/bin/perf:/usr/bin/perf:ro"
8 | - "/lib/modules/:/lib/modules/:ro"
9 | - "/usr/lib/linux-tools/:/usr/lib/linux-tools/:ro"
10 | bind_port: True
11 | name_prefix: h
12 | ip_range: "10.0.0.0/8"
13 | - name: default
14 | img: ubuntu:22.04
15 | vols:
16 | - "/var/run/docker.sock:/var/run/docker.sock"
17 | - "/usr/bin/perf:/usr/bin/perf:ro"
18 | - "/lib/modules/:/lib/modules/:ro"
19 | - "/usr/lib/linux-tools/:/usr/lib/linux-tools/:ro"
20 | bind_port: True
21 | name_prefix: h
22 | ip_range: "10.0.0.0/8"
23 |
--------------------------------------------------------------------------------
/src/config/protocol-multi-hops-test-olsr.yaml:
--------------------------------------------------------------------------------
1 | # test case yaml file is only visible to the `run_test.py`
2 | containernet:
3 | node_config:
4 | config_name: default
5 | config_file: predefined.node_config.yaml
6 |
7 | tests:
8 | test2:
9 | description: "Test protocol throughput/rtt on multiple hops with OLSR routing"
10 | topology:
11 | config_name: linear_network_3
12 | config_file: predefined.topology.yaml
13 | target_protocols: [btp, brtp, brtp_proxy, kcp, tcp-bbr, quic]
14 | route: olsr_route
15 | test_tools:
16 | iperf:
17 | interval: 1
18 | interval_num: 10
19 | client_host: 0
20 | server_host: 3
21 | rtt:
22 | interval: 0.01
23 | packet_count: 2000
24 | packet_size: 1014
25 | client_host: 0
26 | server_host: 3
27 |
--------------------------------------------------------------------------------
/src/config/rootfs/usr/sbin/init_node.sh:
--------------------------------------------------------------------------------
#!/bin/sh
# This script is executed by the init process of each node in the network.

# Install the shared oasis key pair and start sshd so nodes accept
# key-based root SSH (used by the ssh-based test tools, e.g. sshping/scp).
init_ssh() {
    echo "Initializing SSH for the node..."
    rm -rf /root/.ssh/
    mkdir -p /root/.ssh
    cp /root/oasis/src/config/keys/* /root/.ssh/
    # The same public key authorizes every node, so any node can reach any other.
    cp /root/.ssh/id_rsa.pub /root/.ssh/authorized_keys
    # fix: Permissions 0644 for '/root/.ssh/id_rsa' are too open
    chmod 600 /root/.ssh/id_rsa
    chmod 600 /root/.ssh/id_rsa.pub
    # Key-only root login; NOTE(review): StrictModes is disabled, presumably
    # to tolerate permissive home-dir permissions in the images — confirm.
    echo 'PermitRootLogin yes' | tee -a /etc/ssh/sshd_config
    echo 'PasswordAuthentication no' | tee -a /etc/ssh/sshd_config
    echo 'StrictModes no' | tee -a /etc/ssh/sshd_config
    service ssh start
}

# Placeholder for per-node library setup.
init_library() {
    echo "Initializing libraries for the node..."
}

init_ssh
init_library
24 |
--------------------------------------------------------------------------------
/src/config/protocol-udp-test.yaml:
--------------------------------------------------------------------------------
1 | # test case yaml file is only visible to the `run_test.py`
2 | containernet:
3 | node_config:
4 | config_name: default
5 | config_file: predefined.node_config.yaml
6 |
7 | tests:
8 | test100:
9 | description: "UDP test over BATS protocol"
10 | topology:
11 | config_name: linear_network_1_20
12 | config_file: predefined.topology.yaml
13 | target_protocols: [btp]
14 | route: static_route
15 | test_tools:
16 | iperf1: # iperf udp test
17 | interval: 1
18 | interval_num: 10
19 | packet_type: udp
20 | bitrate: 200 # 200M
21 | client_host: 0
22 | server_host: 1
23 | iperf2: # iperf udp test
24 | interval: 1
25 | interval_num: 10
26 | packet_type: udp
27 | bitrate: 10 # 10M
28 | client_host: 0
29 | server_host: 1
--------------------------------------------------------------------------------
/src/data_analyzer/analyzer_factory.py:
--------------------------------------------------------------------------------
1 | from .iperf3_analyzer import Iperf3Analyzer
2 | from .rtt_analyzer import RTTAnalyzer
3 | from .first_rtt_analyzer import FirstRTTAnalyzer
4 | from .sshping_analyzer import SSHPingAnalyzer
5 | from .scp_analyzer import ScpAnalyzer
6 | from .analyzer import AnalyzerConfig
7 |
8 |
class AnalyzerFactory:
    """Creates the analyzer matching a given log type."""

    @staticmethod
    def get_analyzer(log_type: str, config: AnalyzerConfig):
        """Return an analyzer instance for `log_type`.

        Raises ValueError when the log type is not recognized.
        """
        # Lambdas keep construction lazy: only the matching class is touched.
        builders = {
            "iperf3": lambda: Iperf3Analyzer(config),
            "rtt": lambda: RTTAnalyzer(config),
            "first_rtt": lambda: FirstRTTAnalyzer(config),
            "sshping": lambda: SSHPingAnalyzer(config),
            "scp": lambda: ScpAnalyzer(config),
        }
        if log_type not in builders:
            raise ValueError(f"Unknown log type: {log_type}")
        return builders[log_type]()
23 |
--------------------------------------------------------------------------------
/src/config/nested-containernet-config.yaml:
--------------------------------------------------------------------------------
1 | containernet:
2 | default: # n-hop default containernet configuration
3 | image: gitlab.app.n-hop.com:5005/dependencies/oasis/containernet:latest
4 | privileged: true
5 | network_mode: "host"
6 | mounts:
7 | - "/var/run/docker.sock:/var/run/docker.sock"
8 | n-hop-local-dev: # n-hop dev containernet configuration for local development
9 | image: gitlab.app.n-hop.com:5005/dependencies/oasis/containernet:latest
10 | privileged: true
11 | containernet_repo_from_user: true # If true, will mount `containernet_repo_path`.
12 | containernet_repo_path: "/home/{user_name}/workspace/containernet"
13 | network_mode: "host"
14 | mounts:
15 | - "/var/run/docker.sock:/var/run/docker.sock"
16 | official:
17 | image: containernet:latest
18 | privileged: true
19 | network_mode: "host"
20 | mounts:
21 | - "/var/run/docker.sock:/var/run/docker.sock"
22 |
--------------------------------------------------------------------------------
/src/routing/routing_factory.py:
--------------------------------------------------------------------------------
1 | from interfaces.routing import (IRoutingStrategy, RouteType)
2 | from routing.static_routing import StaticRouting
3 | from routing.static_routing_bfs import StaticRoutingBfs
4 | from routing.olsr_routing import OLSRRouting
5 | from routing.openr_routing import OpenrRouting
6 |
# Maps route-name strings (the `route:` values used in the test YAML files,
# e.g. "static_bfs", "static_route") to their RouteType enum members.
route_string_to_enum = {
    'static_bfs': RouteType.static_bfs,
    'static_route': RouteType.static,
    'olsr_route': RouteType.olsr,
    'openr_route': RouteType.openr}
13 |
14 |
class RoutingFactory:
    """Instantiates the routing strategy implementing a given RouteType."""

    def create_routing(self, routing_type: RouteType) -> IRoutingStrategy:
        """Return a fresh strategy for `routing_type`, or None when unsupported."""
        strategy_classes = {
            RouteType.static: StaticRouting,
            RouteType.static_bfs: StaticRoutingBfs,
            RouteType.olsr: OLSRRouting,
            RouteType.openr: OpenrRouting,
        }
        strategy_cls = strategy_classes.get(routing_type)
        return strategy_cls() if strategy_cls is not None else None
26 |
--------------------------------------------------------------------------------
/src/interfaces/host.py:
--------------------------------------------------------------------------------
1 | from abc import ABC, abstractmethod
2 |
3 |
class IHost(ABC):
    """Abstract interface for a host Oasis can run commands on
    (e.g. a containernet node or a real testbed device).

    Only `cmd` and `is_connected` are abstract; the other methods are
    optional stubs that concrete hosts may override.
    """

    def __init__(self):
        pass

    @abstractmethod
    def cmd(self, command: str) -> str:
        """Execute a command on the host and return its output.
        """

    def cmdPrint(self, command: str) -> str:
        """Execute a command on the host and print the output.
        NOTE(review): this stub returns None despite the str annotation —
        concrete implementations are expected to return the output.
        """

    def popen(self, command: str):
        """Execute a command on the host using popen.
        """

    def name(self) -> str:
        """Get the name of the host.
        """
        return ""

    def IP(self) -> str:
        """Get the IP address of the host.
        """
        return ""

    def deleteIntfs(self):
        """Delete all interfaces.
        """

    def getIntfs(self):
        # Optional: return the host's network interfaces; default does nothing.
        pass

    def cleanup(self):
        """Cleanup the host.
        """

    def get_host(self):
        # Optional: return the underlying host object (implementation specific).
        pass

    @abstractmethod
    def is_connected(self) -> bool:
        """Check if the host is connected to the network.
        """
--------------------------------------------------------------------------------
/src/data_analyzer/scp_analyzer.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import os
3 | from .analyzer import IDataAnalyzer
4 |
5 |
class ScpAnalyzer(IDataAnalyzer):
    """Analyze and visualize multiple input scp logs.
    """

    def analyze(self):
        """Validate every configured scp log.

        A log passes when it contains "passed", a "100%" transfer line and
        the "Script done." marker. Returns False on the first missing file
        or failed check, True when all logs pass.
        """
        for input_log in self.config.input:
            # Fixed: use plain %-style lazy formatting; the original mixed an
            # f-string prefix with a %s placeholder (pylint W1309).
            logging.info("Analyze scp log: %s", input_log)
            if not os.path.exists(input_log):
                logging.info("The scp log file %s does not exist", input_log)
                return False
            with open(input_log, "r", encoding="utf-8") as f:
                lines = f.readlines()
            has_passed = any("passed" in line for line in lines)
            has_percent = any("100%" in line for line in lines)
            has_script_done = any("Script done." in line for line in lines)
            if not has_percent or not has_script_done or not has_passed:
                logging.info(
                    "The scp log %s does not contain the expected content", input_log)
                return False
            logging.info("The scp log %s check is passed", input_log)
        return True

    def visualize(self):
        """No visualization is produced for scp logs."""
29 | pass
30 |
--------------------------------------------------------------------------------
/src/protosuites/proto_info.py:
--------------------------------------------------------------------------------
1 | from abc import ABC, abstractmethod
2 |
3 |
class IProtoInfo(ABC):
    """For a classic proxy protocol, it should have defined at least one of the following IPC mechanisms:
    - UDP forward port
    - TUN interface
    - UDS listening port
    """
    @abstractmethod
    def get_forward_port(self) -> int:
        """The UDP forward port of the protocol.
        """

    @abstractmethod
    def get_tun_ip(self, network: 'INetwork', host_id: int) -> str: # type: ignore
        """The ip address of the tun interface on host `host_id`

        Returns:
            str: ip address of the tun interface
        """

    @abstractmethod
    def get_protocol_args(self, network: 'INetwork') -> str: # type: ignore
        """The arguments for the protocol binary.
        The arguments should be a string that can be passed to the protocol binary.
        """

    @abstractmethod
    def get_protocol_name(self) -> str:
        """The name of the protocol
        """

    def is_distributed(self) -> bool:
        """Whether the protocol is deployed in 'distributed' mode; True by default."""
        return True

    def get_protocol_version(self) -> str:
        """Version string of the protocol; empty when unversioned (default)."""
        return ""
39 |
--------------------------------------------------------------------------------
/Dockerfile.ubuntu-generic:
--------------------------------------------------------------------------------
1 | FROM ubuntu:22.04
2 | ARG DEBIAN_FRONTEND=noninteractive
3 |
4 | RUN set -eux; \
5 | apt-get update; \
6 | apt-get upgrade --yes;
7 |
8 | # FIXME: are all these packages needed?
9 | RUN set -eux; \
10 | apt-get install --yes \
11 | net-tools \
12 | iproute2 \
13 | iputils-ping \
14 | python3 \
15 | libatomic1 \
16 | iperf3 \
17 | python3-pip \
18 | vim \
19 | openssh-server \
20 | sysstat \
21 | wkhtmltopdf \
22 | curl
23 |
24 | ## install olsr tools, iptables, tcpdump
25 | RUN apt-get install --yes \
26 | netcat \
27 | tcpdump \
28 | iptables
29 |
30 | # FIXME: are all these packages needed?
31 | RUN pip3 install \
32 | psutil==5.9.5 \
33 | numpy==1.26.4 \
34 | matplotlib==3.7.2 \
35 | paramiko==2.8.1 \
36 | prettytable==3.9.0 \
37 | pdfkit==1.0.0 \
38 | PyPDF2 \
39 | reportlab==3.6.8 \
40 | tqdm==4.65.0 \
41 | markdown==3.4.4 \
42 | python-markdown-math==0.8 \
43 | PyYAML==6.0.1
44 |
45 | ## Cleanup
46 | RUN set -eux; \
47 | rm -rf /var/lib/apt/lists/*; \
48 | rm -rf /tmp/*;
49 |
50 | WORKDIR /root
51 |
--------------------------------------------------------------------------------
/src/config/rootfs/etc/bats-protocol/server.cert:
--------------------------------------------------------------------------------
1 | -----BEGIN CERTIFICATE-----
2 | MIIDazCCAlOgAwIBAgIUPe/FgF46MDLNQzstt99qdogsl88wDQYJKoZIhvcNAQEL
3 | BQAwRTELMAkGA1UEBhMCQVUxEzARBgNVBAgMClNvbWUtU3RhdGUxITAfBgNVBAoM
4 | GEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZDAeFw0yNTA0MDMxMjMxMDJaFw0yNTA1
5 | MDMxMjMxMDJaMEUxCzAJBgNVBAYTAkFVMRMwEQYDVQQIDApTb21lLVN0YXRlMSEw
6 | HwYDVQQKDBhJbnRlcm5ldCBXaWRnaXRzIFB0eSBMdGQwggEiMA0GCSqGSIb3DQEB
7 | AQUAA4IBDwAwggEKAoIBAQCR6N0KNRxMxuATAuHApx+EJS9Egbhl+A3E8XLtsIDE
8 | 20f4ogro5DayEFJFeC/4bmID6lmDSlUKaG1D+ih+uIBQRDLyqnakqRdmPnHWqPyz
9 | kXrNrcDKVxKemD+/Va28ewebEaHuF/W5Cge9f1890LMp2jd1yS4q6hYFpJ1RDLP4
10 | hJXH8+smvsumdRmiuITyRiQ8D0zy6jIrsnORrnXjK12w6RHd+aulBXpIFEC7gOXA
11 | pd8Pef1NF6Mea1UT2a2pnOzwkxzOYaljVApZVse+Y7UFnOZGbwfP9xSMmYNm5QVu
12 | 2KSNQLfwH3AspNnCp3UtGMS6qSdEeVs0hbbt5XVwhJ2fAgMBAAGjUzBRMB0GA1Ud
13 | DgQWBBQe6WIbJ9+FnSu7Tt0N1Bk5pM9z+DAfBgNVHSMEGDAWgBQe6WIbJ9+FnSu7
14 | Tt0N1Bk5pM9z+DAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQAV
15 | Z3TpJC1OV3Bt5VlnSZ2ak3zZ/qt9mYSCmzMtUI4iJfrnASvh+QdP+jZf0C/6dsPX
16 | 3EdBMDYtjqRpU9OSTZQuisJ9nRXaHu4oKyOeUTvJVjiQFtC7ia2+YKeTc+XfgvJ/
17 | SkxVqyNhidzqNsvGphtVO8goIThna/MZM5xMPdKWu+NWAucNuwluaNya3UqrfanO
18 | /yTeqs7bHxpbaxzx/xuebdRXbNZObU5pIAIae1PUB18htiJA/Hwlu053L+/FyZ7H
19 | EeSutiIIhZp0hYryViX7RDFia0A81ZIQrEMAQMNSq2dqH4e9LwzbaEb1M+PGyhtS
20 | SK15yKMqjShkagohrna8
21 | -----END CERTIFICATE-----
22 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Prerequisites
2 | *.d
3 |
4 | # Compiled Object files
5 | *.slo
6 | *.lo
7 | *.o
8 | *.obj
9 |
10 | # Precompiled Headers
11 | *.gch
12 | *.pch
13 |
14 | # Compiled Dynamic libraries
15 | *.so
16 | *.dylib
17 | *.dll
18 | !libelf.so
19 |
20 | # Fortran module files
21 | *.mod
22 | *.smod
23 | !go.mod
24 |
25 | # Compiled Static libraries
26 | *.lai
27 | *.la
28 | *.a
29 | !libbatscode_library*.a
30 | !libbpf.a
31 | !libxdp.a
32 | !libelf.a
33 | !af_xdp_kern.o
34 | !libbats-licence*.a
35 | *.lib
36 |
37 | # Executables
38 | *.exe
39 | *.out
40 | *.app
41 |
42 | # Cmake Build
43 | build*
44 | !.gitlab/jobs/build*
45 | cmake/open-cpp-coverage.cmake
46 | cmake-build-*/
47 | prefix/
48 | CMakeLists.txt.user
49 | CMakeUserPresets.json
50 |
51 | # IDE
52 | .idea/
53 | .vs/
54 | .vscode/
55 | .clangd/
56 |
57 | # Google Test
58 | bld*
59 |
60 | # temp dir
61 | tmp*
62 |
63 | # tag files
64 | *tags*
65 |
66 | # patch files
67 | *.log
68 |
69 | # history
70 | .mininet_history
71 | .viminfo
72 | .bash_history
73 | .cmake/*
74 |
75 | # containernet test
76 | .cache
77 | .config
78 | **/__pycache__/*
79 |
80 | # gcovr tmp config
81 | gcovr.cfg
82 |
83 | # version h
84 | src/include/version.h
85 |
86 | # Keys and certificates
87 | *.pem
88 | *.keys
89 | *key*
90 | id_rsa*
91 | *.pub
92 | **/bats_licence
93 |
94 | # results
95 | test_results
96 | test/
97 | config/
98 | !src/config/
99 | !src/test/
--------------------------------------------------------------------------------
/src/config/protocol-docker-azure/Dockerfile:
--------------------------------------------------------------------------------
1 | FROM ubuntu:22.04
2 |
3 | ARG DEBIAN_FRONTEND=noninteractive
4 |
5 | # Install tools needed for development
# net-tools, iproute2 and python3 are necessary for the protocol to run
7 | RUN apt update && \
8 | apt upgrade --yes
9 |
10 | RUN apt-get install --yes \
11 | net-tools \
12 | iproute2 \
13 | python3 \
14 | libatomic1 \
15 | iperf3 \
16 | iputils-ping \
17 | python3-pip \
18 | iptables \
19 | curl \
20 | wget \
21 | libnl-genl-3-dev \
22 | ncat \
23 | openssh-server \
24 | openssh-client
25 |
26 | RUN pip3 install \
27 | PyYAML==6.0.1 \
28 | psutil>=7.0.0 \
29 | matplotlib>=3.9.2
30 |
31 | # install Perf tools for Azure
32 | RUN apt-get install --yes \
33 | linux-tools-6.2.0-1016-azure \
34 | linux-cloud-tools-6.2.0-1016-azure \
35 | linux-tools-azure \
36 | linux-cloud-tools-azure
37 |
38 |
39 | # Install go
40 | RUN curl -L https://go.dev/dl/go1.23.0.linux-amd64.tar.gz | tar -C /usr/local -xz
41 | ENV GOPATH=/go
42 | ENV PATH=$PATH:/usr/local/go/bin:$GOPATH/bin
43 |
44 | # Install go-gost to $GOPATH
45 | RUN go install github.com/go-gost/gost/cmd/gost@latest
46 |
47 | # Cleanup
48 | RUN rm -rf /var/lib/apt/lists/*
49 |
50 | # override the entry point of containernet w/ absolute path to avoid
51 | # path not found err when changing workdir
52 |
53 | WORKDIR /root
54 |
--------------------------------------------------------------------------------
/src/interfaces/network_mgr.py:
--------------------------------------------------------------------------------
1 | from abc import ABC, abstractmethod
2 | from enum import IntEnum
3 | from core.topology import ITopology
4 |
# Alphabet table: pool of letters available to network instances —
# presumably one letter per network for naming/prefixing; confirm exact
# usage in the concrete INetworkManager implementations.
alphabet = ['h', 'i', 'j', 'k', 'l', 'm',
            'n', 'o', 'p', 'q', 'r', 's', 't']
8 |
9 |
class NetworkType(IntEnum):
    """Backing implementation behind a network instance."""
    containernet = 0  # networks constructed by `containernet`
    testbed = 1  # networks constructed by real devices
    none = 2  # no type assigned yet (INetworkManager default)
14 |
15 |
class INetworkManager(ABC):
    """NetworkManager manages multiple network instances.

    Concrete managers build, start, stop and reset their set of
    networks; `NetworkType` identifies the backing implementation.
    """

    def __init__(self):
        self.networks = []  # managed network instances
        self.net_num = 0  # number of networks to manage
        self.cur_top = None  # current topology — presumably set by build_networks; confirm in subclasses
        self.type = NetworkType.none  # concrete managers set their NetworkType

    def get_type(self):
        """Return the NetworkType of this manager."""
        return self.type

    def get_networks(self):
        """Return the list of managed network instances."""
        return self.networks

    @abstractmethod
    def get_top_description(self):
        """Return a description of the current topology."""
        pass

    @abstractmethod
    def build_networks(self, node_config,
                       topology: ITopology,
                       net_num: int,
                       route: str = "static_route"):
        """Build `net_num` networks from `node_config` and `topology`,
        using the routing strategy named by `route`.
        """
        pass

    @abstractmethod
    def start_networks(self):
        """Start all managed networks."""
        pass

    @abstractmethod
    def stop_networks(self):
        """Stop all managed networks."""
        pass

    @abstractmethod
    def reset_networks(self):
        """Reset all managed networks."""
        pass

    @abstractmethod
    def enable_halt(self):
        """Enable halt mode. NOTE(review): semantics not visible here — confirm in implementations."""
        pass
58 |
--------------------------------------------------------------------------------
/src/config/4-hops-linear-network.json:
--------------------------------------------------------------------------------
1 | {
2 | "data": [
3 | {
4 | "matrix_type": 0,
5 | "description": "This is an adjacency matrix",
6 | "matrix_data": [
7 | [0, 1, 0, 0, 0],
8 | [1, 0, 1, 0, 0],
9 | [0, 1, 0, 1, 0],
10 | [0, 0, 1, 0, 1],
11 | [0, 0, 0, 1, 0]
12 | ]
13 | },
14 | {
15 | "matrix_type": 1,
      "description": "This is a value matrix for bandwidth",
17 | "matrix_data": [
18 | [0, 100, 0, 0, 0],
19 | [100, 0, 100, 0, 0],
20 | [0, 100, 0, 100, 0],
21 | [0, 0, 100, 0, 100],
22 | [0, 0, 0, 100, 0]
23 | ]
24 | },
25 | {
26 | "matrix_type": 2,
      "description": "This is a value matrix for link loss with 1%",
28 | "matrix_data": [
29 | [0, 1, 0, 0, 0],
30 | [1, 0, 1, 0, 0],
31 | [0, 1, 0, 1, 0],
32 | [0, 0, 1, 0, 1],
33 | [0, 0, 0, 1, 0]
34 | ]
35 | },
36 | {
37 | "matrix_type": 3,
      "description": "This is a value matrix for link latency 10ms",
39 | "matrix_data": [
40 | [0, 10, 0, 0, 0],
41 | [10, 0, 10, 0, 0],
42 | [0, 10, 0, 10, 0],
43 | [0, 0, 10, 0, 10],
44 | [0, 0, 0, 10, 0]
45 | ]
46 | },
47 | {
48 | "matrix_type": 4,
      "description": "This is a value matrix for link jitter(1ms)",
50 | "matrix_data": [
51 | [0, 1, 0, 0, 0],
52 | [1, 0, 1, 0, 0],
53 | [0, 1, 0, 1, 0],
54 | [0, 0, 1, 0, 1],
55 | [0, 0, 0, 1, 0]
56 | ]
57 | }
58 | ]
59 | }
60 |
--------------------------------------------------------------------------------
/.vscode/settings.json:
--------------------------------------------------------------------------------
1 | {
2 | "[json]": {
3 | "editor.defaultFormatter": "esbenp.prettier-vscode",
4 | "editor.formatOnSave": true
5 | },
6 | "[markdown]": {
7 | "editor.defaultFormatter": "yzhang.markdown-all-in-one",
8 | "editor.formatOnSave": true,
9 | "editor.formatOnPaste": true
10 | },
11 | "[python]": {
12 | "editor.defaultFormatter": "ms-python.autopep8",
13 | "editor.formatOnSave": true
14 | },
15 | "[yaml]": {
16 | "editor.defaultFormatter": "redhat.vscode-yaml",
17 | "editor.formatOnSave": true
18 | },
19 | "yaml.schemas": {
20 | "https://json.schemastore.org/github-workflow.json": [
21 | "*.github/workflows/*.yaml",
22 | "*.github/workflows/*.yml"
23 | ],
24 | "https://json.schemastore.org/github-action.json": [
25 | "action.yaml",
26 | "action.yml"
27 | ]
28 | },
29 | "pylint.args": ["--rcfile=${workspaceFolder}/.pylintrc"],
30 | "cSpell.words": [
31 | "BANDW",
32 | "Baremetal",
33 | "BRTP",
34 | "Containernet",
35 | "datagram",
36 | "dimage",
37 | "fontsize",
38 | "fontweight",
39 | "intf",
40 | "Iperf",
41 | "markersize",
42 | "mininet",
43 | "nhop",
44 | "noqueue",
45 | "olsr",
46 | "openr",
47 | "pkill",
48 | "priomap",
49 | "protosuites",
50 | "pylint",
51 | "qdisc",
52 | "quic",
53 | "shenzhen",
54 | "sshping",
55 | "testsuites"
56 | ],
57 | "python.analysis.typeCheckingMode": "basic",
58 | "python.analysis.autoImportCompletions": true,
59 | "svg.preview.background": "white",
60 | "markdown-preview-github-styles.colorTheme": "light"
61 | }
62 |
--------------------------------------------------------------------------------
/src/config/protocol-flow-competition-test.yaml:
--------------------------------------------------------------------------------
# Test case YAML files are only visible to `run_test.py`
2 | containernet:
3 | node_config:
4 | config_name: default
5 | config_file: predefined.node_config.yaml
6 |
7 | tests:
8 | test1:
9 | description: "Flow competition test with multiple protocols"
10 | topology:
11 | config_name: complex_mesh_net
12 | config_file: predefined.topology.yaml
13 | target_protocols: [tcp-bbr]
14 | route: static_bfs
15 | test_tools:
16 | iperf:
17 | interval: 1
18 | interval_num: 60
19 | packet_type: tcp
20 | client_host: 2
21 | server_host: 5
22 | competition_flows:
23 | - flow_type: tcp-bbr
24 | client_host: 0
25 | server_host: 6
26 | delay: 10
27 | duration: 10
28 | - flow_type: tcp-bbr
29 | client_host: 1
30 | server_host: 7
31 | delay: 30
32 | duration: 15
33 | test2:
34 | description: "Flow competition test with multiple protocols"
35 | topology:
36 | config_name: complex_mesh_net
37 | config_file: predefined.topology.yaml
38 | target_protocols: [tcp-bbr]
39 | route: static_bfs
40 | test_tools:
41 | iperf:
42 | interval: 1
43 | interval_num: 60
44 | packet_type: tcp
45 | client_host: 2
46 | server_host: 5
47 | competition_flows:
48 | - flow_type: btp
49 | client_host: 0
50 | server_host: 6
51 | delay: 10
52 | duration: 10
53 | - flow_type: brtp
54 | client_host: 1
55 | server_host: 7
56 | delay: 30
57 | duration: 15
58 |
--------------------------------------------------------------------------------
/.gitattributes:
--------------------------------------------------------------------------------
1 | bin/kcp/client_linux_amd64 filter=lfs diff=lfs merge=lfs -text
2 | bin/kcp/server_linux_amd64 filter=lfs diff=lfs merge=lfs -text
3 | bats/bats_protocol filter=lfs diff=lfs merge=lfs -text
4 | bin/tcp_message/tcp_endpoint filter=lfs diff=lfs merge=lfs -text
5 | bin/ssh/sshping filter=lfs diff=lfs merge=lfs -text
6 | bats/bats_cli filter=lfs diff=lfs merge=lfs -text
7 | src/config/rootfs/bin/server_linux_amd64 filter=lfs diff=lfs merge=lfs -text
8 | src/config/rootfs/bin/sshping filter=lfs diff=lfs merge=lfs -text
9 | src/config/rootfs/bin/tcp_endpoint filter=lfs diff=lfs merge=lfs -text
10 | src/config/rootfs/bin/bats_cli filter=lfs diff=lfs merge=lfs -text
11 | src/config/rootfs/bin/bats_protocol filter=lfs diff=lfs merge=lfs -text
12 | src/config/rootfs/bin/client_linux_amd64 filter=lfs diff=lfs merge=lfs -text
13 | src/config/rootfs/bin/kernel_tc.sh filter=lfs diff=lfs merge=lfs -text
14 | src/config/rootfs/bin/olsrd2_static filter=lfs diff=lfs merge=lfs -text
15 | src/config/rootfs/usr/bin/bats_cli filter=lfs diff=lfs merge=lfs -text
16 | src/config/rootfs/usr/bin/bats_protocol filter=lfs diff=lfs merge=lfs -text
17 | src/config/rootfs/usr/bin/client_linux_amd64 filter=lfs diff=lfs merge=lfs -text
18 | src/config/rootfs/usr/bin/kernel_tc.sh filter=lfs diff=lfs merge=lfs -text
19 | src/config/rootfs/usr/bin/olsrd2_static filter=lfs diff=lfs merge=lfs -text
20 | src/config/rootfs/usr/bin/server_linux_amd64 filter=lfs diff=lfs merge=lfs -text
21 | src/config/rootfs/usr/bin/sshping filter=lfs diff=lfs merge=lfs -text
22 | src/config/rootfs/usr/bin/tcp_endpoint filter=lfs diff=lfs merge=lfs -text
23 | src/config/rootfs/usr/bin/secnetperf filter=lfs diff=lfs merge=lfs -text
24 |
--------------------------------------------------------------------------------
/src/config/protocol-single-hop-test.yaml:
--------------------------------------------------------------------------------
# Test case YAML files are only visible to `run_test.py`
2 | containernet:
3 | node_config:
4 | config_name: default
5 | config_file: predefined.node_config.yaml
6 | env:
7 | - ENV_BATS_XXX_INTERVAL: "100"
8 |
9 | tests:
10 | test1:
11 | description: "Test protocol throughput/rtt on single hop"
12 | topology:
13 | config_name: linear_network_1
14 | config_file: predefined.topology.yaml
15 | target_protocols: [btp, tcp-bbr, kcp]
16 | route: static_bfs
17 | test_tools:
18 | iperf:
19 | interval: 1
20 | interval_num: 10
21 | packet_type: tcp
22 | client_host: 0
23 | server_host: 1
24 | execution_mode: parallel # parallel or serial
25 | test2:
26 | description: "Test protocol throughput/rtt on single hop"
27 | topology:
28 | config_name: linear_network_1
29 | config_file: predefined.topology.yaml
30 | target_protocols: [btp, tcp-bbr, kcp]
31 | route: static_route
32 | test_tools:
33 | iperf:
34 | interval: 1
35 | interval_num: 10
36 | packet_type: tcp
37 | client_host: 0
38 | server_host: 1
39 | execution_mode: parallel # parallel or serial
40 | test3:
41 | description: "Test quic throughput on single hop"
42 | topology:
43 | config_name: linear_network_1 # loss 5% 10ms
44 | config_file: predefined.topology.yaml
45 | target_protocols: [msquic]
46 | route: static_route
47 | test_tools:
48 | secnetperf:
49 | args:
50 | - "-target:%s -exec:maxtput -down:10s -ptput:1"
51 | client_host: 0
52 | server_host: 1 # test flow from 0 to 1
53 |
--------------------------------------------------------------------------------
/.github/workflows/.github.ci.yml:
--------------------------------------------------------------------------------
1 | name: CodeQL Pylint Unittest
2 |
3 | on:
4 | push:
5 | branches: ["main"]
6 | pull_request:
7 | branches: ["main"]
8 | schedule:
9 | - cron: "35 15 * * 4"
10 |
11 | jobs:
12 | ci:
13 | name: Continuous Integration
14 | runs-on: ${{ (matrix.language == 'swift' && 'macos-latest') || 'ubuntu-latest' }}
15 | timeout-minutes: ${{ (matrix.language == 'swift' && 120) || 360 }}
16 | permissions:
17 | # required for all workflows
18 | security-events: write
19 | # required to fetch internal or private CodeQL packs
20 | packages: read
21 | # only required for workflows in private repositories
22 | actions: read
23 | contents: read
24 | strategy:
25 | matrix:
26 | language: ["python"]
27 | python-version: ["3.8"]
28 | steps:
29 | - name: Checkout repository
30 | uses: actions/checkout@v4
31 |
32 | - name: Set up Python ${{ matrix.python-version }}
33 | uses: actions/setup-python@v5
34 | with:
35 | python-version: ${{ matrix.python-version }}
36 |
37 | - name: Install dependencies
38 | run: |
39 | python -m pip install --upgrade pip
40 | pip install pylint
41 |
42 | - name: Run Pylint
43 | run: |
44 | pylint --ignore-patterns=".*_unittest.py" --disable=unused-argument $(git ls-files '*.py') --rcfile=${{ github.workspace }}/.pylintrc
45 |
46 | - name: Run Unit tests
47 | run: |
48 | python3 -m unittest discover -s src -p "*_unittest.py"
49 |
50 | - uses: github/codeql-action/init@v3
51 | with:
52 | languages: ${{ matrix.language }}
53 | build-mode: none
54 |
55 | - name: Perform CodeQL Analysis
56 | uses: github/codeql-action/analyze@v3
57 |
--------------------------------------------------------------------------------
/src/config/rootfs/etc/bats-protocol/server.key:
--------------------------------------------------------------------------------
1 | -----BEGIN PRIVATE KEY-----
2 | MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQCR6N0KNRxMxuAT
3 | AuHApx+EJS9Egbhl+A3E8XLtsIDE20f4ogro5DayEFJFeC/4bmID6lmDSlUKaG1D
4 | +ih+uIBQRDLyqnakqRdmPnHWqPyzkXrNrcDKVxKemD+/Va28ewebEaHuF/W5Cge9
5 | f1890LMp2jd1yS4q6hYFpJ1RDLP4hJXH8+smvsumdRmiuITyRiQ8D0zy6jIrsnOR
6 | rnXjK12w6RHd+aulBXpIFEC7gOXApd8Pef1NF6Mea1UT2a2pnOzwkxzOYaljVApZ
7 | Vse+Y7UFnOZGbwfP9xSMmYNm5QVu2KSNQLfwH3AspNnCp3UtGMS6qSdEeVs0hbbt
8 | 5XVwhJ2fAgMBAAECggEAEXd0KSn9Hp5wFIEzz9bcdE/p6KQ0FCuC+nXXnDyU2FFS
9 | ZvZ+99eEn1b016bGQfx0yfiJM/5MpSz7Ejgla4YKq5EpHYOVJautnBUS65ovJg/e
10 | 84rpQ/pIcFPUIxjOmNGMMjQE08u/zqGKFlU5Ft1KP/2vcJo0vKO+dHklm/OOrSEu
11 | cdwJ9QxL7R2LQ/piblanDTqL2T/FvB8R0CulL68Oc7m9QGhHwib6LMWJIYe9GdBA
12 | eQ54LiUNI98wpgplBuFENCNV+aHcpDcuGTLCuA1NpcnG0yI6L7DUcUutT7Ek8F8i
13 | j+Y1yTTjOT0UuXdzrLfU+ltrS4fzUrJlym83Eob0gQKBgQDBnXZVaZYsYcyFWaeE
14 | V3NRCEJAHpadnVRoQo0scDD6/VXJjCWlicNB2inpsx8hcKhPVn8dxlOV2z5rA7AR
15 | VAOSNDC2Da9SA3416enaYmH+jSugy6yBGByYDNVW7vZ417MqxmB667wJZZj7vFAd
16 | uPbJOz9OHxAEhSsXbngPbEov1QKBgQDA7F4MN/6mGKrqjiWAwVYK8JKgodOGxIIi
17 | 0/+yt2hvJBSOTBQmIsiEtO353pYFWXl77WXcrapGZswt8E1yswXDJv5Z+SzUIWhJ
18 | 5N5a0f/JVslEiUHUnyEuo0PIucQbxMifDRKFm3J8Egs81QqDWPTaXD8GsIfU27gm
19 | IJdKdnUFowKBgGrcCHKtuve15pZ23BEb3waLQ112RHSrX+nHSXmMVkxAxDdgBWsu
20 | 9LiEu3tDpFvlfM+FEtWRH5LnFbUkrOlzcCEicvwX43qleOQyvSIOCL3gloLBxuzd
21 | LPLyqlC3xQr/KeDi6l8NepPJIj1WOvWcn4CDFbZ9SG7mPNlK3DLO9scdAoGBAJED
22 | mxX4KRPUyvSQcKQ9r119w9m2inds0p03WaaOspOIPWNQ0Hxx/OOz1IXTyFSxEt4S
23 | blTYvXs53hRR4wVqh9RPRTWAVTAHBIs22+TMoUZIU/WObstR6fFSi6sf58cvvdXA
24 | T8HaPM4CCixpvwNIJJTWbhGrNA7OMbeHG/G+5OudAoGBAJYawqLa1TCXQCaFEVqu
25 | 8cVK1RsYHwFBL8UnOHtKhlqxYe1I0yknix3UzdQqqF23E5i+XCE7zuQIIJl7smmc
26 | xsYA7iR3sd+Qykou0DdcHTMZF65ZPWXN6aPJVOs0VIXBQflXFnVspz/PlIkdwymS
27 | i6z9N937R+IvPDHhqeRgb6sx
28 | -----END PRIVATE KEY-----
29 |
--------------------------------------------------------------------------------
/src/config/protocol-performance-comparison.yaml:
--------------------------------------------------------------------------------
# Test case YAML files are only visible to `run_test.py`
2 | containernet:
3 | node_config:
4 | config_name: default
5 | config_file: predefined.node_config.yaml
6 |
7 | tests:
8 | test2:
9 | description: "Test protocol throughput/rtt on single hop"
10 | topology:
11 | config_name: linear_network_1
12 | config_file: predefined.topology.yaml
13 | target_protocols: [btp, brtp_proxy, kcp, tcp-bbr]
14 | allow_failure: true
15 | route: static_route
16 | test_tools:
17 | iperf:
18 | interval: 1
19 | interval_num: 40
20 | client_host: 0
21 | server_host: 1
22 | rtt:
23 | interval: 0.01
24 | packet_count: 2000
25 | packet_size: 512
26 | client_host: 0
27 | server_host: 1
28 | test3:
29 | description: "Test protocol throughput/rtt on multiple hops"
30 | topology:
31 | config_name: linear_network_3
32 | config_file: predefined.topology.yaml
33 | target_protocols: [btp, brtp_proxy, kcp, tcp-bbr]
34 | allow_failure: true
35 | route: static_route
36 | test_tools:
37 | iperf:
38 | interval: 1
39 | interval_num: 40
40 | client_host: 0
41 | server_host: 3
42 | rtt:
43 | interval: 0.01
44 | packet_count: 2000
45 | packet_size: 512
46 | client_host: 0
47 | server_host: 3
48 | test4:
49 | description: "Long fat network test"
50 | topology:
51 | config_name: long_fat_network
52 | config_file: predefined.topology.yaml
53 | target_protocols: [brtp_proxy, tcp-cubic, tcp-bbr]
54 | allow_failure: true
55 | route: static_route
56 | test_tools:
57 | iperf:
58 | interval: 1
59 | interval_num: 40
60 | client_host: 0
61 | server_host: 1
62 |
--------------------------------------------------------------------------------
/src/config/predefined.protocols.yaml:
--------------------------------------------------------------------------------
1 | protocols:
2 | - name: tcp-cubic
3 | version: cubic
4 | type: distributed
5 | - name: tcp-reno
6 | version: reno
7 | type: distributed
8 | - name: tcp-bbr
9 | version: bbr
10 | type: distributed
11 | - name: btp
12 | type: distributed
13 | bin: bats_protocol
14 | args:
15 | - "--daemon_enabled=true"
16 | - "--api_enabled=true"
17 | - "--tun_protocol=BTP"
18 | version: latest
19 | - name: brtp
20 | type: distributed
21 | bin: bats_protocol
22 | args:
23 | - "--daemon_enabled=true"
24 | - "--api_enabled=true"
25 | - "--tun_protocol=BRTP"
26 | version: latest
27 | - name: brtp_proxy
28 | type: distributed
29 | bin: bats_protocol
30 | args:
31 | - "--daemon_enabled=true"
32 | - "--api_enabled=true"
33 | version: latest
34 | - name: kcp
35 | type: none_distributed
36 | args: # global args for kcp
37 | - "-mode fast3"
38 | - "--datashard 10 --parityshard 3 -nocomp"
39 | - "-sockbuf 16777217 -dscp 46 --crypt=none"
40 | port: 10100 # kcp protocol forward port
41 | protocols: # iterative
42 | - name: kcp_client
43 | bin: client_linux_amd64
44 | args:
45 | - "-r %s:4000 -l :10100" # %s is the ip of kcp_server
46 | - name: kcp_server
47 | bin: server_linux_amd64
48 | args:
49 | - "-l :4000 -t 127.0.0.1:10100" # by default, it forwards the traffic to localhost:10100
50 | - name: msquic
51 | type: distributed
52 | version: bbr
53 | bin: secnetperf
54 | args:
55 | - "-exec:maxtput"
56 | - name: btp-next
57 | type: none_distributed
58 | args:
59 | - "-m 0"
60 | - name: brtp-next
61 | type: none_distributed
62 | args:
63 | - "-m 1"
64 | - name: tcp-next # TCP over TCP.
65 | type: none_distributed
66 | args:
67 | - "-m 10"
68 |
--------------------------------------------------------------------------------
/src/testsuites/test_ping.py:
--------------------------------------------------------------------------------
1 | import logging
2 |
3 | from interfaces.network import INetwork
4 | from protosuites.proto_info import IProtoInfo
5 | from .test import (ITestSuite)
6 |
7 |
class PingTest(ITestSuite):
    """Ping connectivity test.

    With `client_host`/`server_host` configured, pings from client to server;
    otherwise pings host 0 from every other host. Fails when a ping reports
    100% packet loss.
    """

    def post_process(self):
        return True

    def pre_process(self):
        return True

    def _run_test(self, network: INetwork, proto_info: IProtoInfo):
        hosts = network.get_hosts()
        hosts_num = len(hosts)
        if self.config.client_host is None or self.config.server_host is None:
            # No explicit endpoints: ping host 0 from every other host.
            for i in range(hosts_num):
                if i == 0:
                    continue
                logging.info(
                    "############### Oasis PingTest from "
                    "%s to %s ###############", hosts[i].name(), hosts[0].name())
                # Fixed: ping the destination host 0 (the original pinged the
                # source's own IP). `tee` keeps a copy in the record file while
                # still returning the output, so the loss check below is live
                # (a plain `>` redirect left `res` empty).
                res = hosts[i].cmd(f'ping -W 1 -c {self.config.interval_num} -i {self.config.interval} '
                                   f'{hosts[0].IP()}'
                                   f' | tee {self.result.record}')
                logging.info('host %s', res)
                if "100% packet loss" in res:
                    logging.error("Ping test failed")
                    return False
            return True
        # Run ping test from client to server
        logging.info(
            "############### Oasis PingTest from "
            "%s to %s ###############",
            hosts[self.config.client_host].name(),
            hosts[self.config.server_host].name())
        res = hosts[self.config.client_host].popen(
            f'ping -W 1 -c {self.config.interval_num} -i {self.config.interval} '
            f'{hosts[self.config.server_host].IP()}').stdout.read().decode('utf-8')
        logging.info('host %s', res)
        if "100% packet loss" in res:
            logging.error("Ping test failed")
            return False
        return True
47 |
--------------------------------------------------------------------------------
/src/tools/util.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 |
def is_same_path(file_path1, file_path2):
    """Return True when the two paths are textually the same location.

    `os.path.normpath` already collapses redundant separators (`//`),
    resolves `.` segments and strips trailing slashes, so comparing the
    normalized forms suffices — the old manual trailing-`/` stripping
    was dead code after normpath.
    """
    return os.path.normpath(file_path1) == os.path.normpath(file_path2)
15 |
16 |
def is_base_path(file_path1, file_path2):
    """
    check whether `file_path1` is the base path of `file_path2`;
    for example: /root is base path of /root/a/b/c

    Fixed: the previous bare `startswith` prefix test wrongly treated
    /root as the base of /rootabc — the match must end on a path
    separator boundary.
    """
    base = os.path.normpath(file_path1)
    target = os.path.normpath(file_path2)
    if base == target:
        return True
    # Require the prefix to end at a separator so `/root` never matches `/rootabc`.
    if not base.endswith(os.sep):
        base += os.sep
    return target.startswith(base)
29 |
30 |
def str_to_mbps(x, unit):
    """Convert a numeric string with a unit prefix to Mbps.

    `unit` is "K", "M", "G" or "" (bits per second); any other unit
    yields 0.0. The result is rounded to two decimal places.
    """
    conversions = {
        "K": lambda v: v / 1000,
        "M": lambda v: v,
        "G": lambda v: v * 1000,
        "": lambda v: v / 1000000,
    }
    convert = conversions.get(unit)
    value = convert(float(x)) if convert is not None else 0.00
    return round(value, 2)
42 |
43 |
def parse_test_file_name(test_file_path_string):
    """
    Parse the test YAML file string to extract the file path and test name.
    for example: `test.yaml:test1` will be parsed to `test.yaml` and `test1`

    return value: test_file_path, test_name
    """
    # Guard: empty input or a lone separator carries no information.
    if not test_file_path_string or test_file_path_string == ':':
        return None, None

    parts = test_file_path_string.split(":")
    if len(parts) == 1:
        # No separator at all: the whole string is the file path.
        return test_file_path_string, None
    if len(parts) == 2:
        file_path, test_name = parts
        return file_path, test_name
    # More than one ':' is malformed.
    return None, None
60 |
--------------------------------------------------------------------------------
/.pylintrc:
--------------------------------------------------------------------------------
1 | [MASTER]
2 | # Specify the directory of Python source code for the project
3 | init-hook='import sys; sys.path.append("./containernet_test")'
# Maximum allowed length of a line of code, in characters
5 | max-line-length=120
6 | # ignore=a.py,b.py
7 |
8 | [MESSAGES CONTROL]
9 | # Disabled message numbers or message names
10 | disable=
11 | C0103, # Variable name does not conform to naming convention
12 | C0111, # Missing function docstring
13 | R0903, # Too few methods in class, not recommended to define class
14 | R0902, # Too many instance attributes
15 | W0703, # Caught too broad an exception
16 | R0801, # similar line in 2 file
17 | E0611, # no name in module
18 | R0915, # Too many statements
19 | R0912, # Too many branches
20 | R0914, # Too many local variables
21 | W1309, # Using an f-string that does not have any interpolated variables
22 | C0209, # Formatting a regular string which could be a f-string
23 | F0001, # No module
24 | W0105, # String statement has no effect
25 | C0201, # Consider iterating the dictionary directly instead of calling .keys()
26 | E1111, # Assigning result of a function call, where the function has no return
27 | W0622, # Redefining built-in 'type'
28 | R0911, # Too many return statements
29 | C0200, # Consider using enumerate instead of iterating with range and len
30 | R1702, # Too many nested blocks
31 | E0401, # Unable to import
32 | W0719, # Raising too general exception
33 |
34 | # Enabled message numbers or message names
35 | enable=
36 | W0611, # Unused import
37 | W0612, # Unused variable
38 |
39 | [REPORTS]
40 | # Output reports
41 | reports=false
42 | # Output score (0-10)
43 | score=yes
44 |
45 | [REFACTORING]
46 | # Number of members in a class
47 | max-attributes=20
48 | # Number of arguments in a function or method
49 | max-args=10
50 |
51 | [FORMAT]
52 | # Maximum number of lines allowed in a module
53 | max-module-lines=2000
54 | # Maximum number of attributes allowed in a class
55 | max-attributes=60
56 | # Type of indent for multi-line comments
57 | indent-string=' '
58 |
--------------------------------------------------------------------------------
/docs/tc-strategy.md:
--------------------------------------------------------------------------------
1 | # Traffic control (tc)
2 |
3 | ## tc in Linux
4 |
5 | In Linux, the traffic control for shaping the flow is implemented by the `tc` command. See details in [tc(8)](https://www.man7.org/linux/man-pages/man8/tc.8.html)
6 |
7 | ## shaping strategy in Oasis
8 |
9 |
10 |

11 | Fig 1.1 Traffic shaping strategy
12 |
13 | ## tc example
14 |
15 | Target:
16 |
17 | - Limit the rate from `h0` to `h1` to 100Mbit/s
18 | - For direction `h1` to `h0`, add 5% packet loss, 10ms delay.
19 |
20 | Topology:
21 |
22 | ```bash
23 | h0 (eth0) --- (eth0) h1
24 | ```
25 |
26 | on host `h0`, set the rate limit on `eth0`:
27 |
28 | ```bash
29 | tc qdisc add dev eth0 root handle 5: tbf rate 100.0Mbit burst 150kb latency 1ms
30 | ```
31 |
32 | on host `h1`, we create an `ifb` interface `ifb0`:
33 |
34 | ```bash
35 | ip link add name ifb0 type ifb
36 | ip link set ifb0 up
37 | ```
38 |
39 | Then, redirect the ingress ip traffic from `eth0` to `ifb0`:
40 |
41 | ```bash
42 | tc qdisc add dev eth0 ingress
tc filter add dev eth0 parent ffff: protocol ip u32 match u32 0 0 action mirred egress redirect dev ifb0
44 | ```
45 |
46 | Finally, apply the traffic shaping(loss, delay, jitter) on `ifb0`:
47 |
48 | ```bash
49 | tc qdisc add dev ifb0 root netem loss 5% delay 10ms limit 20000000
50 | ```
51 |
52 | In order to have a better simulation result for latency, a larger buffer/queue size is preferred(specified by `limit 20000000` in tc command) otherwise tc will drop more packets when buffer is overwhelmed.
53 |
54 | ## tc_rules script
55 |
56 | `./src/tools/tc_rules.sh` can be used to apply or unset the above tc rules for a topology H0-H1.
57 |
58 | ```bash
59 | # run `tc_rules.sh eth0 set` on host H0 to apply the tc rules:
60 | sudo ./src/tools/tc_rules.sh eth0 set
61 |
62 | # run `tc_rules.sh eth0 set` on host H1 to apply the tc rules:
63 | sudo ./src/tools/tc_rules.sh eth0 set
64 | ```
65 |
66 | To unset the rules, run:
67 |
68 | ```bash
69 | sudo ./src/tools/tc_rules.sh eth0 unset
70 | ```
--------------------------------------------------------------------------------
/src/config/mesh-network.json:
--------------------------------------------------------------------------------
1 | {
2 | "data": [
3 | {
4 | "matrix_type": 0,
5 | "description": "This is an adjacency matrix",
6 | "matrix_data": [
7 | [0, 0, 0, 1, 0, 0, 0, 0],
8 | [0, 0, 0, 1, 0, 0, 0, 0],
9 | [0, 0, 0, 1, 0, 0, 0, 0],
10 | [1, 1, 1, 0, 1, 0, 0, 0],
11 | [0, 0, 0, 1, 0, 1, 1, 1],
12 | [0, 0, 0, 0, 1, 0, 0, 0],
13 | [0, 0, 0, 0, 1, 0, 0, 0],
14 | [0, 0, 0, 0, 1, 0, 0, 0]
15 | ]
16 | },
17 | {
18 | "matrix_type": 1,
19 | "description": "This is an value matrix for bandwidth(100Mbps)",
20 | "matrix_data": [
21 | [0, 0, 0, 100, 0, 0, 0, 0],
22 | [0, 0, 0, 100, 0, 0, 0, 0],
23 | [0, 0, 0, 100, 0, 0, 0, 0],
24 | [100, 100, 100, 0, 100, 0, 0, 0],
25 | [0, 0, 0, 100, 0, 100, 100, 100],
26 | [0, 0, 0, 0, 100, 0, 0, 0],
27 | [0, 0, 0, 0, 100, 0, 0, 0],
28 | [0, 0, 0, 0, 100, 0, 0, 0]
29 | ]
30 | },
31 | {
32 | "matrix_type": 2,
33 | "description": "This is an value matrix for link loss with 3%",
34 | "matrix_data": [
35 | [0, 0, 0, 3, 0, 0, 0, 0],
36 | [0, 0, 0, 3, 0, 0, 0, 0],
37 | [0, 0, 0, 3, 0, 0, 0, 0],
38 | [3, 3, 3, 0, 3, 0, 0, 0],
39 | [0, 0, 0, 3, 0, 3, 3, 3],
40 | [0, 0, 0, 0, 3, 0, 0, 0],
41 | [0, 0, 0, 0, 3, 0, 0, 0],
42 | [0, 0, 0, 0, 3, 0, 0, 0]
43 | ]
44 | },
45 | {
46 | "matrix_type": 3,
47 | "description": "This is an value matrix for link latency 10ms",
48 | "matrix_data": [
49 | [0, 0, 0, 10, 0, 0, 0, 0],
50 | [0, 0, 0, 10, 0, 0, 0, 0],
51 | [0, 0, 0, 10, 0, 0, 0, 0],
52 | [10, 10, 10, 0, 10, 0, 0, 0],
53 | [0, 0, 0, 10, 0, 10, 10, 10],
54 | [0, 0, 0, 0, 10, 0, 0, 0],
55 | [0, 0, 0, 0, 10, 0, 0, 0],
56 | [0, 0, 0, 0, 10, 0, 0, 0]
57 | ]
58 | },
59 | {
60 | "matrix_type": 4,
61 | "description": "This is an value matrix for link jitter(0ms)",
62 | "matrix_data": [
63 | [0, 0, 0, 0, 0, 0, 0, 0],
64 | [0, 0, 0, 0, 0, 0, 0, 0],
65 | [0, 0, 0, 0, 0, 0, 0, 0],
66 | [0, 0, 0, 0, 0, 0, 0, 0],
67 | [0, 0, 0, 0, 0, 0, 0, 0],
68 | [0, 0, 0, 0, 0, 0, 0, 0],
69 | [0, 0, 0, 0, 0, 0, 0, 0],
70 | [0, 0, 0, 0, 0, 0, 0, 0]
71 | ]
72 | }
73 | ]
74 | }
75 |
--------------------------------------------------------------------------------
/src/config/mesh-network-no-loss.json:
--------------------------------------------------------------------------------
1 | {
2 | "data": [
3 | {
4 | "matrix_type": 0,
5 | "description": "This is an adjacency matrix",
6 | "matrix_data": [
7 | [0, 0, 0, 1, 0, 0, 0, 0],
8 | [0, 0, 0, 1, 0, 0, 0, 0],
9 | [0, 0, 0, 1, 0, 0, 0, 0],
10 | [1, 1, 1, 0, 1, 0, 0, 0],
11 | [0, 0, 0, 1, 0, 1, 1, 1],
12 | [0, 0, 0, 0, 1, 0, 0, 0],
13 | [0, 0, 0, 0, 1, 0, 0, 0],
14 | [0, 0, 0, 0, 1, 0, 0, 0]
15 | ]
16 | },
17 | {
18 | "matrix_type": 1,
19 | "description": "This is an value matrix for bandwidth(100Mbps)",
20 | "matrix_data": [
21 | [0, 0, 0, 100, 0, 0, 0, 0],
22 | [0, 0, 0, 100, 0, 0, 0, 0],
23 | [0, 0, 0, 100, 0, 0, 0, 0],
24 | [100, 100, 100, 0, 100, 0, 0, 0],
25 | [0, 0, 0, 100, 0, 100, 100, 100],
26 | [0, 0, 0, 0, 100, 0, 0, 0],
27 | [0, 0, 0, 0, 100, 0, 0, 0],
28 | [0, 0, 0, 0, 100, 0, 0, 0]
29 | ]
30 | },
31 | {
32 | "matrix_type": 2,
33 | "description": "This is an value matrix for link loss with 0%",
34 | "matrix_data": [
35 | [0, 0, 0, 0, 0, 0, 0, 0],
36 | [0, 0, 0, 0, 0, 0, 0, 0],
37 | [0, 0, 0, 0, 0, 0, 0, 0],
38 | [0, 0, 0, 0, 0, 0, 0, 0],
39 | [0, 0, 0, 0, 0, 0, 0, 0],
40 | [0, 0, 0, 0, 0, 0, 0, 0],
41 | [0, 0, 0, 0, 0, 0, 0, 0],
42 | [0, 0, 0, 0, 0, 0, 0, 0]
43 | ]
44 | },
45 | {
46 | "matrix_type": 3,
47 | "description": "This is an value matrix for link latency 10ms",
48 | "matrix_data": [
49 | [0, 0, 0, 10, 0, 0, 0, 0],
50 | [0, 0, 0, 10, 0, 0, 0, 0],
51 | [0, 0, 0, 10, 0, 0, 0, 0],
52 | [10, 10, 10, 0, 10, 0, 0, 0],
53 | [0, 0, 0, 10, 0, 10, 10, 10],
54 | [0, 0, 0, 0, 10, 0, 0, 0],
55 | [0, 0, 0, 0, 10, 0, 0, 0],
56 | [0, 0, 0, 0, 10, 0, 0, 0]
57 | ]
58 | },
59 | {
60 | "matrix_type": 4,
61 | "description": "This is an value matrix for link jitter(0ms)",
62 | "matrix_data": [
63 | [0, 0, 0, 0, 0, 0, 0, 0],
64 | [0, 0, 0, 0, 0, 0, 0, 0],
65 | [0, 0, 0, 0, 0, 0, 0, 0],
66 | [0, 0, 0, 0, 0, 0, 0, 0],
67 | [0, 0, 0, 0, 0, 0, 0, 0],
68 | [0, 0, 0, 0, 0, 0, 0, 0],
69 | [0, 0, 0, 0, 0, 0, 0, 0],
70 | [0, 0, 0, 0, 0, 0, 0, 0]
71 | ]
72 | }
73 | ]
74 | }
75 |
--------------------------------------------------------------------------------
/src/testsuites/test_sshping.py:
--------------------------------------------------------------------------------
1 | import logging
2 | from interfaces.network import INetwork
3 | from protosuites.proto_info import IProtoInfo
4 | from .test import (ITestSuite, TestConfig)
5 |
6 |
class SSHPingTest(ITestSuite):
    """Measures the RTT of ssh ping message between two hosts in the network.
    SSHPingTest https://github.com/spook/sshping
    """

    def __init__(self, config: TestConfig) -> None:
        super().__init__(config)
        # Name of the sshping binary; resolved via the host's PATH.
        self.binary_path = "sshping"

    def post_process(self):
        # No result post-processing required for sshping.
        return True

    def pre_process(self):
        # No setup required before the test run.
        return True

    def _run_test(self, network: INetwork, proto_info: IProtoInfo):
        """Run sshping and redirect its output into self.result.record.

        When client/server hosts are not configured, every other host pings
        hosts[0]; otherwise a single run goes from config.client_host to
        config.server_host. The target is the protocol tunnel IP when one is
        reported, falling back to the host's own IP.
        """
        hosts = network.get_hosts()
        if hosts is None:
            logging.error("No host found in the network")
            return False
        hosts_num = len(hosts)
        receiver_ip = None
        if self.config.client_host is None or self.config.server_host is None:
            for i in range(hosts_num):
                if i == 0:
                    continue
                # NOTE(review): get_tun_ip(network, 0) is loop-invariant —
                # it always queries host 0; confirm this was intended.
                tun_ip = proto_info.get_tun_ip(
                    network, 0)
                if tun_ip == "":
                    tun_ip = hosts[0].IP()
                receiver_ip = tun_ip
                logging.info(
                    f"############### Oasis SSHPingTest from "
                    "%s to %s ###############", hosts[i].name(), hosts[0].name())
                # Blocking call; output is redirected into the result record.
                hosts[i].cmd(
                    f'{self.binary_path} -i /root/.ssh/id_rsa'
                    f' -H root@{receiver_ip} > {self.result.record}')
            return True
        # Run ping test from client to server
        logging.info(
            f"############### Oasis SSHPingTest from "
            "%s to %s ###############",
            hosts[self.config.client_host].name(),
            hosts[self.config.server_host].name())
        tun_ip = proto_info.get_tun_ip(
            network, self.config.server_host)
        if tun_ip == "":
            tun_ip = hosts[self.config.server_host].IP()
        receiver_ip = tun_ip
        hosts[self.config.client_host].cmd(
            f'{self.binary_path} -i /root/.ssh/id_rsa'
            f' -H root@{receiver_ip} > {self.result.record}')
        return True
60 |
--------------------------------------------------------------------------------
/src/core/mesh_topology.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import copy
3 | from .topology import (ITopology, MatrixType)
4 |
5 |
class MeshTopology(ITopology):
    """Mesh topology whose link matrices come from pre-loaded configuration."""

    def __init__(self, base_path: str, top_config, init_all_mats=True):
        super().__init__(base_path, top_config, init_all_mats)
        # Child instances are created with init_all_mats=False to avoid
        # infinite recursion in __init_topologies.
        if init_all_mats is True:
            self.__init_topologies()

    def description(self) -> str:
        """Return a short human-readable summary of the mesh.

        NOTE(review): the `break` below only exits the inner j-loop, so the
        reported latency/bandwidth come from the last row containing a
        non-zero bandwidth entry — confirm whether the first such link was
        intended instead.
        """
        nodes_num = len(self.all_mats[MatrixType.ADJACENCY_MATRIX][0])
        description = f"Mesh {nodes_num} nodes \n"
        # Fallback values taken from the (0, 1) entry in case no non-zero
        # bandwidth entry exists.
        latency = self.all_mats[MatrixType.LATENCY_MATRIX][0][1]
        bandwidth = self.all_mats[MatrixType.BW_MATRIX][0][1]
        for i in range(nodes_num):
            for j in range(nodes_num):
                if self.all_mats[MatrixType.BW_MATRIX][i][j] > 0:
                    latency = self.all_mats[MatrixType.LATENCY_MATRIX][i][j]
                    bandwidth = self.all_mats[MatrixType.BW_MATRIX][i][j]
                    break
        description += f"latency {latency}ms,"
        description += f"bandwidth {bandwidth}Mbps."
        return description

    def generate_adj_matrix(self, num_of_nodes: int):
        # Matrices are supplied by configuration; generation is a no-op here.
        pass

    def generate_other_matrices(self, adj_matrix):
        # Matrices are supplied by configuration; generation is a no-op here.
        pass

    def __init_topologies(self):
        """Expose this topology's matrices as one child in self.topologies.

        Callers iterate over `self.topologies`; a compound topology presents
        its single concrete topology through that list.
        """
        self.compound_top = True
        # purpose: for iterating through multiple topologies
        the_unique_topology = MeshTopology(self.conf_base_path,
                                           self.top_config, False)  # don't init all mats
        the_unique_topology.all_mats[MatrixType.ADJACENCY_MATRIX] = copy.deepcopy(
            self.all_mats[MatrixType.ADJACENCY_MATRIX])
        the_unique_topology.all_mats[MatrixType.LOSS_MATRIX] = copy.deepcopy(
            self.all_mats[MatrixType.LOSS_MATRIX])
        the_unique_topology.all_mats[MatrixType.LATENCY_MATRIX] = copy.deepcopy(
            self.all_mats[MatrixType.LATENCY_MATRIX])
        the_unique_topology.all_mats[MatrixType.JITTER_MATRIX] = copy.deepcopy(
            self.all_mats[MatrixType.JITTER_MATRIX])
        the_unique_topology.all_mats[MatrixType.BW_MATRIX] = copy.deepcopy(
            self.all_mats[MatrixType.BW_MATRIX])
        logging.info(
            "Added MeshTopology %s", the_unique_topology.all_mats)
        self.topologies.append(the_unique_topology)
51 |
--------------------------------------------------------------------------------
/src/protosuites/cs_protocol.py:
--------------------------------------------------------------------------------
1 | import logging
2 | from interfaces.network import INetwork
3 | from protosuites.proto import (ProtoConfig, IProtoSuite, ProtoRole)
4 |
5 |
class CSProtocol(IProtoSuite):
    """Composite, non-distributed protocol suite pairing a client and a server.

    Host 0 of the config runs the client suite and host 1 the server suite;
    the lifecycle calls (pre_run/run/post_run/stop) are delegated to both.
    """

    def __init__(self, config: ProtoConfig, client: IProtoSuite, server: IProtoSuite):
        super().__init__(config, False, ProtoRole.both)
        self.client = client
        self.server = server
        hosts = self.config.hosts
        if hosts is None or len(hosts) != 2:
            logging.error(
                "Test non-distributed protocols, but protocol server/client hosts are not set correctly.")
            return
        # Host 0 is the client side, host 1 the server side.
        self.client.get_config().hosts = [hosts[0]]
        self.server.get_config().hosts = [hosts[1]]
        # Propagate the composite's extra protocol args to both sides.
        self.client.protocol_args += self.protocol_args
        self.server.protocol_args += self.protocol_args
        logging.info("CSProtocol config %s",
                     self.config)
        logging.info("client protocol %s args: %s",
                     self.client.config.name, self.client.protocol_args)
        logging.info("server protocol %s args: %s",
                     self.server.config.name, self.server.protocol_args)

    def is_distributed(self) -> bool:
        return False

    def is_noop(self) -> bool:
        # No-op only when both sides are no-ops.
        return self.client.is_noop() and self.server.is_noop()

    def post_run(self, network: INetwork):
        return self.client.post_run(network) and self.server.post_run(network)

    def pre_run(self, network: INetwork):
        # Only prepare the server once the client prepared successfully.
        if not self.client.pre_run(network):
            return False
        return self.server.pre_run(network)

    def run(self, network: INetwork):
        return self.client.run(network) and self.server.run(network)

    def stop(self, network: INetwork):
        return self.client.stop(network) and self.server.stop(network)

    def get_forward_port(self) -> int:
        return self.client.get_forward_port()

    def get_tun_ip(self, network: 'INetwork', host_id: int) -> str:
        return self.client.get_tun_ip(network, host_id)

    def get_protocol_name(self) -> str:
        return self.config.name

    def get_protocol_args(self, network: INetwork) -> str:
        return self.protocol_args
59 |
--------------------------------------------------------------------------------
/src/data_analyzer/first_rtt_analyzer.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import re
3 | import os
4 | import matplotlib.pyplot as plt
5 | import numpy as np
6 | from .analyzer import IDataAnalyzer
7 |
8 |
class FirstRTTAnalyzer(IDataAnalyzer):
    """Analyze and visualize multiple input rtt logs.

    Extracts `First response from the server, rtt = N` entries from each
    input log and plots the per-log average first-RTT as a bar chart.
    """

    def analyze(self):
        # Nothing to compute up front; all work happens in visualize().
        return True

    def visualize(self):
        """
        plot first rtt graph
        """
        plt.clf()
        plt.rcParams['font.family'] = 'serif'
        plt.xlabel('Time (ms)', fontsize=8,
                   fontweight='bold')
        plt.ylabel('RTT (ms)', fontsize=8,
                   fontweight='bold')
        default_title = "The first TCP message RTT\n"
        default_title += self.config.subtitle
        plt.title(default_title, fontsize=10, fontweight="bold")
        # Maps log label (text before the first '_' of the file name)
        # -> average first-RTT in ms.
        data_first_rtt = {}
        for input_log in self.config.input:
            logging.info(f"Visualize rtt log: %s", input_log)
            # input_log format /a/b/c/iperf3_h1_h2.log
            log_base_name = os.path.basename(input_log)
            first_rtt_list = []
            with open(f"{input_log}", "r", encoding='utf-8') as f:
                lines = f.readlines()
                for line in lines:
                    # RTT value is reported as an integer number of ms.
                    first_rtt = re.findall(
                        r'First response from the server, rtt = (\d+)', line)
                    if len(first_rtt) != 0:
                        first_rtt_list.append(int(first_rtt[0]))
            if len(first_rtt_list) == 0:
                logging.warning(
                    "No first rtt data found in the log file %s", input_log)
                continue
            # first_rtt_agv
            first_rtt_agv = sum(first_rtt_list) / len(first_rtt_list)
            log_label = log_base_name.split("_")[0]
            data_first_rtt[log_label] = first_rtt_agv
        # One bar per log label, height = average first RTT.
        x = np.array(list(data_first_rtt.keys()))
        y = np.array(list(data_first_rtt.values()))
        plt.bar(x, y, width=0.8, bottom=None)
        plt.xticks(rotation=30, fontsize=5)

        # save the plot to svg file
        if not self.config.output:
            self.config.output = "first_rtt.svg"
        plt.savefig(f"{self.config.output}")
        logging.info("Visualize first rtt diagram saved to %s",
                     self.config.output)
--------------------------------------------------------------------------------
/src/containernet/containernet_host.py:
--------------------------------------------------------------------------------
1 | import logging
2 | from interfaces.host import IHost
3 | from var.global_var import g_root_path # pylint: disable=W0611
4 |
5 |
class ContainernetHostAdapter(IHost):
    """Adapter exposing a containernet host through the IHost interface."""

    def __init__(self, containernet_host):
        self.containernet_host = containernet_host

    def is_connected(self) -> bool:
        return True

    def cmd(self, command: str) -> str:
        """Run `command` on the host and return the output."""
        return self.containernet_host.cmd(command)

    def cmdPrint(self, command: str) -> str:
        """Run `command` on the host, echoing the output."""
        return self.containernet_host.cmdPrint(command)

    def name(self) -> str:
        """Return the host's name."""
        return self.containernet_host.name

    def IP(self) -> str:
        """Return the host's IP address."""
        return self.containernet_host.IP()

    def deleteIntfs(self):
        """Delete all interfaces of the host."""
        return self.containernet_host.deleteIntfs()

    def getIntfs(self):
        return self.containernet_host.intfList()

    def cleanup(self):
        """Strip tc qdisc rules from every interface, then clean up the host."""
        host = self.containernet_host
        for intf in host.intfList():
            logging.debug(
                f"clean up tc qdisc on host %s, interface %s",
                host.name, intf.name)
            self.__del_root_qdisc(intf.name)
            # The trailing digit of the interface name selects the matching
            # ifb device (assumes fewer than 10 ports — TODO confirm).
            port = intf.name[-1]
            if port.isdigit():
                self.__del_root_qdisc(f'ifb{port}')
        return host.cleanup()

    def __del_root_qdisc(self, dev_name):
        """Delete the root qdisc on `dev_name` unless it is the default one."""
        output = self.containernet_host.cmd(
            f'tc qdisc show dev {dev_name}')
        # "priomap"/"noqueue" indicate the kernel default qdisc: nothing to do.
        if "priomap" not in output and "noqueue" not in output:
            self.containernet_host.cmd(
                f'tc qdisc del dev {dev_name} root')

    def get_host(self):
        return self.containernet_host

    def popen(self, command):
        return self.containernet_host.popen(command)
69 |
--------------------------------------------------------------------------------
/docs/flow_competition_test.md:
--------------------------------------------------------------------------------
1 | ## The complex topology
2 |
3 | The required topology for flow competition test is as follows:
4 |
5 |
6 |

7 | Fig 1.1 topology for flow competition test
8 |
9 | The test case configuration is as follows:
10 |
11 | ```yaml
12 | tests:
13 | test1:
14 | description: "Flow competition test with multiple protocols"
15 | topology:
16 | config_name: complex_mesh_net
17 | config_file: predefined.topology.yaml
18 | target_protocols: [tcp-bbr]
19 | route: static_bfs
20 | test_tools:
21 | iperf:
22 | interval: 1
23 | interval_num: 10
24 | packet_type: tcp
25 | client_host: 2
26 | server_host: 5
27 | competition_flows:
28 | - flow_type: tcp # flow type
29 | client_host: 0 # the source node of the flow.
server_host: 6 # the sink node of the flow.
31 | delay: 10 # the flow will be generated by delay 10 seconds.
32 | duration: 10 # the flow will last for 10 seconds.
33 | ```
34 |
35 | The configuration examples are in `test/protocol-throughput-test-with-competition.yaml`.
36 |
37 | - Supported `flow_type` in `competition_flows` section:
38 | - tcp (use `iperf3` as the flow generator)
39 | - btp (use `bats_iperf` as the flow generator)
40 | - brtp (use `bats_iperf` as the flow generator)
41 | - [protocol]-[cc]: tcp-bbr, tcp-cubic
42 |
43 | - Supported `test_tools` which has no conflicts with `competition_flows`:
44 | - iperf
45 | - bats_iperf
46 |
47 | In `competition_flows` section, it's allowed to add multiple flows.
48 |
49 | ### **Adjacency Matrix (8x8):**
50 |
51 | ```yaml
52 | matrix_data:
53 | # 0 1 2 3 4 5 6 7
54 | - [0, 0, 0, 1, 0, 0, 0, 0] # 0: h0
55 | - [0, 0, 0, 1, 0, 0, 0, 0] # 1: h1
56 | - [0, 0, 0, 1, 0, 0, 0, 0] # 2: target protocol sender
57 | - [1, 1, 1, 0, 1, 0, 0, 0] # 3: Router0
58 | - [0, 0, 0, 1, 0, 1, 1, 1] # 4: Router1
59 | - [0, 0, 0, 0, 1, 0, 0, 0] # 5: target protocol receiver
60 | - [0, 0, 0, 0, 1, 0, 0, 0] # 6: h0'
61 | - [0, 0, 0, 0, 1, 0, 0, 0] # 7: h1'
62 | ```
63 |
64 | **Legend:**
65 | - Each row and column is a node.
66 | - A `1` at `[i][j]` means a link from node `i` to node `j`.
67 | - The matrix is symmetric for bidirectional links.
68 |
69 | **Bandwidths:**
70 |
71 | Fixed 1000 Mbps for all links.
72 |
73 | **Delay:**
74 |
75 | Fixed 10 ms for all links.
76 |
77 | **loss rate:**
78 |
79 | The links which are connected to the router have a loss rate of 3% for each direction.
80 |
--------------------------------------------------------------------------------
/src/core/testbed_mgr.py:
--------------------------------------------------------------------------------
1 | import logging
2 |
3 | from core.topology import ITopology
4 | from interfaces.network_mgr import (INetworkManager, NetworkType)
5 | from testbed.config import HostConfig
6 |
7 |
def load_all_hosts(testbed_yaml_config):
    """Build a HostConfig for every host entry of the testbed YAML config.

    Args:
        testbed_yaml_config: iterable of host-config dicts from the YAML file.

    Returns:
        list | None: the loaded HostConfig objects, or None when the config
        contains no hosts.
    """
    hosts_config = []
    for host in testbed_yaml_config:
        host_config = HostConfig(**host)
        hosts_config.append(host_config)
        logging.info("loaded host_config: %s", host_config)
    if len(hosts_config) == 0:
        logging.error("No hosts are loaded for the testbed.")
        return None
    # BUG FIX: previously returned False here, discarding the parsed configs;
    # callers expect the list of HostConfig objects on success.
    return hosts_config
18 |
19 |
class TestbedManager(INetworkManager):
    """TestbedManager manages multiple network instances.

    Lifecycle hooks (start/stop/reset/halt) currently only log their
    invocation for the testbed backend.
    """

    def __init__(self):
        super().__init__()
        # Managed network instances and their count.
        self.networks = []
        self.net_num = 0
        # Topology currently in use; None until networks are built.
        self.cur_top = None
        self.type = NetworkType.containernet

    def get_top_description(self):
        """Return the first network's topology description, or '' when empty."""
        if not self.networks:
            return ''
        return self.networks[0].get_topology_description()

    def get_networks(self):
        return self.networks

    def build_networks(self, node_config,
                       topology: ITopology,
                       net_num: int,
                       route: str = "static_route"):
        """Build multiple network instances based on the given topology.

        Args:
            node_config (NodeConfig): The configuration of each node in the network.
            topology (ITopology): The topology to be built.
            net_num (int): The number of networks to be built.
            route (str, optional): The route strategy. Defaults to "static_route".

        Returns:
            bool: True if the networks are built successfully, False otherwise.
        """
        logging.info("########## Oasis find access to testbed network.")
        all_hosts_conf = load_all_hosts(node_config)
        host_count = len(all_hosts_conf or [])
        logging.info("########## The number of hosts: %s", host_count)
        return True

    def start_networks(self):
        logging.info("########## Oasis start access to testbed network.")

    def stop_networks(self):
        logging.info("########## Oasis stop access to testbed network.")

    def reset_networks(self):
        logging.info("########## Oasis reset access to testbed network.")

    def enable_halt(self):
        logging.info("########## Oasis enable halt for testbed network.")
71 |
--------------------------------------------------------------------------------
/src/config/keys/id_rsa:
--------------------------------------------------------------------------------
1 | -----BEGIN OPENSSH PRIVATE KEY-----
2 | b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAABlwAAAAdzc2gtcn
3 | NhAAAAAwEAAQAAAYEAhCAeElRLQw824uflyeYEx6fgzWqbZaAJaBldTtu/MtyHqD1M820l
4 | A6xsFlGdg86KQQjI+lpOKgVHYMhOipLeekqWVOXnkrbPVwzVNXcZf77w/lQL9CHfqdAFak
5 | Rb+z369Ez2jFRQd1BTNHOa3SoR6C5D1v/fXiXqWbF13qZ02ZNWm2DqojDD4nxxFqK/SIUo
6 | oOkkuAsiiJTpwuQ1wKyu25FbYNfkipO1S4e+hvyt7w7i+vbexpyvJewHVxrJdioLr3lk4k
7 | KocYmyUkxndDyabiUn5yc/3NVuOGhIBkwzsKwvMzca1o240IZ0+gnzGLOi39mQWSzA5T7K
8 | AQhMr313+VcwPlgmo0OB3V+gQ28d39tuXRIflObqM/K/H/vRyaeIwiial+DAp6DMJGUVvy
9 | DrjwXqYDhSamjoJcQuSu/RRlbltsBJE6p/ZNaRd+1AuEvPlMedrnDXa66IvryNvLbEFILK
10 | yH3k1JVaj8GkRX4+fH1IyDPrwBnpcpXxztOjhxhhAAAFgNUkQ3DVJENwAAAAB3NzaC1yc2
11 | EAAAGBAIQgHhJUS0MPNuLn5cnmBMen4M1qm2WgCWgZXU7bvzLch6g9TPNtJQOsbBZRnYPO
12 | ikEIyPpaTioFR2DIToqS3npKllTl55K2z1cM1TV3GX++8P5UC/Qh36nQBWpEW/s9+vRM9o
13 | xUUHdQUzRzmt0qEeguQ9b/314l6lmxdd6mdNmTVptg6qIww+J8cRaiv0iFKKDpJLgLIoiU
14 | 6cLkNcCsrtuRW2DX5IqTtUuHvob8re8O4vr23sacryXsB1cayXYqC695ZOJCqHGJslJMZ3
15 | Q8mm4lJ+cnP9zVbjhoSAZMM7CsLzM3GtaNuNCGdPoJ8xizot/ZkFkswOU+ygEITK99d/lX
16 | MD5YJqNDgd1foENvHd/bbl0SH5Tm6jPyvx/70cmniMIompfgwKegzCRlFb8g648F6mA4Um
17 | po6CXELkrv0UZW5bbASROqf2TWkXftQLhLz5THna5w12uuiL68jby2xBSCysh95NSVWo/B
18 | pEV+Pnx9SMgz68AZ6XKV8c7To4cYYQAAAAMBAAEAAAGAGFtZX9SlSTf5TMwmOtmKAQpsMU
19 | IZYpslkMi4QEzXSveHlJa0ZF23CJ3VESo7iOxe5U1Ky4gxR6LQfUF/5SDdHZs5am41igDk
20 | aS5G8cPCch/PinsQ9xD1i/b6wJPYhxDuffHgM49phA1NQWqcXnAee/hnbi+eTFtd/+nsij
21 | 6SZPe57AvqRTsLKWn6aQtYivRtQHiqWblHxPj6vQBX/mx3EagSueAi0/8V2VYtY/IZ2Guc
22 | oVo4SFHjIfEIi2C3Se7HE+JUzlG1LdXlOED+Lc/og/LBMM8z3vS3ai6Sa0Drreod9ZeRd5
23 | ASIngRCDmeSlK7wR+vHZDmTeTZ69c3h1eDxyGPwMSNSsqQb2KXTsvUA7D+TUvYWbIX+fH0
24 | o1V+p85lcuLqIvwpm8SBYr6ADgxV45kdiKlu49Q1CSgC1PP9XH/zoiXrC4SPNHMG1972Oa
25 | LitQZYMrZm2mB3txFMYEFTYQtsPTTIr3UJecA6OVTV0wH1NOW+h/w3W5afRPi4bl3HAAAA
26 | wDX4V7YThlSLlNS+e5qE3JctLzdtqGzhKF4wcSxfjqqZXIEdtY2CD6fRO6nPT5cuhJy88x
27 | FiOEaII8gWW+6KMLc0iU6iiS5kCQxG/xns9n7U6wFJGDNLhYYDfzISLhswBMGUJ9e4sHKE
28 | QKjAMogastP/VQrBWkoVin6X5+bKmzf/nYXefWBUxUKt0f2Nd25GI/+gjnUYREwkkV2GpS
29 | X9WDjHnsCv4+LbkN6crqe7h583F3zf4JFZVPp5WSmkOWBcGwAAAMEAtaZlhnyn6EMh1O+g
30 | xs89Axk+svreSki80su3xK77OpGoJy8OjtF6tawiuBy/5qRyLnDsFwkdNGui6jOgTf2wc5
31 | 0XtRTME8bCe40GLgSNt5Qq7qlHQak70stTjp97Y5BadPj32kS+PVKQQrg++yntgbsA81Ex
32 | +qxgyPQla3fw/n0+Ham6E0PmUzmsTXcqTrx/txn4g+tk9/NCZlx9z1VMiDoG2t49UzFiYp
33 | r1RHW2ktMzJc2G1cn+f84HTghgRQvXAAAAwQC6NHRHTd5jeN3vEiixePhDTM+rUCqPtVZG
34 | Io1787eZ8ykGbR7JUYSUfz0SkKTuVmKQubJayD3ZH7FbT+HMUx7BkbuB9PZTx07xB+OicI
35 | gt2dzQvGcDjHhreYRPmLTVay5RE8Von3YZstlABgIeNn+YSNfSW8X73uEyr+P7n0ghV6ed
36 | +J/b7nsgwiaMSU/whgNdCUYsNt1eNPa0Oyl7BxoS1104aUYiirymMP4n53C5aPxuwHYbCu
37 | g6yyk4yC6FtocAAAALb2FzaXNAbi1ob3A=
38 | -----END OPENSSH PRIVATE KEY-----
39 |
--------------------------------------------------------------------------------
/src/routing/static_routing.py:
--------------------------------------------------------------------------------
1 | from interfaces.routing import IRoutingStrategy
2 |
3 |
class StaticRouting(IRoutingStrategy):
    """Summary:
    Configure static routing for the chain network.
    """

    def __init__(self):
        # Maps a (host, neighbor) link pair to an interface IP of that link
        # (see the per-field comments in _add_route).
        self.pair_to_link_ip = {}
        self.net_routes = []

    def setup_routes(self, network: 'INetwork'):
        '''
        Setup the routing by ip route.
        '''
        hosts = network.get_hosts()
        self.pair_to_link_ip = network.get_link_table()
        # The whole chain (host 0 .. N-1) is treated as a single route.
        self.net_routes = [range(network.get_num_of_host())]
        for route in self.net_routes:
            route = [hosts[i] for i in route]
            self._add_route(route)

    @staticmethod
    def _add_ip_gateway(host, gateway_ip, dst_ip):
        # `ip r a` is shorthand for `ip route add`.
        host.cmd(f'ip r a {dst_ip} via {gateway_ip}')

    def _add_route(self, route):
        """Install chain routes in both directions along `route`.

        First pass: each host i routes to every host j on its right via its
        right-hand neighbor i+1. Second pass: the mirror image for the
        leftward direction.
        """
        for i in range(len(route) - 1):
            for j in range(i + 1, len(route)):
                host = route[i]
                gateway = route[i + 1]
                dst_prev = route[j - 1]
                dst = route[j]
                # dst_next only exists when j is not the last hop; the same
                # guard protects every use of it below.
                if j < len(route) - 1:
                    dst_next = route[j + 1]

                # gateway ip is the ip of the second (right) interface
                # of the link (route_i, route_{i+1})
                gateway_ip = self.pair_to_link_ip[(host, gateway)]

                # dst ip is the ip of the second (right) interface in the link
                # (route_{j-1}, route_j)
                dst_ip = self.pair_to_link_ip[(dst_prev, dst)]
                if j < len(route) - 1:
                    dst_ip_right = self.pair_to_link_ip[(dst_next, dst)]
                self._add_ip_gateway(host, gateway_ip, dst_ip)
                if j < len(route) - 1:
                    # Also reach dst's far-side interface through the same gateway.
                    self._add_ip_gateway(host, gateway_ip, dst_ip_right)

        # Reverse direction: each host i routes to every host j on its left
        # via its left-hand neighbor i-1.
        for i in range(1, len(route)):
            for j in range(0, i):
                host = route[i]
                gateway = route[i - 1]
                dst_prev = route[j + 1]
                dst = route[j]

                if j >= 1:
                    dst_next = route[j - 1]

                gateway_ip = self.pair_to_link_ip[(host, gateway)]
                dst_ip = self.pair_to_link_ip[(dst_prev, dst)]
                if j >= 1:
                    dst_ip_left = self.pair_to_link_ip[(dst_next, dst)]
                self._add_ip_gateway(host, gateway_ip, dst_ip)
                if j >= 1:
                    # Also reach dst's far-side interface through the same gateway.
                    self._add_ip_gateway(host, gateway_ip, dst_ip_left)
68 |
--------------------------------------------------------------------------------
/src/routing/static_routing_bfs.py:
--------------------------------------------------------------------------------
1 |
2 | import logging
3 | from collections import deque
4 | from interfaces.routing import IRoutingStrategy
5 |
6 |
class StaticRoutingBfs(IRoutingStrategy):
    """Summary:
    Configure static routing for the network using BFS to find the shortest path.
    StaticRoutingBfs is the replacement for StaticRouting which only works with the chain network.
    """

    def __init__(self):
        self.pair_to_link_ip = {}
        self.net_routes = []

    def setup_routes(self, network: 'INetwork'):
        '''
        Setup the routing by ip route.
        '''
        hosts = network.get_hosts()
        self.pair_to_link_ip = network.get_link_table()
        adjacency = network.net_mat  # adjacency matrix
        node_count = len(hosts)

        # For every ordered (src, dst) pair, install a /32 route through the
        # first hop of the BFS shortest path.
        for src in range(node_count):
            for dst in range(node_count):
                if src == dst:
                    continue
                path = self._bfs_shortest_path(adjacency, src, dst)
                if path is None or len(path) < 2:
                    # No path or already at destination
                    continue
                first_hop = path[1]
                # IP of the interface facing the first hop.
                gateway_ip = self.pair_to_link_ip.get(
                    (hosts[src], hosts[first_hop]))
                dst_ip = hosts[dst].IP()
                if not gateway_ip:
                    logging.warning(
                        f"No link IP for %s to %s", hosts[src].name(), hosts[first_hop].name())
                    continue
                self._add_ip_gateway(hosts[src], gateway_ip, dst_ip)
                logging.debug(
                    "Static route: %s -> %s via %s (%s)",
                    hosts[src].name(), hosts[dst].name(), hosts[first_hop].name(), gateway_ip)

    @staticmethod
    def _add_ip_gateway(host, gateway_ip, dst_ip):
        # `ip r a` is shorthand for `ip route add`; /32 pins a single host.
        host.cmd(f'ip r a {dst_ip}/32 via {gateway_ip}')

    def _bfs_shortest_path(self, adjacency, start, goal):
        """Return the node-index list of a shortest path, or None if unreachable."""
        pending = deque([[start]])
        seen = set()
        while pending:
            candidate = pending.popleft()
            tail = candidate[-1]
            if tail == goal:
                return candidate
            if tail in seen:
                continue
            seen.add(tail)
            # Expand neighbors in index order for deterministic paths.
            for nxt, linked in enumerate(adjacency[tail]):
                if linked and nxt not in seen:
                    pending.append(candidate + [nxt])
        return None
68 |
--------------------------------------------------------------------------------
/src/testsuites/test_iperf_bats.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import time
3 | import os
4 | from interfaces.network import INetwork
5 | from protosuites.proto_info import IProtoInfo
6 | from .test import (ITestSuite)
7 |
8 |
class IperfBatsTest(ITestSuite):
    """Throughput test driven by the external `bats_iperf` tool.

    A `bats_iperf` server instance is started on each of the server's
    interfaces; a single client run then writes its results into
    ``self.result.record`` and per-side logs under ``<proto>_server/log``
    and ``<proto>_client/log`` next to the record file.
    """

    def post_process(self):
        # No post-processing required for this test.
        return True

    def pre_process(self):
        # No pre-processing required for this test.
        return True

    def _run_iperf(self, client, server, args_from_proto: str, proto_name: str):
        """Run one bats_iperf client/server exchange.

        Args:
            client: host object that runs the bats_iperf client.
            server: host object that runs the bats_iperf server(s).
            args_from_proto: extra client arguments supplied by the protocol.
            proto_name: protocol name; used to name the log directories.
        Returns:
            True on completion, False when no test config is available.
        """
        if self.config is None:
            logging.error("IperfBatsTest config is None.")
            return False
        receiver_ip = server.IP()
        receiver_port = 5201  # fixed server port for every interface
        parallel = self.config.parallel or 1
        if parallel > 1:
            logging.info(
                "IperfBatsTest is running with parallel streams: %d", parallel)
        interval_num = self.config.interval_num or 10
        interval = self.config.interval or 1
        base_path = os.path.dirname(os.path.abspath(self.result.record))
        server_log_path = os.path.join(
            base_path, f"{proto_name}_server/log/")
        client_log_path = os.path.join(
            base_path, f"{proto_name}_client/log/")
        # One backgrounded server per interface: same port, bound via `-I`.
        for intf in server.getIntfs():
            bats_iperf_server_cmd = f'bats_iperf -s -p {receiver_port} -i {float(interval)} -I {intf}' \
                f' -l {self.result.record} -L {server_log_path} &'
            logging.info(
                'bats_iperf server cmd: %s', bats_iperf_server_cmd)
            server.cmd(f'{bats_iperf_server_cmd}')
        bats_iperf_client_cmd = f'bats_iperf -c {receiver_ip} {args_from_proto} -p {receiver_port} -P {parallel}' \
            f' -i {float(interval)} -t {int(interval_num)} -L {client_log_path}'
        logging.info('bats_iperf client cmd: %s', bats_iperf_client_cmd)
        # NOTE(review): assumes client.popen() returns a process object with
        # a .stdout stream (containernet host); the testbed LinuxHost.popen
        # returns a string -- confirm which host types reach this test.
        res = client.popen(
            f'{bats_iperf_client_cmd}').stdout.read().decode('utf-8')
        logging.info('bats_iperf client output: %s', res)
        logging.info('bats_iperf test result save to %s', self.result.record)
        time.sleep(1)  # give the background servers a moment to flush
        client.cmd('pkill -9 -f bats_iperf')
        server.cmd('pkill -9 -f bats_iperf')
        return True

    def _run_test(self, network: INetwork, proto_info: IProtoInfo):
        """Select client/server hosts and run the bats_iperf exchange.

        Defaults to the first host as client and the last host as server
        when the config does not specify them.
        """
        hosts = network.get_hosts()
        if hosts is None:
            return False
        if self.config.client_host is None or self.config.server_host is None:
            self.config.client_host = 0
            self.config.server_host = len(hosts) - 1
        client = hosts[self.config.client_host]
        server = hosts[self.config.server_host]
        logging.info(
            "############### Oasis IperfBatsTest from %s to %s ###############", client.name(), server.name())
        return self._run_iperf(client, server, proto_info.get_protocol_args(network), proto_info.get_protocol_name())
63 |
--------------------------------------------------------------------------------
/docs/protocols_and_tools.md:
--------------------------------------------------------------------------------
1 | ## 1. Protocols
2 |
3 | Oasis currently supports the following protocols:
4 |
5 | | Protocol | Description |
6 | | ---------- | ---------------------------------------------------------------------------------- |
7 | | btp | BATS™ Transport Protocol (the framework, multi-hop version) |
8 | | brtp | BATS™ Reliable Transport Protocol (the framework, multi-hop version) |
9 | | brtp_proxy | BATS™ Reliable Transport Protocol in Proxy mode (the framework, multi-hop version) |
10 | | tcp-bbr | TCP with bbr congestion control algorithm |
11 | | btp-next | BATS™ protocol (the API/SDK, end-to-end version) |
12 | | brtp-next | BATS™ protocol (the API/SDK, end-to-end version) |
13 | | tcp-next | The tcp protocol integrated in bats protocol API/SDK, it's a TCP over TCP. |
14 | | KCP | KCP Protocol (uses KCP-TUN) |
15 |
16 | ### 1.1 BATS Protocol
17 |
18 | The items `BTP`, `BRTP`,`BRTP_PROXY` are part of the BATS™ protocol. You can find the details in [BATS™](../bats/README.md).
19 |
20 | ### 1.2 KCP
21 |
22 | [KCP](https://github.com/skywind3000/kcp) is a fast and reliable protocol that can reduce average latency by 30% to 40% and maximum delay by a factor of three, at the cost of 10% to 20% more bandwidth usage compared to TCP.
23 |
[KCP-TUN](https://github.com/xtaci/kcptun) is a practical application based on KCP. It uses Reed-Solomon codes to recover lost packets.
25 |
26 | ## 2. Tools
27 |
28 | Oasis currently supports the following test tools:
29 |
30 | | Tools | Description | Name in YAML |
31 | | -------------------- | ------------------------------------------------------ | ------------ |
| ping                 | tool for sending ICMP messages                          | ping         |
| Iperf3               | tool for performing throughput tests with UDP/TCP       | iperf        |
| tcp_message_endpoint | tool for sending/echoing TCP messages                   | rtt          |
| sshping              | tool for sending ping messages over SSH                 | sshping      |
| scp                  | tool for testing file transfer over different protocols | scp          |
37 |
38 | ### 2.1 TCP messaging endpoint
39 |
40 | The binary located in `bin/tcp_message/tcp_endpoint` is a tool for measuring the RTT of TCP messages over other protocols. Its source code is in [bats-documentation](https://github.com/n-hop/bats-documentation).
41 |
### 2.2 sshping

`sshping` is a tool for sending ping messages over SSH. The source code is in [sshping](https://github.com/spook/sshping).

### 2.3 Traffic control (tc)

[`docs/tc-strategy.md`](tc-strategy.md) provides a detailed explanation of the traffic control strategy in Oasis.
49 |
--------------------------------------------------------------------------------
/docs/imgs/complex-top.svg:
--------------------------------------------------------------------------------
1 |
--------------------------------------------------------------------------------
/src/testbed/linux_host.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import subprocess
3 | import os
4 |
5 | from interfaces.host import IHost
6 | from var.global_var import g_root_path
7 | from .config import HostConfig
8 |
9 |
class LinuxHost(IHost):
    """Linux host class.

    Define the way to interact with a Linux host: every command is run
    remotely through a non-interactive ssh invocation.
    """

    def __init__(self, config: HostConfig):
        """Validate the config, build the ssh prefix, and probe the host.

        Raises:
            ValueError: when the config is missing ip or user.
            FileNotFoundError: when the private key file does not exist.
        """
        self.host_config = config
        if self.host_config.ip is None:
            raise ValueError("The host IP is None.")
        if self.host_config.user is None:
            raise ValueError("The host user is None.")
        private_key = f"{g_root_path}{self.host_config.authorized_key}"
        # check if the private key exists.
        if not os.path.exists(private_key):
            raise FileNotFoundError(
                f"The private key {private_key} does not exist.")
        # fix: use the verified key path; previously the bare
        # `authorized_key` was passed to `ssh -i`, which only resolved when
        # the process happened to run from the repository root.
        self.ssh_cmd_prefix = f"ssh -i {private_key} {self.host_config.user}@{self.host_config.ip}"
        self.intf_list = ['eth0', 'eth1']
        # One-shot connectivity probe; callers check is_connected().
        result = subprocess.run(
            [f"{self.ssh_cmd_prefix} hostname"], shell=True, capture_output=True, text=True, check=False)
        if result.returncode == 0:
            self.is_connected_flag = True
            logging.info("Connected to the host %s", self.host_config.ip)
        else:
            logging.error("Failed to connect to the host %s: %s",
                          self.host_config.ip, result.stderr)
            self.is_connected_flag = False

    def is_connected(self) -> bool:
        """Whether the ssh probe performed in __init__ succeeded."""
        return self.is_connected_flag

    def cmd(self, command):
        """Run `command` on the remote host via ssh; return its stdout."""
        cmd_str = f"{self.ssh_cmd_prefix} \"{command}\""
        result = subprocess.run(
            [cmd_str], shell=True, capture_output=True, text=True, check=False)
        logging.debug("STDOUT: %s", result.stdout)
        logging.debug("STDERR: %s", result.stderr)
        if result.returncode != 0:
            logging.error("Command failed with return code %d: %s",
                          result.returncode, result.stderr)
        return result.stdout

    def cmdPrint(self, command: str) -> str:
        """Execute a command on the host and print the output.
        """
        # fix: dropped a pointless f-string prefix on this %-style logging
        # call (pylint W1309).
        logging.info("cmdPrint: %s", command)
        return self.cmd(command)

    def name(self) -> str:
        """Get the name of the host.
        """
        # fix: the fully-prefixed command used to be handed back to cmd(),
        # which prepends the ssh prefix again -- effectively running
        # ssh-within-ssh on the remote host. Also strip the trailing
        # newline ssh leaves on the output.
        return self.cmd("hostname").strip()

    def IP(self) -> str:
        """Get the IP address of the host.
        """
        return self.host_config.ip

    def deleteIntfs(self):
        """Delete all interfaces.
        Action is not permitted.
        """
        return True

    def getIntfs(self):
        """Return the fixed list of managed interfaces (eth0/eth1)."""
        return self.intf_list

    def cleanup(self):
        """Cleanup the host.
        """
        # clean up all tc qdisc rules.
        for intf in self.intf_list:
            # src/tools/tc_rules.sh
            self.cmd(f"{g_root_path}src/tools/tc_rules.sh {intf} unset")

    def get_host(self):
        return self

    def popen(self, command):
        # NOTE(review): unlike the containernet host, this returns the
        # command's stdout string rather than a process object; callers
        # expecting `.stdout.read()` will break -- confirm against usages.
        return self.cmd(command)
92 |
--------------------------------------------------------------------------------
/.gitlab-ci.yml:
--------------------------------------------------------------------------------
image: gitlab.app.n-hop.com:5005/infra/ci-images/docker-cli:latest

# Name the pipeline after its trigger, and cancel sibling jobs as soon as
# any job in the pipeline fails.
workflow:
  auto_cancel:
    on_job_failure: all
  name: "$PIPELINE_NAME"
  rules:
    - if: '$CI_PIPELINE_SOURCE == "merge_request_event"'
      variables:
        PIPELINE_NAME: "Merge request pipeline"
    - if: '$CI_PIPELINE_SOURCE == "schedule"'
      variables:
        PIPELINE_NAME: "Other scheduled pipeline"
    - if: "$CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH"
      variables:
        PIPELINE_NAME: "Default branch pipeline"
    - if: $CI_COMMIT_TAG =~ /^v/
      variables:
        PIPELINE_NAME: "Release pipeline"

# define stages
stages:
  - style_check
  - build

# Shared before_script steps for lint jobs (YAML anchor).
.prepare_pylint_template: &prepare_pylint_definition
  - . /etc/profile
  - pip install --upgrade pip
  - pip install pylint==3.1.0

# Lint every tracked Python file; unit-test files are excluded by pattern.
pylint:
  stage: style_check
  allow_failure: false
  interruptible: true
  tags:
    - performance
  before_script:
    - *prepare_pylint_definition
  script:
    - |
      pylint $(git ls-files '*.py') --rcfile=${CI_PROJECT_DIR}/.pylintrc --ignore-patterns=".*_unittest.py" --disable=unused-argument

# Build and push the generic Ubuntu image. Runs on every scheduled
# pipeline, or on the default branch when its Dockerfile changed.
build-generic-container:
  stage: build
  interruptible: true
  allow_failure: false
  tags:
    - docker
  rules:
    - if: $CI_PIPELINE_SOURCE == "schedule"
      when: always
    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH
      changes:
        - Dockerfile.ubuntu-generic
  variables:
    IMAGE_NAME: $CI_REGISTRY/$CI_PROJECT_PATH/ubuntu-generic
    DOCKERFILE: Dockerfile.ubuntu-generic
  script:
    - >
      docker build
      --pull
      --push
      --provenance false
      --tag $IMAGE_NAME:dev
      --tag $IMAGE_NAME:latest
      --tag $IMAGE_NAME:$CI_COMMIT_SHA
      --file $DOCKERFILE .

# Same image with LTTng tracing support; published only under the
# `lttng` tag of the ubuntu-generic repository.
build-lttng-container:
  stage: build
  interruptible: true
  allow_failure: false
  tags:
    - docker
  rules:
    - if: $CI_PIPELINE_SOURCE == "schedule"
      when: always
    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH
      changes:
        - Dockerfile.ubuntu-generic-lttng
  variables:
    IMAGE_NAME: $CI_REGISTRY/$CI_PROJECT_PATH/ubuntu-generic
    DOCKERFILE: Dockerfile.ubuntu-generic-lttng
  script:
    - >
      docker build
      --pull
      --push
      --provenance false
      --tag $IMAGE_NAME:lttng
      --file $DOCKERFILE .

# Build and push the patched containernet image used by the emulator.
build-modified-containernet:
  stage: build
  interruptible: true
  allow_failure: false
  tags:
    - docker
  rules:
    - if: $CI_PIPELINE_SOURCE == "schedule"
      when: always
    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH
      changes:
        - Dockerfile.containernet
  variables:
    IMAGE_NAME: $CI_REGISTRY/$CI_PROJECT_PATH/containernet
    DOCKERFILE: Dockerfile.containernet
  script:
    - >
      docker build
      --pull
      --push
      --provenance false
      --tag $IMAGE_NAME:dev
      --tag $IMAGE_NAME:latest
      --tag $IMAGE_NAME:$CI_COMMIT_SHA
      --file $DOCKERFILE .
--------------------------------------------------------------------------------
/src/testsuites/test_rtt.py:
--------------------------------------------------------------------------------
1 | import time
2 | import logging
3 |
4 | from interfaces.network import INetwork
5 | from protosuites.proto_info import IProtoInfo
6 | from .test import (ITestSuite, TestConfig)
7 |
8 |
class RTTTest(ITestSuite):
    """Measures the round trip time between two hosts in the network.
    RTTTest uses tool `bin/tcp_message/tcp_endpoint` to measure the RTT.
    Source of the tool is in https://github.com/n-hop/bats-documentation
    """

    def __init__(self, config: TestConfig) -> None:
        super().__init__(config)
        self.run_times = 0  # bumps the fallback port so reruns don't collide
        self.first_rtt_repeats = 15  # repetitions for first-RTT measurement
        self.binary_path = "tcp_endpoint"

    def post_process(self):
        # No post-processing required for this test.
        return True

    def pre_process(self):
        # No pre-processing required for this test.
        return True

    def _run_tcp_endpoint(self, client, server, port, recv_ip):
        """Start the tcp_endpoint server on `server`, run the client on
        `client`, and write results to ``self.result.record``.
        """
        loop_cnt = 1
        server.cmd(f'{self.binary_path} -p {port} &')
        tcp_client_cmd = f'{self.binary_path} -c {recv_ip} -p {port}'
        tcp_client_cmd += f' -i {self.config.interval}' \
            f' -w {self.config.packet_count} -l {self.config.packet_size}'
        if self.config.packet_count == 1:
            # single-packet mode measures the first RTT; repeat
            # `first_rtt_repeats` (15) times and append (`>>`) so every
            # repetition lands in the same record file.
            loop_cnt = self.first_rtt_repeats
            tcp_client_cmd += f' >> {self.result.record}'
        else:
            tcp_client_cmd += f' > {self.result.record}'
        for _ in range(loop_cnt):
            client.cmd(f'{tcp_client_cmd}')
        client.cmd('pkill -9 -f tcp_endpoint')
        logging.info('rtt test result save to %s', self.result.record)
        time.sleep(1)  # let the server side drain before killing it
        server.cmd('pkill -9 -f tcp_endpoint')
        return True

    def _run_test(self, network: INetwork, proto_info: IProtoInfo):
        """Resolve endpoints/port for the protocol under test and run it.

        Defaults to the first host as client and the last as server when
        the config does not specify them.
        """
        hosts = network.get_hosts()

        if self.config.client_host is None or self.config.server_host is None:
            self.config.client_host = 0
            self.config.server_host = len(hosts) - 1

        client = hosts[self.config.client_host]
        server = hosts[self.config.server_host]
        receiver_ip = None
        if (proto_info.get_protocol_name().upper() == "KCP") or (proto_info.get_protocol_name().upper() == "QUIC"):
            # kcp tun like a proxy, all traffic will be forwarded to the proxy server
            tun_ip = proto_info.get_tun_ip(network, self.config.client_host)
            if tun_ip == "":
                tun_ip = client.IP()
            receiver_ip = tun_ip
        else:
            tun_ip = proto_info.get_tun_ip(network, self.config.server_host)
            if tun_ip == "":
                tun_ip = server.IP()
            receiver_ip = tun_ip
        # KCP defines the forward port `10100`
        receiver_port = proto_info.get_forward_port()
        if receiver_port == 0:
            # if no forward port defined, use random port start from 30011
            # for port conflict, use different port for each test
            receiver_port = 30011 + self.run_times
            self.run_times += 1
        logging.info(
            "############### Oasis RTTTest from %s to %s with forward port %s ###############",
            client.name(), server.name(), receiver_port)
        return self._run_tcp_endpoint(client, server, receiver_port, receiver_ip)
79 |
--------------------------------------------------------------------------------
/src/tools/test/util_unittest.py:
--------------------------------------------------------------------------------
1 | import unittest
2 | from src.tools.util import is_same_path
3 | from src.tools.util import is_base_path
4 | from src.tools.util import str_to_mbps
5 | from src.tools.util import parse_test_file_name
6 |
7 |
class TestIsSamePath(unittest.TestCase):
    """Tests for tools.util.is_same_path: path equality after normalizing
    duplicate and trailing slashes."""

    def test_is_same_path_identical_paths(self):
        self.assertTrue(is_same_path(
            '/home/user/file.txt', '/home/user/file.txt'))

    def test_is_same_path_different_paths(self):
        self.assertFalse(is_same_path(
            '/home/user/file1.txt', '/home/user/file2.txt'))

    def test_is_same_path_with_double_slashes(self):
        self.assertTrue(is_same_path(
            '/home//user//file.txt', '/home/user/file.txt'))

    def test_is_same_path_with_trailing_slash(self):
        self.assertTrue(is_same_path(
            '/home/user/file.txt/', '/home/user/file.txt'))

    def test_is_same_path_with_mixed_slashes(self):
        self.assertTrue(is_same_path(
            '/home/user//file.txt/', '/home//user/file.txt'))
29 |
30 |
class TestIsSameBase(unittest.TestCase):
    """Tests for tools.util.is_base_path: whether the first path is a
    prefix (base directory) of the second after slash normalization.

    NOTE(review): the class name says "SameBase" while the function under
    test is `is_base_path`; consider renaming for clarity.
    """

    def test_is_base_path_base_path(self):
        self.assertTrue(is_base_path(
            '/home/user', '/home/user/file.txt'))

    def test_is_base_path_not_base_path(self):
        self.assertFalse(is_base_path(
            '/home/user/docs', '/home/user/file.txt'))

    def test_is_base_path_identical_paths(self):
        self.assertTrue(is_base_path(
            '/home/user', '/home/user'))

    def test_is_base_path_with_double_slashes(self):
        self.assertTrue(is_base_path(
            '/home//user', '/home/user/file.txt'))

    def test_is_base_path_with_trailing_slash(self):
        self.assertTrue(is_base_path(
            '/home/user/', '/home/user/file.txt'))

    def test_is_base_path_with_mixed_slashes(self):
        self.assertTrue(is_base_path(
            '/home/user//', '/home//user/file.txt'))
56 |
57 |
class TestStrToMbps(unittest.TestCase):
    """Tests for tools.util.str_to_mbps (unit-suffixed rate -> Mbps) and
    tools.util.parse_test_file_name ("file.yaml[:test]" splitting).

    NOTE(review): the parse_test_file_name cases below would be better
    housed in their own TestCase class; kept here to preserve the
    existing test identifiers.
    """

    def test_str_to_mbps_kilobits(self):
        self.assertEqual(str_to_mbps(1000, "K"), 1.00)

    def test_str_to_mbps_megabits(self):
        self.assertEqual(str_to_mbps(1, "M"), 1.00)

    def test_str_to_mbps_gigabits(self):
        self.assertEqual(str_to_mbps(1, "G"), 1000.00)

    def test_str_to_mbps_no_unit(self):
        # No unit means the value is in plain bits per second.
        self.assertEqual(str_to_mbps(1000000, ""), 1.00)

    def test_str_to_mbps_invalid_unit(self):
        # Unknown units yield 0 rather than raising.
        self.assertEqual(str_to_mbps(1000, "X"), 0.00)

    def test_parse_test_file_name_with_test_name(self):
        self.assertEqual(parse_test_file_name(
            'test.yaml:test1'), ('test.yaml', 'test1'))

    def test_parse_test_file_name_without_test_name(self):
        self.assertEqual(parse_test_file_name(
            'test.yaml'), ('test.yaml', None))

    def test_parse_test_file_name_with_multiple_colons(self):
        # More than one colon is malformed input.
        self.assertEqual(parse_test_file_name(
            'test.yaml:test1:test2'), (None, None))

    def test_parse_test_file_name_empty_string(self):
        self.assertEqual(parse_test_file_name(''), (None, None))

    def test_parse_test_file_name_only_colon(self):
        self.assertEqual(parse_test_file_name(':'), (None, None))


if __name__ == '__main__':
    unittest.main()
96 |
--------------------------------------------------------------------------------
/src/routing/olsr_routing.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import time
3 | from interfaces.routing import IRoutingStrategy
4 |
5 |
class OLSRRouting(IRoutingStrategy):
    """Summary:
    Configure routing for the network with OLSR (olsrd2).
    """

    def __init__(self):
        self.binary_path = "olsrd2_static"
        self.cfg_path = "/etc/olsr/olsr.config"

    def setup_routes(self, network: 'INetwork'):
        self.start(network)

    def teardown_routes(self, network: 'INetwork'):
        self.stop(network)

    def _generate_cfg(self, network: 'INetwork'):
        """Write an olsrd2 config to every host and assign each host a
        loopback address 172.23.1.<i+1>/32 used by OLSR."""
        template = ""
        template += "[olsrv2]\n"
        template += " originator -127.0.0.1/8\n"
        template += " originator -::/0\n"
        template += " originator default_accept\n"
        template += "\n"
        template += "[interface]\n"
        template += " hello_interval 1\n"
        template += " hello_validity 20\n"
        template += " bindto -127.0.0.1/8\n"
        template += " bindto -::/0\n"
        template += " bindto default_accept\n"
        template += "\n"
        template += "[interface=lo]\n"
        template += "{interface}\n"
        # Uncomment to enable olsrd2 logging:
        # template += "[log]\n"
        # template += " info all\n"
        template += "\n"

        hosts = network.get_hosts()
        host_num = network.get_num_of_host()
        for i in range(host_num):
            interface = ""
            if i == 0:
                # First host: one interface plus a LAN import section.
                interface += f"[interface={hosts[i].name()}-eth0]\n\n"
                interface += "[lan_import=lan]\n"
                interface += "interface eth0\n\n"
            elif i == host_num - 1:
                # Last host: one interface.
                interface += f"[interface={hosts[i].name()}-eth0]\n\n"
            else:
                # Middle hosts: two interfaces.
                interface += f"[interface={hosts[i].name()}-eth0]\n"
                interface += f"[interface={hosts[i].name()}-eth1]\n\n"
            # fix: was a pointless f-string with no placeholder (pylint
            # W1309); `-p` keeps the call idempotent when the directory
            # already exists (e.g. on re-runs).
            hosts[i].cmd('mkdir -p /etc/olsr')
            hosts[i].cmd(
                f'echo "{template.format(interface=interface)}" > {self.cfg_path}')
            hosts[i].cmd(
                f'ip addr add 172.23.1.{i + 1}/32 dev lo label \"lo:olsr\"')

    def start(self, network: 'INetwork'):
        """Start olsrd2 on every host and wait until host 0 learns a route
        to the last host's OLSR loopback address.

        Returns:
            True when the route appears within the deadline, else False.
        """
        self._generate_cfg(network)
        hosts = network.get_hosts()
        host_num = network.get_num_of_host()
        for host in hosts:
            host.cmd(f'nohup {self.binary_path} --load={self.cfg_path} &')
        # Convergence deadline grows with the number of hosts.
        max_wait_sec = 20 + host_num * 3
        wait_sec = 0
        last_host_ip = f'172.23.1.{host_num}'
        while wait_sec < max_wait_sec:
            time.sleep(1)
            wait_sec += 1
            route = hosts[0].cmd(
                f'ip route | grep {last_host_ip} | grep -v grep')
            if route is not None and route.find(last_host_ip) != -1:
                break
        if wait_sec >= max_wait_sec:
            logging.error("OLSR routing is not setup correctly.")
            return False
        logging.info(
            "OLSR routing is setup correctly at %u seconds.", wait_sec)
        return True

    def stop(self, network: 'INetwork'):
        """Kill olsrd2 on every host."""
        hosts = network.get_hosts()
        for host in hosts:
            host.cmd(f'killall -9 {self.binary_path}')
        logging.info("OLSR routing is stopped.")
91 |
--------------------------------------------------------------------------------
/src/config/predefined.topology.yaml:
--------------------------------------------------------------------------------
# **init_value**: initial value, must be set. If all links share the same value, give one value; otherwise, give the whole value list.
2 | # **json_description**: path to json file which contains the description of the topology in details.
3 | # **array_description**: the array_description of the topology, only available for the linear topology.
4 | topology:
5 | - name: linear_network_6
6 | topology_type: linear
7 | nodes: 7
8 | array_description:
9 | - link_loss:
10 | init_value: [5]
11 | - link_latency:
12 | init_value: [1]
13 | - link_jitter:
14 | init_value: [0]
15 | - link_bandwidth_forward:
16 | init_value: [100]
17 | - link_bandwidth_backward:
18 | init_value: [100]
19 | - name: linear_network_1
20 | topology_type: linear
21 | nodes: 2
22 | array_description:
23 | - link_loss:
24 | init_value: [5]
25 | - link_latency:
26 | init_value: [10]
27 | - link_jitter:
28 | init_value: [0]
29 | - link_bandwidth_forward:
30 | init_value: [100]
31 | - link_bandwidth_backward:
32 | init_value: [100]
33 | - name: long_fat_network
34 | topology_type: linear
35 | nodes: 2
36 | array_description:
37 | - link_loss:
38 | init_value: [0.01]
39 | - link_latency:
40 | init_value: [100]
41 | - link_jitter:
42 | init_value: [0]
43 | - link_bandwidth_forward:
44 | init_value: [1000]
45 | - link_bandwidth_backward:
46 | init_value: [1000]
47 | - name: linear_network_3
48 | topology_type: linear
49 | nodes: 4
50 | array_description:
51 | - link_loss:
52 | init_value: [5]
53 | - link_latency:
54 | init_value: [10]
55 | - link_jitter:
56 | init_value: [0]
57 | - link_bandwidth_forward:
58 | init_value: [100]
59 | - link_bandwidth_backward:
60 | init_value: [100]
61 | - name: linear_network_2
62 | topology_type: linear
63 | nodes: 3
64 | array_description:
65 | - link_loss:
66 | init_value: [2]
67 | - link_latency:
68 | init_value: [10]
69 | - link_jitter:
70 | init_value: [0]
71 | - link_bandwidth_forward:
72 | init_value: [100]
73 | - link_bandwidth_backward:
74 | init_value: [100]
75 | - name: 4-hops-linear-network
76 | topology_type: linear
77 | nodes: 5
78 | json_description: 4-hops-linear-network.json
79 | - name: linear_network_1_20
80 | topology_type: linear
81 | nodes: 2
82 | array_description:
83 | - link_loss:
84 | init_value: [20]
85 | - link_latency:
86 | init_value: [10]
87 | - link_jitter:
88 | init_value: [0]
89 | - link_bandwidth_forward:
90 | init_value: [100]
91 | - link_bandwidth_backward:
92 | init_value: [100]
93 | - name: linear_network_rtt_loss
94 | topology_type: linear
95 | nodes: 2
96 | array_description:
97 | - link_loss:
98 | init_value: [0]
99 | step_len: 2
100 | step_num: 5
101 | - link_latency:
102 | init_value: [10]
103 | step_len: 20
104 | step_num: 5
105 | - link_jitter:
106 | init_value: [0]
107 | - link_bandwidth_forward:
108 | init_value: [100]
109 | - link_bandwidth_backward:
110 | init_value: [100]
111 | - name: complex_mesh_net_l3
112 | topology_type: mesh
113 | nodes: 8
114 | json_description: mesh-network.json
115 | - name: complex_mesh_net
116 | topology_type: mesh
117 | nodes: 8
118 | json_description: mesh-network-no-loss.json
119 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | [](https://github.com/n-hop/oasis/actions/workflows/.github.oasis-ci.yml)
2 | [](https://github.com/n-hop/oasis/actions/workflows/.github.ci.yml)
3 |
4 | -----
5 |
6 | # Oasis
7 |
8 | Oasis is a network emulation platform that enables protocol developers to test their protocols in diverse network topologies and conditions.
9 |
10 | Oasis is built on [Containernet](https://github.com/containernet/containernet/), a fork of [Mininet](http://mininet.org/), and [Docker](https://www.docker.com/). It offers a web-based user interface for creating and managing testbeds, test cases, and test results. Additionally, it provides a RESTful API for interacting with the DevOps platform. One of the most impressive features of Oasis is its extensive set of components for visualizing and analyzing test results, as well as automatically generating test reports based on provided templates.
11 |
12 | ## Architecture
13 |
14 |
15 |

16 | Fig 1.1 Oasis architecture brief view
17 |
18 | ## Features
19 |
20 | - **Data Visualization**: Oasis offers numerous components for visualizing test data.
21 | - Visualize the TCP throughput over time
22 | - Visualize the packet rtt over time
23 | - Visualize the rtt distribution
24 | - **Flexible Architecture**: Oasis can be used for pure software emulation or as a front-end for a series of real testbeds.
25 | - **Built-in Protocol Support**: Oasis includes extensive built-in protocol support, such as TCP, KCP, and more.
26 | - **Built-in Dynamic Routing Support**: For complex mesh networks, Oasis offers built-in dynamic routing support.
27 |
28 | ## Features in development
29 |
- **Testbed**: Instead of using virtual networks provided by Containernet, Oasis can use a real testbed to do the protocol testing.
31 | - **Web-based User Interface**: Users can create, modify, delete, and execute test cases, as well as manage versions of user-defined protocols.
32 | - **RESTful API**: Users can interact with the DevOps platform through the API.
33 |
34 | ## Get started
35 |
36 | A simple guide to getting started with Oasis can be found in [Get Started](docs/get-started.md).
37 |
38 | ## Workflow of Testing
39 |
A typical workflow of an Oasis test is as follows:
41 |
42 | 1. construct a `INetwork` with a given yaml configuration which describes the network topology.
43 | 2. load `ITestSuite`(the test tool) from yaml configuration.
44 | 3. load `IProtoSuite`(the target test protocol) from yaml configuration.
45 | 4. run `IProtoSuite` on `INetwork`.
46 | 5. perform the test with `ITestSuite` on `INetwork`.
47 | 6. read/generate test results by `IDataAnalyzer`.
48 |
49 | ## Protocols and Tools
50 |
51 | Detailed information can be found in [Protocols and Tools](docs/protocols_and_tools.md).
52 |
53 | ## Flow competition test
54 |
55 | The flow competition test is a test case that evaluates the fairness and the convergence of the target protocol. Detailed information can be found in [Flow Competition Test](docs/flow_competition_test.md).
56 |
57 | ## Limitations
58 |
59 | - **Link latency**: The valid range is 0-200ms; 0ms means no additional latency is added. And the maximum latency of each link is 200ms.
60 | The link latency is simulated by the Linux `tc` module, which requires sufficient queuing buffer capacity. If the queuing buffer is not large enough, `tc` module will drop packets under heavy traffic, affecting the accuracy of simulating the link loss rate.
61 |
62 | - **Link bandwidth**: The valid range is 1-4000Mbps.
--------------------------------------------------------------------------------
/src/tools/tc_rules.sh:
--------------------------------------------------------------------------------
1 | #!/bin/bash
2 |
3 | # ################# Usage #################
4 | # H0 (eth0)<-----------------> (eth0)H1
5 | # `tc_rules.sh` is used to define the tc rules on the link between H0 and H1
6 | # #########################################
7 | # run `tc_rules.sh eth0 set` on host H0 to apply the tc rules:
8 | # sudo ./tc_rules.sh eth0 set
9 | # #########################################
10 | # run `tc_rules.sh eth0 set` on host H1 to apply the tc rules:
11 | # sudo ./tc_rules.sh eth0 set
12 | # #########################################
13 | # Unset the tc rules by running the following command:
14 | # sudo ./tc_rules.sh eth0 unset
15 |
# step 0. get input interface name
if [ $# -ne 2 ]; then
    # fix: the usage text omitted the mandatory <interface> argument.
    echo "Usage: $0 <interface> set"
    echo "Usage: $0 <interface> unset"
    exit 1
fi

applied_interface=$1
# check if interface exists
if [ ! -d "/sys/class/net/$applied_interface" ]; then
    echo "Interface $applied_interface does not exist"
    exit 1
fi

action_type=$2
if [ "$action_type" != "set" ] && [ "$action_type" != "unset" ]; then
    echo "Usage: $0 <interface> set"
    echo "Usage: $0 <interface> unset"
    exit 1
fi
36 |
# add tc rules
# $1: interface, $2: bandwidth (tc rate string, e.g. 1000mbit),
# $3: loss rate (e.g. 0%), $4: delay in milliseconds.
function setup_tc_rules {
    local interface=$1
    local bandwidth=$2
    local loss_rate=$3
    local delay=$4
    echo "Applying tc rules on interface: $interface with bandwidth: $bandwidth, loss_rate: $loss_rate, delay: $delay"

    # step 1.1: set bandwidth
    tc qdisc add dev "$interface" root handle 1: tbf rate "$bandwidth" burst 125.0kb latency 1ms

    # step 1.2: add ifb interface
    # Ingress traffic cannot be shaped directly, so it is mirrored to the
    # ifb0 pseudo-device and netem (loss/delay) is applied there.
    ip link add name ifb0 type ifb
    ip link set dev ifb0 up
    tc qdisc add dev "$interface" ingress
    tc filter add dev "$interface" parent ffff: protocol ip u32 match u32 0 0 action mirred egress redirect dev ifb0
    # check if delay is greater than 0
    # NOTE(review): `-eq` requires an integer delay; a fractional value
    # makes this comparison fail -- confirm callers pass whole milliseconds.
    if [ "$delay" -eq 0 ]; then
        echo "No delay is added"
        tc qdisc add dev ifb0 root netem loss "$loss_rate" limit 10000000
    else
        tc qdisc add dev ifb0 root netem loss "$loss_rate" delay "$delay"ms limit 30000000
    fi
}
61 |
# cleanup tc rules
# $1: interface to strip of the qdiscs/filters added by setup_tc_rules.
function cleanup {
    echo "Cleaning up tc rules"
    local interface=$1
    if tc qdisc show dev "$interface" | grep -q "ingress"; then
        echo "Deleting tc ingress qdisc"
        tc qdisc del dev "$interface" ingress
    fi

    if tc filter show dev "$interface" parent ffff: | grep -q "mirred egress"; then
        echo "Deleting tc filter"
        tc filter del dev "$interface" parent ffff: protocol ip u32 match u32 0 0 action mirred egress redirect dev ifb0
    fi

    if tc qdisc show dev "$interface" | grep -q "tbf 1"; then
        echo "Deleting tbf rate rules on interface $interface"
        # fix: deleting the root qdisc needs no parameters; the previous
        # command re-specified `tbf rate "$bandwidth" ...`, depending on a
        # global variable that is only defined at the bottom of the script.
        tc qdisc del dev "$interface" root
    else
        echo "No tc rules found on interface $interface"
    fi

    if [ -d "/sys/class/net/ifb0" ]; then
        echo "Deleting ifb0 interface"
        tc qdisc del dev ifb0 root
        ip link set dev ifb0 down
        ip link delete ifb0
    fi
}
90 |
# change the following values to set the desired bandwidth, loss rate, and delay
bandwidth=1000mbit
loss_rate=0%
delay=1 #ms

# $action_type was validated above to be exactly "set" or "unset"
case "$action_type" in
    set)
        setup_tc_rules "$applied_interface" "$bandwidth" "$loss_rate" "$delay"
        ;;
    *)
        cleanup "$applied_interface"
        ;;
esac
--------------------------------------------------------------------------------
/src/protosuites/proto.py:
--------------------------------------------------------------------------------
1 | import os
2 | import logging
3 | from abc import ABC, abstractmethod
4 | from enum import IntEnum
5 | from dataclasses import dataclass, field
6 | from typing import (Optional, List)
7 | from protosuites.proto_info import IProtoInfo
8 | from var.global_var import g_root_path
9 |
10 |
class ProtoType(IntEnum):
    """Deployment model of a protocol suite: on every host (distributed)
    or only on a dedicated pair of hosts (none_distributed)."""
    distributed = 0
    none_distributed = 1
14 |
15 |
class ProtoRole(IntEnum):
    """Role a protocol process plays on a host: server, client, or both."""
    server = 0
    client = 1
    both = 2
20 |
21 |
# maps the YAML `type` string of a protocol entry to its ProtoType enum value
proto_type_str_mapping = {
    "distributed": ProtoType.distributed,
    "none_distributed": ProtoType.none_distributed
}
26 |
27 |
@dataclass
class ProtoConfig:
    """Configuration of one protocol suite, populated from the YAML test file."""
    name: str = field(default="")
    bin: Optional[str] = field(default=None)  # binary name of the protocol
    args: Optional[List[str]] = field(default=None)  # command-line args; may contain a %s IP placeholder
    config_file: Optional[str] = field(default=None)  # optional path to a protocol config file
    version: Optional[str] = field(default="")  # protocol version string, informational
    hosts: Optional[List[int]] = field(default=None)  # host indexes the protocol runs on; None = all/default
    port: Optional[int] = field(default=0)  # forward port; 0 means unset
    type: Optional[str] = field(default='distributed')  # key into proto_type_str_mapping
    test_name: str = field(default="")  # used to build test_results/<test_name>/ paths
    protocols: Optional[List['ProtoConfig']] = field(default=None)  # nested sub-protocol configs
    config_base_path: Optional[str] = field(default=None)  # base dir for relative config paths
41 |
42 |
# protocol names Oasis recognizes; the `b*` entries are the BATS variants
SupportedProto = ['btp', 'brtp', 'brtp_proxy', 'tcp', 'kcp']
SupportedBATSProto = ['btp', 'brtp', 'brtp_proxy']
45 |
46 |
class IProtoSuite(IProtoInfo, ABC):
    """Base class for protocol suites.

    Prepares the per-test config/log directories, assembles the protocol
    argument string from the YAML config, and drives the
    pre_run -> run -> post_run lifecycle via start().
    """

    def __init__(self, config: ProtoConfig, is_distributed: bool = True, role: ProtoRole = ProtoRole.both):
        self.is_distributed_var = is_distributed
        self.proto_role = role
        self.is_success = False
        self.config = config
        # save configs; exist_ok avoids the exists()/makedirs() race when
        # several suites of the same test share the directory
        self.log_config_dir = f"{g_root_path}test_results/{self.config.test_name}/{self.config.name}/config/"
        os.makedirs(self.log_config_dir, exist_ok=True)
        # save logs
        self.log_dir = f"{g_root_path}test_results/{self.config.test_name}/{self.config.name}/log/"
        os.makedirs(self.log_dir, exist_ok=True)

        # space-joined args with a trailing space, matching how commands are
        # later built by plain string concatenation
        self.protocol_args: str = ''
        if self.config.args:
            self.protocol_args = ' '.join(self.config.args) + ' '
            logging.info("protocol %s args: %s",
                         self.config.name, self.protocol_args)
        self.process_name = self.config.bin

    def get_config(self) -> ProtoConfig:
        """Return the ProtoConfig this suite was built from."""
        return self.config

    @abstractmethod
    def is_noop(self) -> bool:
        """Return True when this suite performs no real protocol work."""

    @abstractmethod
    def post_run(self, network: 'INetwork') -> bool:  # type: ignore
        """Hook executed after run(); return False to abort start()."""

    @abstractmethod
    def pre_run(self, network: 'INetwork') -> bool:  # type: ignore
        """Hook executed before run(); return False to abort start()."""

    @abstractmethod
    def run(self, network: 'INetwork') -> bool:  # type: ignore
        """Launch the protocol on the network; return False on failure."""

    def start(self, network: 'INetwork') -> bool:  # type: ignore
        """Run pre_run, run, post_run in order; stop at the first failure.

        The result of the last executed phase is kept in self.is_success.
        """
        for phase_name, phase in (("pre_run", self.pre_run),
                                  ("run", self.run),
                                  ("post_run", self.post_run)):
            self.is_success = phase(network)
            if not self.is_success:
                logging.debug("%s failed", phase_name)
                return False
        return True

    @abstractmethod
    def stop(self, network: 'INetwork'):  # type: ignore
        """Terminate the protocol processes on the network."""

    @abstractmethod
    def is_distributed(self) -> bool:
        """Return True when the protocol runs on every host."""
111 |
--------------------------------------------------------------------------------
/src/testsuites/test_scp.py:
--------------------------------------------------------------------------------
1 | import logging
2 | from interfaces.network import INetwork
3 | from protosuites.proto_info import IProtoInfo
4 | from .test import (ITestSuite, TestConfig)
5 |
6 |
class ScpTest(ITestSuite):
    """Measures the time of scp file transfer between two hosts in the network.
    """

    def __init__(self, config: TestConfig) -> None:
        super().__init__(config)
        # names of generated files to transfer and verify
        self.scp_files = []

    def post_process(self):
        return True

    def pre_process(self):
        return True

    def _run_test(self, network: INetwork, proto_info: IProtoInfo):
        """Generate a random file on the client, scp it to the server and
        verify the transfer by comparing sha256 hashes.

        Returns False on invalid configuration, True otherwise; per-file
        pass/fail verdicts are appended to self.result.record.
        """
        hosts = network.get_hosts()
        if hosts is None:
            logging.error("No host found in the network")
            return False
        if self.config.client_host is None or self.config.server_host is None:
            logging.error(
                "Only support scp test with client and server hosts.")
            return False
        hosts_num = len(hosts)
        if self.config.client_host >= hosts_num or self.config.server_host >= hosts_num:
            logging.error(
                "Client or server host index is out of range: %d, %d", self.config.client_host, self.config.server_host)
            return False
        target_file_name = f'scp_data_{self.config.file_size}M'
        gen_file_cmd = f'head -c {self.config.file_size}M /dev/urandom > {target_file_name}'
        logging.info(
            "############### Oasis ScpTest from "
            "%s to %s ###############",
            hosts[self.config.client_host].name(),
            hosts[self.config.server_host].name())
        # prefer the protocol tunnel IP; fall back to the host IP
        tun_ip = proto_info.get_tun_ip(
            network, self.config.server_host)
        if tun_ip == "":
            tun_ip = hosts[self.config.server_host].IP()
        receiver_ip = tun_ip
        # 1. Generate scp files
        hosts[self.config.client_host].cmd(f'{gen_file_cmd}')
        self.scp_files.append(f'{target_file_name}')
        # 2. Run scp client; `script` keeps scp's progress output capturable
        scp_cmd = 'scp -o StrictHostKeyChecking=no -i /root/.ssh/id_rsa'
        for file in self.scp_files:
            scp_cmd += f' {file}'
        scp_cmd += f' root@{receiver_ip}:/tmp/'
        scp_res = hosts[self.config.client_host].cmd(
            f'script -c \'{scp_cmd}\' | tee {self.result.record} ')
        logging.info("ScpTest result: %s", scp_res)
        # 3. Verify each transferred file by hash comparison
        with open(self.result.record, 'a', encoding='utf-8') as f:
            for file in self.scp_files:
                org_file_hash = self.__get_file_hash(
                    hosts[self.config.client_host], file)
                received_file_hash = self.__get_file_hash(
                    hosts[self.config.server_host], f'/tmp/{file}')
                if org_file_hash != "ERROR" and org_file_hash == received_file_hash:
                    f.write(f"{file}: passed\n")
                else:
                    logging.warning(
                        "File %s hash mismatch: original %s, received %s",
                        file, org_file_hash, received_file_hash)
                    f.write(f"{file}: failed\n")
        return True

    def __get_file_hash(self, host, file):
        """Return the sha256 hash of `file` on `host`, or "ERROR"."""
        popen_res = host.popen(
            f'sha256sum {file}')
        if popen_res and hasattr(popen_res, "stdout"):
            output = popen_res.stdout.read().decode('utf-8')
            logging.info("ScpTest __get_file_hash output: %s", output)
            # guard against empty/whitespace-only output: split()[0] on such
            # output raised IndexError in the original
            parts = output.split()
            file_hash = parts[0] if parts else "ERROR"
        else:
            file_hash = "ERROR"
            # original passed `{file}` (a set literal) to the lazy-% logger
            logging.error(
                "Failed to get hash for file %s, using ERROR as placeholder.", file)
        return file_hash
88 |
--------------------------------------------------------------------------------
/src/testsuites/test_regular.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import time
3 | from interfaces.network import INetwork
4 | from protosuites.proto_info import IProtoInfo
5 | from .test import (ITestSuite, TestConfig, TestType)
6 |
7 |
class RegularTest(ITestSuite):
    """RegularTest is used to represent a regular test tool which can be run with a pattern of
    command line arguments, such as `./binary %s other_args`; `%s` is used to indicate the IP address
    of the target.
    """

    def __init__(self, config: TestConfig) -> None:
        super().__init__(config)
        self.binary_path = config.name
        if config.name == "sshping":
            self.config.test_type = TestType.sshping
        else:
            self.config.test_type = TestType.throughput

    def post_process(self):
        return True

    def pre_process(self):
        return True

    def _get_format_args(self, ip: str):
        """Return the configured args with `%s` (if any) replaced by `ip`."""
        formatted_args = self.config.args if self.config.args else ""
        if isinstance(formatted_args, list):
            formatted_args = " ".join(formatted_args)
        if "%s" in formatted_args:
            formatted_args = formatted_args % ip
        return formatted_args

    def _wait_timeout_or_finish(self, hosts):
        """Sleep for the configured test duration, then kill the binary on all hosts."""
        interval_num = self.config.interval_num or 10
        interval = self.config.interval or 1
        max_wait_time = interval_num * interval + 1
        # wait for the test to finish in max_wait_time seconds
        wait_time = 0
        while wait_time < max_wait_time:
            wait_time += 1
            time.sleep(1)
        for h in hosts:
            if h is None:
                continue
            logging.info("RegularTest %s timeout", self.config.name)
            h.cmd(
                f'pkill -9 -f {self.binary_path}')
            logging.info("RegularTest %s killed", self.config.name)

    def _run_test(self, network: INetwork, proto_info: IProtoInfo):
        """Run the tool from client to server; with no explicit pair, every
        host except host 0 targets host 0."""
        hosts = network.get_hosts()
        if hosts is None:
            logging.error("No host found in the network")
            return False
        hosts_num = len(hosts)
        if self.config.client_host is None or self.config.server_host is None:
            # receiver is host 0; its IP and the formatted args are loop
            # invariants, so compute them once (the original re-resolved the
            # tunnel IP on every iteration)
            tun_ip = proto_info.get_tun_ip(
                network, 0)
            if tun_ip == "":
                tun_ip = hosts[0].IP()
            receiver_ip = tun_ip
            formatted_args = self._get_format_args(receiver_ip)
            for i in range(1, hosts_num):
                logging.info(
                    "############### Oasis RegularTest %s from "
                    "%s to %s ###############",
                    self.config.name, hosts[i].name(), hosts[0].name())
                hosts[i].cmd(
                    f'{self.binary_path} {formatted_args} > {self.result.record} &')
            self._wait_timeout_or_finish(hosts)
            return True
        logging.info(
            "############### Oasis RegularTest %s from "
            "%s to %s ###############",
            self.config.name,
            hosts[self.config.client_host].name(),
            hosts[self.config.server_host].name())
        tun_ip = proto_info.get_tun_ip(
            network, self.config.server_host)
        if tun_ip == "":
            tun_ip = hosts[self.config.server_host].IP()
        receiver_ip = tun_ip
        formatted_args = self._get_format_args(receiver_ip)
        logging.info("formatted_args: %s", formatted_args)
        hosts[self.config.client_host].cmd(
            f'{self.binary_path} {formatted_args} > {self.result.record} &')
        self._wait_timeout_or_finish([hosts[self.config.client_host]])
        return True
95 |
--------------------------------------------------------------------------------
/src/testsuites/test_iperf.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import time
3 | from interfaces.network import INetwork
4 | from protosuites.proto_info import IProtoInfo
5 | from .test import (ITestSuite, TestConfig)
6 |
7 |
class IperfTest(ITestSuite):
    """Runs an iperf3 throughput test between a client and a server host.

    Supports TCP and UDP modes, optional target bitrate, and the BATS
    port-forwarding setup where traffic is sent to the client's own IP.
    """

    def __init__(self, config: TestConfig) -> None:
        super().__init__(config)
        self.is_udp_mode = False
        self.is_port_forward = False
        if self.config.packet_type == "udp":
            self.is_udp_mode = True
            if self.config.bitrate == 0:
                # UDP needs an explicit target bitrate; default to 10 Mbps
                self.config.bitrate = 10
            logging.info("IperfTest is in UDP mode, bitrate: %d Mbps",
                         self.config.bitrate)

    def post_process(self):
        return True

    def pre_process(self):
        return True

    def _run_iperf(self, client, server, recv_port, recv_ip):
        """Start an iperf3 server on `server`, run the client against it,
        then kill both ends. Returns True unless config is missing."""
        if self.config is None:
            logging.error("IperfTest config is None.")
            return False
        parallel = self.config.parallel or 1
        if parallel > 1:
            logging.info(
                "IperfTest is running with parallel streams: %d", parallel)
        interval_num = self.config.interval_num or 10
        interval = self.config.interval or 1
        server.cmd(f'iperf3 -s -p {recv_port} -i {int(interval)} -V --forceflush'
                   f' --logfile {self.result.record} &')
        iperf3_client_cmd = f'iperf3 -c {recv_ip} -p {recv_port} -P {parallel} -i {int(interval)}' \
            f' -t {int(interval_num * interval)}'
        if self.is_udp_mode:
            iperf3_client_cmd += f' -u -b {self.config.bitrate}M'
        else:
            iperf3_client_cmd += ' --connect-timeout 5000'
            if self.config.bitrate != 0:
                iperf3_client_cmd += f' -b {self.config.bitrate}M'
        logging.info('iperf client cmd: %s', iperf3_client_cmd)
        res = client.popen(
            f'{iperf3_client_cmd}').stdout.read().decode('utf-8')
        logging.info('iperf client output: %s', res)
        logging.info('iperf test result save to %s', self.result.record)
        time.sleep(1)
        client.cmd('pkill -9 -f iperf3')
        server.cmd('pkill -9 -f iperf3')
        return True

    def _run_test(self, network: INetwork, proto_info: IProtoInfo):
        """Resolve the receiver IP/port for the active protocol, then run iperf3."""
        hosts = network.get_hosts()
        if hosts is None:
            return False
        if self.config.client_host is None or self.config.server_host is None:
            # default: first host is the client, last host is the server
            self.config.client_host = 0
            self.config.server_host = len(hosts) - 1

        client = hosts[self.config.client_host]
        server = hosts[self.config.server_host]
        receiver_ip = None
        upper_proto_name = proto_info.get_protocol_name().upper()
        if upper_proto_name in ["KCP", "QUIC"]:
            # kcp tun like a proxy, all traffic will be forwarded to the proxy server
            tun_ip = proto_info.get_tun_ip(network, self.config.client_host)
            if tun_ip == "":
                tun_ip = client.IP()
            receiver_ip = tun_ip
        else:
            if upper_proto_name in ["BTP", "BRTP"] and self.is_port_forward:
                # iperf3 default port 5201 is set as udp port-forwarding port in h0.
                # send data to h0:5201, then forward to the last node in the chain.
                receiver_ip = client.IP()
                logging.debug(
                    "Test iperf3 with port forwarding from %s:5201 to %s", receiver_ip, server.IP())
            else:
                tun_ip = proto_info.get_tun_ip(
                    network, self.config.server_host)
                if tun_ip == "":
                    tun_ip = server.IP()
                receiver_ip = tun_ip
        # only kcp has forward port `10100`
        receiver_port = proto_info.get_forward_port()
        if receiver_port == 0:
            # if no forward port defined, use iperf3 default port 5201
            receiver_port = 5201

        logging.info(
            "############### Oasis IperfTest from %s to %s ###############", client.name(), server.name())
        return self._run_iperf(client, server, receiver_port, receiver_ip)
96 |
--------------------------------------------------------------------------------
/src/protosuites/std_protocol.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import time
3 | import re
4 | from protosuites.proto import (ProtoConfig, IProtoSuite, ProtoRole)
5 | from interfaces.network import INetwork
6 |
7 |
class StdProtocol(IProtoSuite):
    """StdProtocol is the protocol with the user process running on the host. It can accept arguments from YAML config.
    """

    def __init__(self, config: ProtoConfig, is_distributed: bool = True, role: ProtoRole = ProtoRole.both):
        super().__init__(config, is_distributed, role)
        self.forward_port = self.config.port
        if self.process_name is None:
            logging.warning(
                "No process name found for StdProtocol %s .", self.config.name)

    def is_distributed(self) -> bool:
        return self.is_distributed_var

    def is_noop(self) -> bool:
        return False

    def post_run(self, network: INetwork):
        return True

    def pre_run(self, network: INetwork):
        return True

    def run(self, network: INetwork):
        """Start the protocol binary on the configured hosts and verify the
        processes came up. Returns False on misconfiguration or start failure."""
        if self.config.type == 'none_distributed':
            if self.config.hosts is None or len(self.config.hosts) != 2:
                logging.error(
                    "Test non-distributed protocols, but protocol server/client hosts are not set correctly.")
                return False
        hosts = network.get_hosts()
        if hosts is None:
            return False
        if self.config.hosts is None:
            # if not defined, then run on first and last host
            self.config.hosts = [0, len(hosts) - 1]
        for host_id in self.config.hosts:
            cur_protocol_args = self.get_protocol_args(network)
            hosts[host_id].cmd(
                f'{self.config.bin} {cur_protocol_args} > '
                f'{self.log_dir}{self.config.name}_h{host_id}.log &')
        # give the processes a moment to start before checking
        time.sleep(2)
        for host_id in self.config.hosts:
            res = hosts[host_id].cmd(f"ps aux | grep {self.process_name}")
            if self.process_name and res.find(self.process_name) == -1:
                logging.error(
                    "Failed to start the protocol %s on %s", self.config.name, hosts[host_id].name())
                return False
            logging.info(
                "############### Oasis start %s protocol on %s ###############",
                self.config.name, hosts[host_id].name())
        return True

    def stop(self, network: INetwork):
        """Kill the protocol process on every host of the network."""
        hosts = network.get_hosts()
        if hosts is None:
            return False
        for host in hosts:
            host.cmd(f'pkill -9 -f {self.process_name}')
            logging.info(
                "############### Oasis stop %s protocol on %s ###############",
                self.config.name, host.name())
        return True

    def get_forward_port(self) -> int:
        if self.forward_port is not None:
            return self.forward_port
        return 0

    def get_tun_ip(self, network: 'INetwork', host_id: int) -> str:
        """Return the OLSR tunnel IP of `host_id`, or "" when unavailable."""
        routing_type_name = network.get_routing_strategy().routing_type()
        if routing_type_name == 'OLSRRouting':
            host = network.get_hosts()[host_id]
            pf = host.popen("ip addr show lo label lo:olsr")
            if pf is None:
                return ""
            ip = pf.stdout.read().decode('utf-8')
            match = re.search(r'inet (\d+\.\d+\.\d+\.\d+)', ip)
            if match:
                return match.group(1)
            return ""
        return ""

    def get_protocol_name(self) -> str:
        return self.config.name

    def get_protocol_version(self) -> str:
        return self.config.version or ""

    def get_protocol_args(self, network: INetwork) -> str:
        """Return the argument string; for kcp, substitute the receiver IP
        into the `%s` placeholder."""
        # only kcp substitutes the placeholder, so only then resolve the
        # receiver IP (the original resolved it — an on-host command — even
        # when the result was discarded)
        if "%s" in self.protocol_args and 'kcp' in self.config.name:
            hosts = network.get_hosts()
            receiver_ip = self.get_tun_ip(network, len(hosts) - 1)
            if receiver_ip == "":
                receiver_ip = hosts[-1].IP()
            return self.protocol_args % receiver_ip
        return self.protocol_args
105 |
--------------------------------------------------------------------------------
/src/data_analyzer/sshping_analyzer.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import re
3 | import os
4 | import matplotlib.pyplot as plt
5 | import numpy as np
6 | from .analyzer import IDataAnalyzer
7 |
8 |
class SSHPingAnalyzer(IDataAnalyzer):
    """Analyze and visualize multiple input sshping logs.
    """

    def analyze(self):
        return True

    def visualize(self):
        """
        plot sshping graph: a per-packet RTT scatter plot, then a CDF
        """
        data_sshping_all = {}
        plt.clf()
        plt.rcParams['font.family'] = 'serif'
        plt.xlabel('Time (ms)', fontsize=8,
                   fontweight='bold')
        plt.ylabel('RTT (ms)', fontsize=8,
                   fontweight='bold')
        default_title = "SSHPing RTT for each packet\n"
        default_title += self.config.subtitle
        plt.title(default_title, fontsize=10, fontweight="bold")
        # matches e.g. "Ping 42/1000: 1.234 ms"
        sshping_pattern = r"Ping (\d+)/1000:\s+([\d.]+) ms"
        total_pings = 1000
        is_plot = False
        for input_log in self.config.input:
            logging.info("Visualize rtt log: %s", input_log)
            if not os.path.exists(input_log):
                logging.error("sshping log file %s not found", input_log)
                continue
            log_base_name = os.path.basename(input_log)
            data_sshping_all[log_base_name] = []
            with open(f"{input_log}", "r", encoding='utf-8') as f:
                content = f.read()
                pings = {}
                for match in re.findall(sshping_pattern, content):
                    ping_id = int(match[0])
                    rtt = float(match[1])
                    pings[ping_id] = rtt
                # keep only the pings that actually appear in the log
                rtt_values = [pings.get(i, None)
                              for i in range(total_pings)]
                x_values = list(range(total_pings))
                valid_x = [x for x, y in zip(
                    x_values, rtt_values) if y is not None]
                valid_y = [y for y in rtt_values if y is not None]
                data_sshping_all[log_base_name] = valid_y
                # start plot x-y scatter graph: x is the ping index
                log_label = log_base_name.split("_")[0]
                plt.scatter(valid_x, valid_y,
                            s=3, alpha=0.5, label=f"{log_label}")
                plt.legend(loc='upper left', fontsize=8)
                is_plot = True
        if not is_plot:
            logging.warning("no data to plot")
            return
        if not self.config.output:
            self.config.output = "sshping.svg"
        if '.svg' not in self.config.output:
            # output is treated as a path prefix here — TODO confirm callers
            # always pass a trailing separator
            plt.savefig(f"{self.config.output}sshping.svg")
            logging.info("Visualize sshping diagram saved to %s",
                         self.config.output)
        else:
            plt.savefig(f"{self.config.output}")
            logging.info("Visualize sshping diagram saved to %s",
                         self.config.output)
        self.plot_sshping_cdf(data_sshping_all)

    def plot_sshping_cdf(self, rtt_data: dict):
        """Plot the RTT CDF for each log in `rtt_data` and save it as svg."""
        plt.clf()
        plt.rcParams['font.family'] = 'serif'
        plt.xlabel('RTT (ms)', fontsize=8,
                   fontweight='bold')
        plt.ylabel('Cumulative Probability', fontsize=8,
                   fontweight='bold')
        default_title = "SSHPing CDF\n"
        default_title += self.config.subtitle
        plt.title(default_title, fontsize=10, fontweight="bold")
        for log_base_name, rtt_list in rtt_data.items():
            if len(rtt_list) == 0:
                logging.warning(
                    "no per sshping data in %s", log_base_name)
                continue
            # Sort the RTT values
            rtt_list.sort()
            # Calculate the cumulative probabilities
            cdf = np.arange(1, len(rtt_list) + 1) / len(rtt_list)
            # Plot the CDF
            log_label = log_base_name.split("_")[0]
            plt.plot(rtt_list, cdf, label=f"{log_label}")
        plt.legend(loc='lower right', fontsize=8)
        # Save the plot to svg file
        if '.svg' not in self.config.output:
            plt.savefig(f"{self.config.output}sshping_cdf.svg")
            logging.info("Visualize sshping CDF diagram saved to %s",
                         self.config.output)
        else:
            # the original concatenated dirname + filename without a
            # separator, producing e.g. "dirsshping_cdf.svg"
            path = os.path.dirname(self.config.output)
            cdf_path = os.path.join(path, "sshping_cdf.svg")
            plt.savefig(cdf_path)
            logging.info("Visualize sshping CDF diagram saved to %s",
                         cdf_path)
108 |
--------------------------------------------------------------------------------
/.github/workflows/.github.oasis-ci.yml:
--------------------------------------------------------------------------------
1 | name: Oasis CI
2 | on:
3 | pull_request:
4 | branches: ["main", "dev"]
5 | types:
6 | - opened
7 | - synchronize
8 | - ready_for_review
9 | paths-ignore:
10 | - "**.md"
11 | - "**.json"
12 | push:
13 | branches:
14 | - main
15 | - dev
16 | paths-ignore:
17 | - "**.md"
18 | - "**.json"
19 |
20 | concurrency:
21 | group: "${{ github.workflow }} @ ${{ github.event.pull_request.head.label || github.head_ref || github.ref }}"
22 | cancel-in-progress: true
23 |
24 | jobs:
25 | oasis-run:
26 | runs-on: ubuntu-latest
27 | steps:
28 | - uses: actions/checkout@v4
29 | - name: sync git-lfs
30 | run: git lfs install && git lfs pull
31 | - name: Set up python
32 | uses: actions/setup-python@v5
33 | with:
34 | python-version: "3.10.14"
35 | - name: Install dependencies
36 | run: |
37 | python3 -m pip install --upgrade pip
38 | pip install -r ${{github.workspace}}/src/containernet/requirements.txt
39 | - name: Restore Cached Containernet Image
40 | id: cache-containernet-image
41 | uses: actions/cache@v4
42 | with:
43 | path: ${{github.workspace}}/containernet_cache/
44 | key: docker_cache-${{ hashFiles('**/containernet-docker-official/Dockerfile') }}
45 | - name: Restore Cached Node Image
46 | id: cache-node-image
47 | uses: actions/cache@v4
48 | with:
49 | path: ${{github.workspace}}/node_cache/
50 | key: docker_cache-${{ hashFiles('**/protocol-docker-azure/Dockerfile') }}
51 | - name: Build containernet
52 | if: steps.cache-containernet-image.outputs.cache-hit != 'true'
53 | run: |
54 | cd ${{github.workspace}}/src/config/containernet-docker-official && docker build -t containernet:latest .
55 | mkdir -p ${{github.workspace}}/containernet_cache/
56 | docker image save containernet:latest --output ${{github.workspace}}/containernet_cache/containernet.tar
57 | docker images
58 | - name: Build containernet node
59 | if: steps.cache-node-image.outputs.cache-hit != 'true'
60 | run: |
61 | cd ${{github.workspace}}/src/config/protocol-docker-azure && docker build -t ubuntu:22.04 .
62 | mkdir -p ${{github.workspace}}/node_cache/
63 | docker image save ubuntu:22.04 --output ${{github.workspace}}/node_cache/containernet_node.tar
64 | docker images
65 | - name: Load containernet image
66 | working-directory: ${{github.workspace}}/
67 | if: steps.cache-containernet-image.outputs.cache-hit == 'true'
68 | run: |
69 | docker image load --input ./containernet_cache/containernet.tar
70 | - name: Load containernet node image
71 | working-directory: ${{github.workspace}}/
72 | if: steps.cache-node-image.outputs.cache-hit == 'true'
73 | run: |
74 | docker image load --input ./node_cache/containernet_node.tar
75 | - name: Loading secrets
76 | run: |
77 | echo "Hardware_info=${{secrets.BATS_HARDWARE_INFO}}" >> ${{github.workspace}}/bats/licence
78 | echo "Licence_id=${{secrets.BATS_LICENCE_ID}}" >> ${{github.workspace}}/bats/licence
79 | - name: SSHPing tests
80 | working-directory: ${{github.workspace}}/
81 | run: |
82 | python3 ./src/start.py -p src/config \
83 | --containernet=official \
84 | -t protocol-sshping-test.yaml
85 | if [ ! -f ${{github.workspace}}/test.success ]; then
86 | echo "oasis test failed"
87 | exit 1
88 | fi
89 | rm -rf ${{github.workspace}}/test.success
90 | - name: SCP test
91 | working-directory: ${{github.workspace}}/
92 | run: |
93 | python3 ./src/start.py -p src/config \
94 | --containernet=official \
95 | -t protocol-scp-test.yaml -d True
96 | if [ ! -f ${{github.workspace}}/test.success ]; then
97 | echo "oasis test failed"
98 | exit 1
99 | fi
100 | rm -rf ${{github.workspace}}/test.success
101 | - name: Throughput tests
102 | working-directory: ${{github.workspace}}/
103 | run: |
104 | python3 ./src/start.py -p src/config \
105 | --containernet=official \
106 | -t protocol-single-hop-test.yaml
107 | if [ ! -f ${{github.workspace}}/test.success ]; then
108 | echo "oasis test failed"
109 | exit 1
110 | fi
111 | - name: Upload test logs
112 | uses: actions/upload-artifact@v4
113 | continue-on-error: true
114 | with:
115 | name: ci-test-logs
116 | path: |
117 | ${{github.workspace}}/test_results/**
118 | retention-days: 1
119 |
--------------------------------------------------------------------------------
/src/core/network_mgr.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import time
3 |
4 | from core.config import NodeConfig
5 | from core.topology import ITopology
6 | from containernet.containernet_network import ContainerizedNetwork
7 | from routing.routing_factory import RoutingFactory, route_string_to_enum
8 | from interfaces.network_mgr import (INetworkManager, NetworkType)
9 |
# alphabet table: per-instance node-name suffixes; its length is the upper
# bound on the number of parallel network instances
alphabet = ['h', 'i', 'j', 'k', 'l', 'm',
            'n', 'o', 'p', 'q', 'r', 's', 't']
13 |
14 |
class NetworkManager(INetworkManager):
    """NetworkManager manages multiple network instances.
    """

    def __init__(self):
        super().__init__()
        self.networks = []          # live ContainerizedNetwork instances
        self.net_num = 0            # number of managed networks
        self.cur_top = None         # topology used by start_networks() to reload
        self.enabled_halt = False   # when True, stop/reset block forever (debug aid)
        self.type = NetworkType.containernet

    def get_top_description(self):
        # all networks share one topology, so the first one is representative
        if len(self.networks) > 0:
            return self.networks[0].get_topology_description()
        return ''

    def get_networks(self):
        return self.networks

    def build_networks(self, node_config: NodeConfig,
                       topology: ITopology,
                       net_num: int,
                       route: str = "static_route"):
        """Build multiple network instances based on the given topology.

        Args:
            node_config (NodeConfig): The configuration of each node in the network.
            topology (ITopology): The topology to be built.
            net_num (int): The number of networks to be built.
            route (str, optional): The route strategy. Defaults to "static_route".

        Returns:
            bool: True if the networks are built successfully, False otherwise.
        """
        # each instance needs a unique name suffix from `alphabet`
        if net_num > len(alphabet):
            logging.error("Error: number of networks exceeds the limit.")
            return False
        org_name_prefix = node_config.name_prefix
        cur_net_num = len(self.networks)
        if cur_net_num < net_num:
            # grow: create only the missing instances, keeping existing ones
            for i in range(cur_net_num, net_num):
                if net_num > 1:
                    logging.info(
                        "####################################################")
                    logging.info(
                        "########## Oasis Parallel Execution Mode. ##########")
                    logging.info(
                        "########## network instance %s ##########", i)
                    logging.info(
                        "####################################################")
                node_config.name_prefix = f"{org_name_prefix}{alphabet[i]}"
                node_config.bind_port = False
                route_strategy = RoutingFactory().create_routing(
                    route_string_to_enum[route])
                net = ContainerizedNetwork(
                    node_config, topology, route_strategy)
                net.id = i
                self.networks.append(net)
        elif cur_net_num > net_num:
            # stop the extra networks
            for i in range(net_num, cur_net_num):
                self.networks[i].stop()
                logging.info("########## Oasis stop the network %s.", i)
            self.networks = self.networks[:net_num]
        logging.info(
            "######################################################")
        logging.info("########## Oasis build %d network with top: \n %s .", net_num,
                     topology.description())
        logging.info(
            "######################################################")
        self.net_num = len(self.networks)
        # use `self.cur_top` to reload network
        self.cur_top = topology
        return True

    def start_networks(self):
        """reload networks if networks is already built; otherwise, start networks.
        """
        # NOTE(review): errors below are logged but not returned — execution
        # continues; confirm callers do not rely on a status value
        if self.cur_top is None:
            logging.error("Current topology is not set.")
        if self.net_num == 0:
            logging.error("nothing to start")
        for i in range(self.net_num):
            if not self.networks[i].is_started():
                self.networks[i].start()
                logging.info("########## Oasis start the network %s.", i)
            else:
                # reload the network instances can save time
                self.networks[i].reload(self.cur_top)
                logging.info("########## Oasis reload the network %s.", i)

    def stop_networks(self):
        # Stop all networks
        # halt mode: spin forever so the networks stay up for inspection
        if self.enabled_halt:
            while True:
                time.sleep(10)
                logging.info("########## Oasis halt the destroy of networks.")
        for i in range(self.net_num):
            logging.info("########## Oasis stop the network %s.", i)
            self.networks[i].stop()
        self.networks = []
        self.net_num = 0

    def reset_networks(self):
        # halt mode: spin forever so the networks stay up for inspection
        if self.enabled_halt:
            while True:
                time.sleep(10)
                logging.info("########## Oasis halt the reset of networks.")
        # Reset all networks, mainly for routes/tc rules/ip config.
        for i in range(self.net_num):
            self.networks[i].reset()
            logging.info("########## Oasis reset the network %s.", i)

    def enable_halt(self):
        # once enabled, stop_networks()/reset_networks() will block forever
        self.enabled_halt = True
131 |
--------------------------------------------------------------------------------
/src/interfaces/network.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import copy
3 | import os # pylint: disable=W0611
4 | from abc import ABC, abstractmethod
5 | from typing import List
6 | from core.topology import (ITopology)
7 | from protosuites.proto import IProtoSuite
8 | from interfaces.routing import IRoutingStrategy
9 | from interfaces.host import IHost
10 | from testsuites.test import (ITestSuite)
11 |
12 |
class INetwork(ABC):
    """Abstract network under test.

    Holds the protocol suites and test suites scheduled for this network
    and runs every (protocol, test) combination via perform_test().
    Concrete subclasses supply host access, topology info and lifecycle
    operations (start/stop/reload).
    """

    def __init__(self):
        self.id = 0
        self.test_suites = []
        self.proto_suites = []
        # test type -> {'config': <test config copy>, 'results': [TestResult...]}
        self.test_results = {}
        self.is_started_flag = False
        self.is_accessible_flag = True
        # Base path for protocol config files; taken from the first protocol
        # suite added (see _load_config_base_path()).
        self.config_base_path = None

    def is_accessible(self):
        return self.is_accessible_flag

    def get_id(self):
        return self.id

    @abstractmethod
    def start(self):
        pass

    @abstractmethod
    def stop(self):
        pass

    @abstractmethod
    def get_hosts(self) -> List[IHost]:
        pass

    @abstractmethod
    def get_num_of_host(self) -> int:
        pass

    @abstractmethod
    def get_host_ip_range(self) -> str:
        pass

    @abstractmethod
    def get_link_table(self):
        pass

    @abstractmethod
    def get_routing_strategy(self) -> IRoutingStrategy:
        pass

    @abstractmethod
    def reload(self, top: ITopology):
        pass

    def is_started(self):
        return self.is_started_flag

    def get_topology_description(self):
        # Subclasses may override to describe their topology.
        return ""

    def add_protocol_suite(self, proto_suite: IProtoSuite):
        self.proto_suites.append(proto_suite)
        self._load_config_base_path(proto_suite)

    def add_test_suite(self, test_suite: ITestSuite):
        self.test_suites.append(test_suite)

    def perform_test(self):
        """Perform the test for each input case from YAML file.

        Runs every registered test suite under every registered protocol
        suite. Returns False as soon as a protocol fails to start or a test
        fails; True when all combinations succeed. Results are accumulated
        in `self.test_results`, grouped by test type.
        """
        if not self.proto_suites:
            logging.error("No protocol set")
            return False
        if not self.test_suites:
            logging.error("No test suite set")
            return False
        # Combination of protocol and test
        for proto in self.proto_suites:
            # start the protocol
            logging.info("Starting protocol %s on network %s",
                         proto.get_config().name, self.get_id())
            if proto.start(self) is False:
                logging.error("Protocol %s failed to start",
                              proto.get_config().name)
                return False
            try:
                for test in self.test_suites:
                    valid_config = self._check_test_config(proto, test)
                    if not valid_config:
                        continue
                    # run `test` on `network`(self) specified by `proto`
                    logging.info("Running test protocol %s %s", proto.get_config().name,
                                 test.type())
                    result = test.run(self, proto)
                    if result.is_success is False:
                        logging.error(
                            "Test %s failed, please check the log file %s",
                            test.config.test_name, result.record)
                        return False
                    # mark competition test
                    if test.is_competition_test():
                        result.is_competition_test = True
                    if test.type() not in self.test_results:
                        self.test_results[test.type()] = {}
                        self.test_results[test.type()]['results'] = []
                        self.test_results[test.type()]['config'] = copy.deepcopy(
                            test.get_config())
                    self.test_results[test.type()]['results'].append(
                        copy.deepcopy(result))
                    logging.debug("Added Test result for %s", result.record)
            finally:
                # Bug fix: always stop the protocol, even when a test fails,
                # so it is not left running on the hosts after an early return.
                proto.stop(self)
        return True

    def get_test_results(self):
        return self.test_results

    def reset(self):
        """Clear all registered suites and accumulated results."""
        self.proto_suites = []
        self.test_suites = []
        self.test_results = {}

    def _load_config_base_path(self, proto_suite: IProtoSuite):
        # Remember the config base path of the first protocol suite only.
        if self.config_base_path is None:
            self.config_base_path = proto_suite.get_config().config_base_path

    def _check_test_config(self, proto: IProtoSuite, test: ITestSuite):
        """Verify that `test`'s client/server hosts match `proto`'s hosts.

        No-op protocols always pass; distributed protocols run on every
        host, so they pass as well. Non-distributed protocols must run
        exactly on the test's (client, server) host pair.
        """
        if proto.is_noop():
            return True
        if not proto.is_distributed():
            proto_conf = proto.get_config()
            if proto_conf is None:
                logging.error("Protocol config is not set")
                return False
            hosts = proto_conf.hosts
            if hosts is None or len(hosts) != 2:
                logging.error(
                    "INetwork Test non-distributed protocols, but protocol server/client hosts are not set correctly.")
                return False
            if hosts[0] != test.config.client_host or \
                    hosts[1] != test.config.server_host:
                logging.error(
                    "Test non-distributed protocols, protocol client/server runs on %s/%s, "
                    "but test tools client/server hosts are %s/%s.",
                    hosts[0], hosts[1],
                    test.config.client_host, test.config.server_host)
                return False
        return True
155 |
--------------------------------------------------------------------------------
/src/protosuites/noop_protocol.py:
--------------------------------------------------------------------------------
1 | import logging
2 | from abc import ABC, abstractmethod
3 | from interfaces.network import INetwork
4 | from protosuites.proto import (ProtoConfig, IProtoSuite, ProtoRole)
5 |
6 |
def is_next_protocol(proto_name: str) -> bool:
    """Return True when `proto_name` carries a bats `next` protocol marker."""
    return '-next' in proto_name or '-NEXT' in proto_name
12 |
13 |
def is_no_op_protocol(proto_name: str) -> bool:
    """A protocol is no-op when it is plain TCP/UDP or a bats `next` protocol."""
    for marker in ('tcp', 'TCP', 'udp', 'UDP'):
        if marker in proto_name:
            return True
    return is_next_protocol(proto_name)
20 |
21 |
class ProtocolConfigInf(ABC):
    """Interface for protocol version configuration.

    A concrete implementation applies a protocol version/variant to every
    host of a network (setup) and later undoes that change (restore).
    """

    def __init__(self, name: str, version: str):
        # Protocol identifier and the version/variant to be applied.
        self.name = name
        self.version = version
        # host name -> original version, populated during setup() so that
        # restore() can put hosts back the way they were.
        self.default_version_dict = {}

    def __str__(self):
        return "{} {}".format(self.name, self.version)

    @abstractmethod
    def setup(self, network: 'INetwork') -> bool:  # type: ignore
        pass

    @abstractmethod
    def restore(self, network: 'INetwork') -> bool:  # type: ignore
        pass
40 |
41 |
class TCPConfigInf(ProtocolConfigInf):
    """Switches the TCP congestion-control algorithm on every host.

    setup() records each host's current algorithm in
    `self.default_version_dict` so restore() can put it back afterwards.
    Fixes: removed useless f-string prefixes (W1309) on literal command
    strings and on a logging call that already used lazy %-args.
    """

    def setup(self, network: 'INetwork') -> bool:  # type: ignore
        """Set `self.version` as the congestion control on all hosts.

        Returns False for unsupported versions; hosts that already use the
        requested algorithm are skipped (and not recorded for restore).
        """
        if self.version not in ['cubic', 'bbr', 'reno']:
            logging.error(
                "TCP version %s is not supported, please check the configuration.", self.version)
            return False
        hosts = network.get_hosts()
        # read `tcp_congestion_control` before change
        for host in hosts:
            pf = host.popen(
                "sysctl net.ipv4.tcp_congestion_control")
            if pf is None:
                logging.error(
                    "Failed to get the tcp congestion control on %s", host.name())
                continue
            res = pf.stdout.read().decode('utf-8')
            # sysctl prints "net.ipv4.tcp_congestion_control = <algo>"
            default_version = res.split('=')[-1].strip()
            if default_version == self.version:
                logging.info(
                    "tcp default version on %s is already %s, skip the setup.",
                    host.name(), default_version)
                continue
            self.default_version_dict[host.name()] = default_version
            logging.debug("tcp default version on %s is %s",
                          host.name(), default_version)
            host.cmd(
                f'sysctl -w net.ipv4.tcp_congestion_control={self.version}')
            host.cmd("sysctl -p")
            logging.info(
                "############### Oasis set the congestion control"
                " algorithm to %s on %s ###############", self.version, host.name())
        return True

    def restore(self, network: 'INetwork') -> bool:  # type: ignore
        """Restore the congestion control recorded during setup()."""
        # skip restore when the default_version_dict is empty
        if not self.default_version_dict:
            logging.info(
                "############### Oasis TCPConfigInf restore skipped ###############")
            return True
        hosts = network.get_hosts()
        for host in hosts:
            default_ver = self.default_version_dict.get(host.name())
            if default_ver is None:
                # Host was skipped in setup() (or never seen); nothing to undo.
                logging.warning(
                    "Host %s not found in default_version_dict during restore.", host.name())
                continue
            host.cmd(
                f'sysctl -w net.ipv4.tcp_congestion_control={default_ver}')
            host.cmd("sysctl -p")
            logging.info(
                "############### Oasis restore the congestion control"
                " algorithm to %s on %s ###############", default_ver, host.name())
        return True
98 |
99 |
class NoOpProtocol(IProtoSuite):
    """NoOpProtocol covers protocols built into the system: no process to run.

    For TCP variants it still applies/restores the configured congestion
    control algorithm around the test run via TCPConfigInf.
    """

    def __init__(self, config: ProtoConfig, is_distributed: bool = True, role: ProtoRole = ProtoRole.both):
        super().__init__(config, is_distributed, role)
        self.forward_port = self.config.port
        self.proto_version_config_inf = None
        if 'tcp' in self.config.name.lower():
            # Default to cubic when no explicit TCP version is configured.
            tcp_version = self.config.version if self.config.version else 'cubic'
            self.proto_version_config_inf = TCPConfigInf('tcp', tcp_version)
        logging.info("NoOpProtocol initialized for: %s", self.config.name)

    def is_distributed(self) -> bool:
        return self.is_distributed_var

    def is_noop(self) -> bool:
        return True

    def pre_run(self, network: INetwork):
        # Apply version-specific tuning (e.g. TCP congestion control), if any.
        if self.proto_version_config_inf is not None:
            self.proto_version_config_inf.setup(network)
        return True

    def run(self, network: INetwork):
        # Built-in protocol: nothing to launch.
        return True

    def post_run(self, network: INetwork):
        return True

    def stop(self, network: INetwork):
        # Undo whatever pre_run() configured.
        if self.proto_version_config_inf is not None:
            self.proto_version_config_inf.restore(network)
        return True

    def get_protocol_name(self) -> str:
        return self.config.name

    def get_protocol_version(self) -> str:
        return self.config.version if self.config.version else ""

    def get_protocol_args(self, network: INetwork) -> str:
        return self.protocol_args

    def get_forward_port(self) -> int:
        return self.forward_port if self.forward_port is not None else 0

    def get_tun_ip(self, network: 'INetwork', host_id: int) -> str:
        # No tunnel interface for no-op protocols.
        return ""
151 |
--------------------------------------------------------------------------------
/src/core/topology.py:
--------------------------------------------------------------------------------
1 | from abc import ABC, abstractmethod
2 | from enum import IntEnum
3 |
4 | from dataclasses import dataclass, field
5 | from typing import Optional, List
6 | import logging
7 | import os
8 | import json
9 |
10 |
class LinkAttr(IntEnum):
    """Indices of the per-link attributes a topology matrix can describe."""
    link_loss = 0
    link_latency = 1
    link_jitter = 2
    link_bandwidth_forward = 3
    link_bandwidth_backward = 4
17 |
18 |
class MatrixType(IntEnum):
    """Kinds of matrices used to describe a network link-by-link."""
    # Adjacency matrix to describe the network topology
    ADJACENCY_MATRIX = 0
    # Bandwidth matrix to describe the network bandwidth link-by-link
    BW_MATRIX = 1
    # Loss matrix to describe the network loss link-by-link
    LOSS_MATRIX = 2
    # Latency matrix to describe the network latency link-by-link
    LATENCY_MATRIX = 3
    # Jitter matrix to describe the network jitter link-by-link
    JITTER_MATRIX = 4
30 |
31 |
# mapping MatrixType to the link attribute except for the adjacency matrix
# (BW_MATRIX maps to the forward-bandwidth attribute only; no matrix type
# exists here for the backward direction).
MatType2LinkAttr = {
    MatrixType.LOSS_MATRIX: LinkAttr.link_loss,
    MatrixType.LATENCY_MATRIX: LinkAttr.link_latency,
    MatrixType.JITTER_MATRIX: LinkAttr.link_jitter,
    MatrixType.BW_MATRIX: LinkAttr.link_bandwidth_forward
}
39 |
40 |
class TopologyType(IntEnum):
    """Supported network topology shapes."""
    linear = 0  # Linear chain topology
    star = 1  # Star topology
    tree = 2  # Complete Binary Tree
    butterfly = 3  # Butterfly topology
    # NOTE(review): value 4 is skipped — presumably reserved or removed; confirm.
    mesh = 5  # Random Mesh topology
47 |
48 |
@dataclass
class Parameter:
    """A named list of initial integer values, used by array descriptions."""
    name: str
    init_value: List[int]
53 |
54 |
@dataclass
class TopologyConfig:
    """Configuration for the network topology.
    """
    name: str
    # number of nodes in the topology
    nodes: int
    topology_type: TopologyType
    # @array_description: the array description of the topology
    array_description: Optional[List[Parameter]] = field(default=None)
    # @json_description: the json description of the topology
    # (a JSON file path; relative paths resolve against the config base path)
    json_description: Optional[str] = field(default=None)
66 |
67 |
class ITopology(ABC):
    """Abstract network topology backed by per-link attribute matrices.

    Matrices are either loaded from a JSON description or generated from
    an array description. A compound topology wraps a list of concrete
    sub-topologies and is iterated via get_next_top().
    Fixes: removed useless f-string prefixes (W1309) combined with lazy
    %-style logging args; replaced `is True` comparison with a truth test.
    """

    def __init__(self, base_path: str, top: TopologyConfig, init_all_mat: bool = True):
        # base directory used to resolve relative json_description paths
        self.conf_base_path = base_path
        # matrix key -> matrix data (see get_matrix() for key semantics)
        self.all_mats = {}
        self.adj_matrix = None
        self.top_config = top
        self.compound_top = False
        self._current_top_index = 0  # keep track of the current topology
        # when compound_top is True, the topologies is a list of ITopology;
        # otherwise, it is empty.
        self.topologies = []
        if init_all_mat:
            self.init_all_mats()

    def __iter__(self):
        return iter(self.topologies)

    @abstractmethod
    def description(self) -> str:
        pass

    def get_next_top(self):
        """Return the next sub-topology of a compound topology, or None."""
        if not self.is_compound():
            logging.error("get_next_top() called on a non-compound topology.")
            return None
        if self._current_top_index >= len(self.topologies):
            logging.info("No more compound topologies available.")
            return None
        top = self.topologies[self._current_top_index]
        logging.info("########## Use Oasis compound topology %s.",
                     self._current_top_index)
        self._current_top_index += 1
        return top

    def is_compound(self):
        return self.compound_top

    @abstractmethod
    def generate_adj_matrix(self, num_of_nodes: int):
        pass

    @abstractmethod
    def generate_other_matrices(self, adj_matrix):
        pass

    def get_topology_type(self):
        return self.top_config.topology_type

    def get_matrix(self, mat_type: MatrixType):
        """Return the matrix for `mat_type`, or None if absent."""
        # when invoked, compound_top is expected to be False
        if self.is_compound():
            logging.error("Incorrect usage of compound topology get_matrix()")
        # NOTE: matrices loaded from JSON are keyed by plain ints; IntEnum
        # members hash/compare equal to their int values, so lookup works.
        return self.all_mats.get(mat_type)

    def init_all_mats(self):
        """Initialize matrices from json_description or array_description."""
        if self.top_config.json_description is not None:
            logging.info(
                'Load the matrix from json_description')
            self.load_all_mats(
                self.top_config.json_description)
        elif self.top_config.array_description is not None:
            logging.info(
                'Load the matrix from array_description')
            self.adj_matrix = self.generate_adj_matrix(self.top_config.nodes)
            self.all_mats[MatrixType.ADJACENCY_MATRIX] = self.adj_matrix
            self.generate_other_matrices(self.adj_matrix)

    def load_all_mats(self, json_file_path):
        """Load all matrices from the Json file.
        Args:
            json_file_path (string): The path of the Json file
                                     which save the matrix.
                An example:
                    src/config/mesh-network.json
        Raises:
            ValueError: when the file does not exist or contains None.
        """
        if json_file_path and not os.path.isabs(json_file_path):
            json_file_path = os.path.join(self.conf_base_path, json_file_path)
        logging.info("Loading matrix from Json file: %s", json_file_path)
        if not os.path.exists(json_file_path):
            raise ValueError(f"Json File {json_file_path} does not exist.")
        with open(json_file_path, 'r', encoding='utf-8') as f:
            json_content = json.load(f)
        if json_content is None:
            raise ValueError("The content of the Json file is None.")
        for mat_desc in json_content['data']:
            # Skip entries that don't describe a complete matrix.
            if 'matrix_type' not in mat_desc or 'matrix_data' not in mat_desc:
                continue
            logging.info("Matrix data: %s", mat_desc['matrix_data'])
            self.all_mats[mat_desc['matrix_type']] = mat_desc['matrix_data']
160 |
--------------------------------------------------------------------------------
/src/tools/extract_data.py:
--------------------------------------------------------------------------------
1 | from copy import deepcopy
2 | import os
3 | import sys
4 | import logging
5 | import re
6 | from dataclasses import dataclass
7 | from util import (is_same_path, str_to_mbps)
8 | '''
9 | Usage:
10 | sudo python3 src/tools/extract_data.py ./test_results/test1000
11 |
12 | This tool is used to plot the throughput results which are generated by "bats-protocol-rtt-loss-test.yaml"
13 |
14 | Output:
15 | 1. throughput_latency_loss.svg
16 | 2. throughput_latency_loss.csv (CSV file with all the throughput data table)
17 | '''
18 |
19 |
@dataclass
class PerfPointData:
    """One measured throughput sample for a (protocol, loss, latency) point."""
    protocol: str
    # packet loss rate in percent
    loss_rate: float = 0
    # one-way link latency in milliseconds
    latency: int = 0
    # receiver-side throughput in Mbps
    throughput: float = 0
26 |
27 |
def plot_compound_throughput(test_results_dir):
    '''
    Collect the throughput of different protocols under different
    loss rate and latency from the test results in `test_results_dir`
    (e.g. ./test_results/test1000/) and write them out as:
        1. throughput_latency_loss.csv (table of all throughput data)
        2. throughput_latency_loss.md  (same table in markdown)
    Fixes: removed useless f-string prefixes on lazy-% logging calls;
    guarded against iperf lines without a parsable throughput; stopped
    shadowing the outer os.walk() variables; empty table cells are now
    written explicitly so columns stay aligned.
    '''
    protocols = []
    perf_point = []
    for root, dirs, _files in os.walk(test_results_dir):
        # only inspect the immediate children of the results directory
        if not is_same_path(root, test_results_dir):
            continue
        if len(dirs) == 0:
            continue
        for dir_name in dirs:
            if not dir_name.startswith("topology-"):
                continue
            topology_description = ""
            with open(f"{test_results_dir}/{dir_name}/topology_description.txt", "r", encoding="utf-8") as f:
                topology_description = f.read()
            logging.info("Topology description: %s", topology_description)
            # get latency
            matches = re.findall(r"latency (\d+)ms", topology_description)
            if len(matches) == 0:
                logging.error("Failed to find latency in %s",
                              topology_description)
                continue
            latency = int(matches[0])
            logging.info("Latency: %s", latency)
            # get loss rate
            matches = re.findall(r"loss (\d+(\.\d+)?)%", topology_description)
            if len(matches) == 0:
                logging.error("Failed to find loss rate in %s",
                              topology_description)
                continue
            loss_rate = float(matches[0][0])
            logging.info("Loss rate: %s", loss_rate)
            # walk the per-topology directory for iperf result logs
            for _sub_root, _sub_dirs, files in os.walk(f"{test_results_dir}/{dir_name}"):
                for file_name in files:
                    if not ("IperfBatsTest" in file_name or "IperfTest" in file_name):
                        logging.info(
                            "Not a IperfBatsTest or IperfTest %s", file_name)
                        continue
                    protocol_name = file_name.split("_")[0]
                    if protocol_name not in protocols:
                        protocols.append(protocol_name)
                    perf_data = PerfPointData("", 0, 0, 0)
                    perf_data.loss_rate = loss_rate
                    perf_data.latency = latency
                    perf_data.protocol = protocol_name
                    with open(f"{test_results_dir}/{dir_name}/{file_name}", "r", encoding="utf-8") as f:
                        lines = f.readlines()
                        for line in lines:
                            # iperf prints the summary on the "receiver" line
                            if "receiver" not in line:
                                continue
                            logging.info("Found receiver line %s", line)
                            recv_throughput_pattern = r"(\d+(\.\d+)?) (K|M|G)?bits/sec"
                            matches2 = re.findall(
                                recv_throughput_pattern, line)
                            if not matches2:
                                # guard: previously an unmatched line crashed
                                # with IndexError on matches2[0]
                                logging.error(
                                    "No throughput found in line %s", line)
                                continue
                            recv_throughput = str_to_mbps(
                                matches2[0][0], matches2[0][2])
                            logging.info(
                                "Found receiver matches2 %s", recv_throughput)
                            perf_data.throughput = recv_throughput
                            break
                    logging.info("found a perf data %s", perf_data)
                    perf_point.append(perf_data)

    x = [perf.loss_rate for perf in perf_point]
    y = [perf.latency for perf in perf_point]

    # unique, sorted axis labels
    xlabel = sorted(set(x))
    ylabel = sorted(set(y))

    def _cell(loss_rate, latency):
        # first matching sample, or "" so the table cell stays aligned
        for perf in perf_point:
            if perf.loss_rate == loss_rate and perf.latency == latency:
                return perf.throughput
        return ""

    # all perf point data to CSV file too
    with open(f"{test_results_dir}/throughput_latency_loss.csv", "w", encoding="utf-8") as f:
        f.write("Loss Rate/Latency,")
        for latency in ylabel:
            f.write(f"{latency}ms,")
        f.write("\n")
        for loss_rate in xlabel:
            f.write(f"{loss_rate}%,")
            for latency in ylabel:
                f.write(f"{_cell(loss_rate, latency)},")
            f.write("\n")
    # all perf point data to markdown file with table format
    with open(f"{test_results_dir}/throughput_latency_loss.md", "a+", encoding="utf-8") as f:
        f.write("|Loss Rate/Latency|")
        for latency in ylabel:
            f.write(f"{latency}ms|")
        f.write("\n")
        f.write("|---|")
        for latency in ylabel:
            f.write("---|")
        f.write("\n")
        for loss_rate in xlabel:
            f.write(f"|{loss_rate}%|")
            for latency in ylabel:
                f.write(f"{_cell(loss_rate, latency)}|")
            f.write("\n")
139 |
140 |
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    # first CLI argument: directory containing the test results,
    # e.g. ./test_results/test1000
    input_dir = sys.argv[1]
    plot_compound_throughput(input_dir)
145 |
--------------------------------------------------------------------------------
/src/testsuites/test_competition.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import random
3 | import time
4 | import multiprocessing
5 | from dataclasses import dataclass, field
6 | from typing import Optional, List
7 |
8 | from interfaces.network import INetwork
9 | from protosuites.proto_info import IProtoInfo
10 | from .test import (ITestSuite, test_type_str_mapping)
11 |
12 |
13 | @dataclass
14 | class FlowParameter:
15 | # valid format for `flow_type`: [protocol]-[cc] or [protocol]
16 | flow_type: str = field(default="tcp")
17 | client_host: int = field(default=0)
18 | server_host: int = field(default=1)
19 | delay: Optional[int] = field(default=None)
20 | # the flow will last for `duration` seconds.
21 | duration: Optional[int] = field(default=None)
22 |
23 |
24 | @dataclass
25 | class FlowCompetitionConfig:
26 | competition_flow: Optional[List[FlowParameter]] = field(default=None)
27 |
28 |
29 | class FlowCompetitionTest(ITestSuite):
30 | '''
31 | Decorator of ITestSuite to run competition flows between hosts in the network.
32 | '''
33 |
34 | def __init__(self, config: FlowCompetitionConfig, test: ITestSuite) -> None:
35 | '''init with a ITestSuite object
36 | '''
37 | super().__init__(test.config)
38 | self.competition_flows = config.competition_flow if config.competition_flow is not None else []
39 | self.test = test
40 | self.min_start = 0
41 | interval = test.config.interval if test.config.interval is not None else 1
42 | interval_num = test.config.interval_num if test.config.interval_num is not None else 10
43 | self.max_interval = interval * interval_num
44 | self.max_start = min(self.max_interval, 10)
45 | self.min_duration = 10
46 | self.max_duration = 20
47 |
48 | def is_competition_test(self) -> bool:
49 | return True
50 |
51 | def _run_test(self, network: INetwork, proto_info: IProtoInfo):
52 | # multiprocessing to run competition flows
53 | processes = []
54 | start_barrier = multiprocessing.Barrier(
55 | len(self.competition_flows) + 1)
56 | for flow in self.competition_flows:
57 | t = multiprocessing.Process(target=self.run_flow,
58 | args=(flow, network, start_barrier))
59 | t.start()
60 | processes.append(t)
61 | # Wait for all processes to be ready
62 | start_barrier.wait()
63 | self.test.run(network, proto_info)
64 | for p in processes:
65 | p.join()
66 | return True
67 |
68 | def run_flow(self, flow, network: INetwork, start_barrier):
69 | start_barrier.wait()
70 | if flow.delay is None or flow.duration is None:
71 | flow.delay = random.uniform(self.min_start, self.max_start)
72 | flow.duration = random.uniform(
73 | self.min_duration, self.max_duration)
74 | if flow.delay + flow.duration > self.max_interval:
75 | flow.duration = self.max_interval - flow.delay
76 | time.sleep(flow.delay)
77 | client = network.get_hosts()[flow.client_host]
78 | server = network.get_hosts()[flow.server_host]
79 | # format the flow_log_name
80 | flow_log_name = self.result.record
81 | if self.config.test_type is not None:
82 | flow_log_name = self.result.result_dir + \
83 | self.base_name + "_" + f"{self.__class__.__name__}_{self.config.name}_{self.config.packet_type}" \
84 | f"_{test_type_str_mapping[self.config.test_type]}" \
85 | f"_h{flow.client_host}_h{flow.server_host}.log"
86 | server_ip = server.IP()
87 | flow_meta_data = f'Competition flow: {client.name()} -> {server.name()} ' \
88 | f', delay:{flow.delay}, duration:{flow.duration}'
89 | cc = None
90 | protocol = flow.flow_type
91 | if '-' in flow.flow_type:
92 | parts = flow.flow_type.split("-")
93 | cc = parts[1] if len(parts) > 1 else None
94 | protocol = parts[0] if len(parts) > 1 else None
95 | flow_meta_data += f', protocol:{protocol}, cc:{cc}'
96 | server.cmd(f'echo \"{flow_meta_data}\" > {flow_log_name}')
97 | server.cmd(f'iperf3 -s -p 5001 --logfile {flow_log_name} &')
98 | logging.info(
99 | "Starting %s, log: %s", flow_meta_data, flow_log_name)
100 | if 'tcp' in flow.flow_type:
101 | if cc is None:
102 | res = client.popen(
103 | f'iperf3 -c {server_ip} -p 5001 -i 1 -t {int(flow.duration)}').stdout.read().decode('utf-8')
104 | else:
105 | res = client.popen(
106 | f'iperf3 -c {server_ip} -p 5001 -i 1 -t '
107 | f'{int(flow.duration)} --congestion {cc}').stdout.read().decode('utf-8')
108 | logging.info('iperf client output: %s', res)
109 | if flow.flow_type in ('btp', 'brtp'):
110 | for intf in server.getIntfs():
111 | bats_iperf_server_cmd = f'bats_iperf -s -p 4000 -I {intf}' \
112 | f' -l {flow_log_name} &'
113 | logging.info(
114 | 'bats_iperf server cmd: %s', bats_iperf_server_cmd)
115 | server.cmd(f'{bats_iperf_server_cmd}')
116 | args_from_proto = None
117 | if flow.flow_type == 'btp':
118 | args_from_proto = '-m 0'
119 | if flow.flow_type == 'brtp':
120 | args_from_proto = '-m 1'
121 | if args_from_proto is None:
122 | logging.error("unrecognized flow type %s", flow.flow_type)
123 | bats_iperf_client_cmd = f'bats_iperf -c {server_ip} {args_from_proto} -p 4000 ' \
124 | f' -i 1 -t {int(flow.duration)}'
125 | logging.info('bats_iperf client cmd: %s', bats_iperf_client_cmd)
126 | res = client.popen(
127 | f'{bats_iperf_client_cmd}').stdout.read().decode('utf-8')
128 | logging.info(
129 | "Finished competition flow: %s -> %s", client.name(), server.name())
130 |
131 | def post_process(self):
132 | # rewrite the self.result.record since the real target is the `self.test`.
133 | self.result.record = self.test.result.record
134 | return True
135 |
136 | def pre_process(self):
137 | return True
138 |
--------------------------------------------------------------------------------
/src/tools/psutil_monitor.py:
--------------------------------------------------------------------------------
1 | from typing import List, Dict, Any
2 | import threading
3 | import time
4 | import logging
5 | import argparse
6 | import psutil
7 |
8 |
class PsutilNetworkMonitor():
    """Network monitor implementation using psutil"""

    def __init__(self):
        # names of the interfaces being monitored
        self.interfaces = []
        # sampling interval in seconds
        self.interval = 1.0
        # background sampling thread; created in start_monitoring()
        self.monitoring_thread = None
        # signals the sampling thread to exit its loop
        self.stop_event = threading.Event()
        # one dict per (sample, interface): timestamp/interface/bytes_sent/bytes_recv
        self.results = []
        self.is_monitoring = False
        # NOTE(review): these are repeatedly-halved running values
        # (avg = (avg + new) / 2), i.e. a smoothed estimate biased toward
        # recent samples, not a true arithmetic mean — confirm intent.
        self.average_tx_throughput = 0.0
        self.average_rx_throughput = 0.0

    def start_monitoring(self, interfaces: List[str], interval: float = 1.0) -> None:
        """Start monitoring the specified network interfaces"""
        # restart cleanly if a previous session is still running
        if self.is_monitoring:
            logging.warning(
                "Network monitoring is already running. Stopping previous session.")
            self.stop_monitoring()
        all_interfaces = psutil.net_if_addrs()
        if not interfaces:
            logging.error("No network interfaces specified for monitoring")
            return
        # log what the system exposes, to help diagnose name mismatches
        for iface in all_interfaces:
            logging.info(
                "========> Available Interface %s ", iface)

        self.interfaces = interfaces
        self.interval = interval
        self.results = []
        self.stop_event.clear()

        # daemon thread so an un-stopped monitor never blocks process exit
        self.monitoring_thread = threading.Thread(target=self._monitor_network)
        self.monitoring_thread.daemon = True
        self.monitoring_thread.start()
        self.is_monitoring = True
        logging.info(
            "Started monitoring interfaces: %s", ', '.join(self.interfaces))

    def stop_monitoring(self) -> None:
        """Stop monitoring network interfaces"""
        if not self.is_monitoring:
            return

        self.stop_event.set()
        # bounded join: the thread may be mid-sleep; 2s cap avoids hanging here
        if self.monitoring_thread and self.monitoring_thread.is_alive():
            self.monitoring_thread.join(timeout=2.0)
        self.is_monitoring = False
        logging.info("Network monitoring stopped")

    def get_results(self) -> Dict[str, Any]:
        """Get the monitoring results"""
        return {
            "interfaces": self.interfaces,
            "interval": self.interval,
            "measurements": self.results
        }

    def write_results_to_file(self, filename: str) -> None:
        """Write monitoring results to a file"""
        # append mode: results accumulate across runs in the same file
        with open(filename, 'a', encoding='utf-8') as f:
            f.write("\n=== Network Throughput Results ===\n")
            f.write("Timestamp,Interface,Bytes Sent (MB/s),Bytes Received (MB/s)\n")
            f.write(
                f"Average TX Throughput: {self.average_tx_throughput:.3f} MB/s\n")
            f.write(
                f"Average RX Throughput: {self.average_rx_throughput:.3f} MB/s\n")
            for entry in self.results:
                f.write(
                    f"{entry['timestamp']},{entry['interface']},{entry['bytes_sent']:.3f},{entry['bytes_recv']:.3f}\n")

    def _monitor_network(self) -> None:
        """Internal method to monitor network traffic"""
        previous_counters = psutil.net_io_counters(pernic=True)

        while not self.stop_event.is_set():
            time.sleep(self.interval)
            try:
                current_counters = psutil.net_io_counters(pernic=True)
                timestamp = time.strftime("%Y-%m-%d %H:%M:%S")

                for iface in self.interfaces:
                    if iface in previous_counters and iface in current_counters:
                        # delta bytes over the interval, converted to MB/s
                        bytes_sent = (current_counters[iface].bytes_sent -
                                      previous_counters[iface].bytes_sent) / self.interval / (1024 * 1024)
                        bytes_recv = (current_counters[iface].bytes_recv -
                                      previous_counters[iface].bytes_recv) / self.interval / (1024 * 1024)
                        self.average_tx_throughput = (
                            self.average_tx_throughput + bytes_sent) / 2
                        self.average_rx_throughput = (
                            self.average_rx_throughput + bytes_recv) / 2
                        # Store results
                        self.results.append({
                            "timestamp": timestamp,
                            "interface": iface,
                            "bytes_sent": bytes_sent,
                            "bytes_recv": bytes_recv
                        })
                        logging.info(
                            "Interface %s: Send: %.3f MB/s, Receive: %.3f MB/s", iface, bytes_sent, bytes_recv)
                previous_counters = current_counters
            except Exception as e:
                # keep sampling even if one read fails (e.g. interface vanished)
                logging.error("Error monitoring network: %s", e)
112 |
113 |
if __name__ == '__main__':
    # CLI entry: monitor the given interfaces for a fixed time and dump
    # the per-interval throughput samples to the output file.
    logging.basicConfig(level=logging.INFO)

    parser = argparse.ArgumentParser(
        description='Monitor network throughput using psutil')
    parser.add_argument(
        '--interfaces', nargs='+', required=True,
        help='List of network interfaces to monitor')
    parser.add_argument(
        '--interval', type=float, default=1.0,
        help='Interval in seconds for monitoring (default: 1.0)')
    parser.add_argument(
        '--monitoring_time', type=int, default=10,
        help='Time in seconds to monitor (default: 10)')
    parser.add_argument(
        '--output', type=str, default='network_monitoring_results.csv',
        help='Output file name for monitoring results (default: network_monitoring_results.csv)')
    # Fix: parse the command line once instead of calling parse_args()
    # repeatedly for every option access.
    args = parser.parse_args()
    monitor = PsutilNetworkMonitor()
    monitor.start_monitoring(
        interfaces=args.interfaces,
        interval=args.interval)
    logging.info("Monitoring started on interfaces: %s",
                 monitor.interfaces)
    time.sleep(args.monitoring_time)
    monitor.stop_monitoring()
    results = monitor.get_results()
    monitor.write_results_to_file(args.output)
    logging.info("Monitoring results saved to %s", args.output)
141 | logging.info("Monitoring results saved to %s", parser.parse_args().output)
142 |
--------------------------------------------------------------------------------
/src/start.py:
--------------------------------------------------------------------------------
1 | import os
2 | import sys
3 | import argparse
4 | import logging
5 |
6 | from var.global_var import g_root_path
7 | from tools.util import parse_test_file_name
8 | from containernet.containernet import (
9 | NestedContainernet, load_nested_config)
10 |
11 | """ ################# USAGE OF THE SCRIPT ##########################
12 | start.py is used to initialize the nested containernet environment,
13 | and inside the nested containernet, it will execute the test cases
14 | through the run_test.py script.
15 | """
16 |
17 |
def parse_args():
    """
    Build the command line argument parser for start.py.

    Returns:
        argparse.ArgumentParser: parser configured with all supported
        options. Boolean-like flags are plain strings ('True'/'False')
        and are compared textually by the caller.
    """
    parser = argparse.ArgumentParser()
    # (flag, keyword arguments) for every supported option.
    option_specs = [
        ('--containernet',
         dict(help='nested containernet name in the YAML file',
              dest='containernet', type=str, default="")),
        ('--testbed',
         dict(help='The name of testbed that will be used',
              dest='testbed', type=str, default="")),
        ('-t',
         dict(help='YAML file for the test case to be executed',
              dest='tests_config_file', type=str, default="")),
        ('-p',
         dict(help='base path of all the YAML files',
              dest='yaml_base_path', type=str, default="")),
        ('-d',
         dict(help='enable debug mode',
              dest='debug_log', type=str, default='False')),
        ('--halt',
         dict(help='halt when test is done',
              dest='halt', type=str, default="False")),
    ]
    for flag, kwargs in option_specs:
        parser.add_argument(flag, **kwargs)
    return parser
54 |
55 |
def build_nested_env(containernet_name, yaml_base_path_input, oasis_workspace_input):
    """
    Load the nested containernet config and build the nested environment.

    Args:
        containernet_name: name of the nested containernet entry in the YAML file.
        yaml_base_path_input: base directory holding the YAML config files.
        oasis_workspace_input: root directory of the oasis project sources.

    Returns:
        A NestedContainernet instance ready to be started, or None when the
        config file does not exist. Exits the process when the named entry
        is missing from the config file (empty image).
    """
    # The nested config file always lives directly under the YAML base path;
    # os.path.join handles the separator, no manual "/" needed.
    absolute_path_of_config_file = os.path.join(
        yaml_base_path_input, 'nested-containernet-config.yaml')
    if not os.path.exists(absolute_path_of_config_file):
        # was: logging.info(f"... %s", {path}) -- an f-prefix mixed with
        # %-style args, and a one-element *set* literal logged instead of
        # the path itself
        logging.error("Error: %s does not exist.",
                      absolute_path_of_config_file)
        return None
    nested_config = load_nested_config(
        absolute_path_of_config_file, containernet_name)
    if nested_config.image == "":
        logging.error(
            "Error: %s is not in the nested config file.", containernet_name)
        sys.exit(1)
    test_name = "default "
    # execute the test cases on nested containernet
    return NestedContainernet(
        nested_config, yaml_base_path_input, oasis_workspace_input, test_name)
74 |
75 |
if __name__ == '__main__':
    local_parser = parse_args()
    ns, args = local_parser.parse_known_args()
    cur_test_yaml_file = ns.tests_config_file
    nested_containernet = ns.containernet
    baremetal_testbed = ns.testbed
    yaml_base_path = ns.yaml_base_path
    debug_log = ns.debug_log
    halt = ns.halt
    # Boolean flags arrive as the strings 'True'/'False' (see parse_args).
    if debug_log == 'True':
        logging.basicConfig(level=logging.DEBUG, format='%(asctime)s %(levelname)s: %(message)s',
                            datefmt='%Y-%m-%d %H:%M:%S')
        logging.info("Debug mode is enabled.")
    else:
        logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s: %(message)s',
                            datefmt='%Y-%m-%d %H:%M:%S')
        logging.info("Debug mode is disabled.")
    current_process_dir = os.getcwd()
    # plain %-style logging; the original used a pointless f-prefix here
    logging.info("Current directory the process: %s", current_process_dir)

    base_path = os.path.dirname(os.path.abspath(__file__))
    oasis_workspace = os.path.dirname(base_path)
    logging.info("Base path of the oasis project: %s", oasis_workspace)
    if current_process_dir == oasis_workspace:
        logging.info("running in the workspace directory of oasis")
    else:
        logging.info("running outside the workspace directory of oasis")
    # ############### workspace dir and process dir ################
    # (1) python source files are always started from the `oasis_workspace`
    # (2) yaml/json configuration files are always started from the `yaml_base_path`
    # ##############################################################
    cur_test_yaml_file, _ = parse_test_file_name(cur_test_yaml_file)
    if not cur_test_yaml_file:
        logging.error("Error: invalid test file name.")
        sys.exit(1)
    # check whether yaml_base_path is an absolute path
    if not os.path.isabs(yaml_base_path):
        yaml_base_path = os.path.join(current_process_dir, yaml_base_path)
    test_case_file = os.path.join(
        yaml_base_path, cur_test_yaml_file)
    if not os.path.exists(test_case_file):
        # was: logging.info(f"... %s", {test_case_file}) -- logged a
        # one-element set instead of the path
        logging.error("Error: %s does not exist.", test_case_file)
        sys.exit(1)
    # @Note: Oasis support test on containernet and bare metal testbed.
    # For containernet, network is constructed and maintained by containernet.
    # For bare metal testbed, network is constructed by real physical machines.
    if nested_containernet == "" and baremetal_testbed == "":
        logging.error(
            "Error: neither nested_containernet nor baremetal_testbed is provided.")
        sys.exit(1)
    if nested_containernet != "" and baremetal_testbed == "":
        logging.info("Oasis is running on nested containernet.")
    if nested_containernet == "" and baremetal_testbed != "":
        logging.info("Oasis is running on baremetal testbed [%s].",
                     baremetal_testbed)
    # Both modes need the nested containernet due to mininet dependencies
    nested_env = build_nested_env(
        nested_containernet, yaml_base_path, oasis_workspace)
    if not nested_env:
        logging.error("Error: failed to build the nested containernet.")
        sys.exit(1)
    nested_env.start()
    nested_env.execute(
        f"python3 {g_root_path}src/run_test.py {yaml_base_path} {oasis_workspace} "
        f"{ns.tests_config_file} {debug_log} {halt}")
    nested_env.stop()
142 |
--------------------------------------------------------------------------------
/src/data_analyzer/rtt_analyzer.py:
--------------------------------------------------------------------------------
1 | import logging
2 | import re
3 | import os
4 | import matplotlib.pyplot as plt
5 | import numpy as np
6 | from .analyzer import IDataAnalyzer
7 |
8 |
class RTTAnalyzer(IDataAnalyzer):
    """Analyze and visualize multiple input rtt logs.

    Each input log is scanned for per-packet RTT samples ("new_rtt = N")
    and 10-packet average samples ("average 10: N"); the averaged series
    is plotted against time and the per-packet samples as a CDF, both to
    svg files. RTT samples are clamped to 5000 ms.
    """

    def analyze(self):
        """No offline analysis step; all parsing happens in visualize()."""
        return True

    def visualize(self):
        """
        plot rtt graph

        For every readable log in self.config.input, build the 10-packet
        average RTT series and plot it against elapsed time (ms), then
        delegate the per-packet CDF plot to plot_rtt_cdf().
        """
        data_rtt_agv10 = {}  # log base name -> 10-packet average RTT series
        data_rtt_all = {}    # log base name -> per-packet RTT series
        plt.clf()
        plt.rcParams['font.family'] = 'serif'
        plt.xlabel('Time (ms)', fontsize=8,
                   fontweight='bold')
        plt.ylabel('RTT (ms)', fontsize=8,
                   fontweight='bold')
        default_title = "Average RTT for each consecutive 10 packets\n"
        default_title += self.config.subtitle
        plt.title(default_title, fontsize=10, fontweight="bold")
        max_lines = 0
        x = None
        for input_log in self.config.input:
            logging.info("Visualize rtt log: %s", input_log)
            if not os.path.exists(input_log):
                logging.error("rtt log file %s not found", input_log)
                continue
            log_base_name = os.path.basename(input_log)
            data_rtt_agv10[log_base_name] = []
            data_rtt_all[log_base_name] = []
            with open(f"{input_log}", "r", encoding='utf-8') as f:
                lines = f.readlines()
            start_time = 0
            end_time = 0
            interval = 0.00
            for line in lines:
                # header lines carry the capture window and sample interval
                if "Start timestamp:" in line:
                    start_time = int(re.findall(
                        r'Start timestamp: (\d+)', line)[0])
                if "End timestamp:" in line:
                    end_time = int(re.findall(
                        r'End timestamp: (\d+)', line)[0])
                if "Interval:" in line:
                    interval_list = re.findall(
                        r'Interval: (\d+\.\d+)', line)
                    if interval_list:
                        interval = float(interval_list[0])
                average_10 = self.find_agv10_rtt(line)
                if average_10 is not None:
                    data_rtt_agv10[log_base_name].append(average_10)
                per_packet_rtt = self.find_per_packet_rtt(line)
                if per_packet_rtt is not None:
                    data_rtt_all[log_base_name].append(per_packet_rtt)
            if len(data_rtt_agv10[log_base_name]) == 0:
                logging.warning(
                    "no avg10 data in %s, lines %s", log_base_name, len(lines))
                continue
            if len(data_rtt_all[log_base_name]) == 0:
                logging.warning(
                    "no per packet rtt data in %s, lines %s", log_base_name, len(lines))
                continue
            if max_lines == 0:
                max_lines = len(data_rtt_agv10[log_base_name])
            if len(data_rtt_agv10[log_base_name]) < max_lines:
                # pad short series with the clamp value (5000 ms) so every
                # curve covers the same number of sample points
                data_rtt_agv10[log_base_name] += [5000] * \
                    (max_lines - len(data_rtt_agv10[log_base_name]))
            # start plot x-y graph: x is time in ms,
            log_label = log_base_name.split("_")[0]
            # x axis: one point per 10-packet window, spaced 10*interval (ms);
            # derived from the first log that reaches this point
            if x is None:
                x = np.arange(0, (end_time - start_time) *
                              1000, int(10*interval*1000))
            valid_point_num = min(len(x), len(
                data_rtt_agv10[log_base_name]))
            plt.plot(x[:valid_point_num], data_rtt_agv10[log_base_name][:valid_point_num],
                     label=f"{log_label}")
            plt.legend(loc='upper left', fontsize=8)
        if x is None:
            logging.warning("no data to plot")
            return
        if not self.config.output:
            self.config.output = "rtt.svg"
        if '.svg' not in self.config.output:
            # config.output without an svg name is treated as a path prefix
            plt.savefig(f"{self.config.output}rtt.svg")
            logging.info("Visualize rtt diagram saved to %s",
                         self.config.output)
        else:
            plt.savefig(f"{self.config.output}")
            logging.info("Visualize rtt diagram saved to %s",
                         self.config.output)
        self.plot_rtt_cdf(data_rtt_all)

    def find_agv10_rtt(self, line):
        """
        find the average 10 rtt from the log lines

        Returns the integer after "average 10: " clamped to 5000 ms,
        or None when the line carries no such sample.
        """
        average_10 = re.findall(r'average 10: (\d+)', line)
        if len(average_10) != 0:
            average_10 = int(average_10[0])
            average_10 = min(average_10, 5000)
            return average_10
        return None

    def find_per_packet_rtt(self, line):
        """
        find the per packet rtt from the log lines

        Returns the integer after "new_rtt = " clamped to 5000 ms,
        or None when the line carries no such sample.
        """
        per_packet_rtt = re.findall(r'new_rtt = (\d+)', line)
        if len(per_packet_rtt) != 0:
            per_packet_rtt = int(per_packet_rtt[0])
            per_packet_rtt = min(per_packet_rtt, 5000)
            return per_packet_rtt
        return None

    def plot_rtt_cdf(self, rtt_data: dict):
        """Plot the CDF of per-packet RTT samples for every log.

        Args:
            rtt_data: mapping of log base name -> list of RTT samples (ms).
        """
        plt.clf()
        plt.rcParams['font.family'] = 'serif'
        plt.xlabel('RTT (ms)', fontsize=8,
                   fontweight='bold')
        plt.ylabel('Cumulative Probability', fontsize=8,
                   fontweight='bold')
        default_title = "TCP messages RTT CDF\n"
        default_title += self.config.subtitle
        plt.title(default_title, fontsize=10, fontweight="bold")
        for log_base_name, rtt_list in rtt_data.items():
            if len(rtt_list) == 0:
                logging.warning(
                    "no per packet rtt data in %s", log_base_name)
                continue
            # Sort the RTT values
            rtt_list.sort()
            # Calculate the cumulative probabilities
            cdf = np.arange(1, len(rtt_list) + 1) / len(rtt_list)
            # Plot the CDF
            log_label = log_base_name.split("_")[0]
            plt.plot(rtt_list, cdf, label=f"{log_label}")
            plt.legend(loc='lower right', fontsize=8)
        # Save the plot to svg file
        if '.svg' not in self.config.output:
            plt.savefig(f"{self.config.output}rtt_cdf.svg")
            logging.info("Visualize RTT CDF diagram saved to %s",
                         self.config.output)
        else:
            path = os.path.dirname(self.config.output)
            # was: f"{path}rtt_cdf.svg", which glued the file name onto the
            # directory name without a separator (e.g. "outrtt_cdf.svg");
            # os.path.join also degrades correctly when path is "".
            plt.savefig(os.path.join(path, "rtt_cdf.svg"))
            logging.info("Visualize RTT CDF diagram saved to %s",
                         path)
160 |
--------------------------------------------------------------------------------