├── setup.cfg ├── requirements.txt ├── .gitignore ├── .travis.yml ├── empty.env ├── cert_config.txt ├── result.py ├── unique_random_slugs.py ├── web ├── cleanup.sh ├── Caddyfile ├── styles.css ├── index.html └── script.js ├── LICENSE.md ├── .github ├── dependabot.yml ├── workflows │ ├── check.yml │ ├── deploy.yml │ ├── cleanup.yml │ ├── aggregate.py │ └── interop.yml └── FUNDING.yml ├── implementations.py ├── pull.py ├── certs.sh ├── implementations.json ├── docker-compose.yml ├── run.py ├── trace.py ├── README.md ├── interop.py └── testcases.py /setup.cfg: -------------------------------------------------------------------------------- 1 | [flake8] 2 | ignore=E501,W503 3 | -------------------------------------------------------------------------------- /requirements.txt: -------------------------------------------------------------------------------- 1 | pycryptodome 2 | random-slugs 3 | termcolor 4 | prettytable 5 | pyshark 6 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | certs/ 2 | logs/ 3 | logs_*/ 4 | *.json 5 | !implementations.json 6 | web/latest 7 | 8 | *.egg-info/ 9 | __pycache__ 10 | build/ 11 | dist/ 12 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | dist: xenial 2 | install: pip install -r requirements.txt black flake8 isort 3 | language: python 4 | python: "3.8" 5 | script: .travis/script 6 | sudo: true 7 | -------------------------------------------------------------------------------- /empty.env: -------------------------------------------------------------------------------- 1 | CLIENT_PARAMS="" 2 | CERTS="" 3 | CRON="" 4 | DOWNLOADS="" 5 | IPERF_CONGESTION="" 6 | QLOGDIR="" 7 | REQUESTS="" 8 | SCENARIO="" 9 | SERVER_PARAMS="" 10 | SSLKEYLOGFILE="" 11 | TESTCASE_CLIENT="" 12 | 
TESTCASE_SERVER="" 13 | WWW="" 14 | WAITFORSERVER="" 15 | -------------------------------------------------------------------------------- /cert_config.txt: -------------------------------------------------------------------------------- 1 | [ req ] 2 | distinguished_name = req_distinguished_name 3 | x509_extensions = v3_ca 4 | dirstring_type = nobmp 5 | [ req_distinguished_name ] 6 | [ v3_ca ] 7 | keyUsage=critical, keyCertSign 8 | subjectKeyIdentifier=hash 9 | authorityKeyIdentifier=keyid:always,issuer:always 10 | basicConstraints=critical,CA:TRUE,pathlen:100 11 | -------------------------------------------------------------------------------- /result.py: -------------------------------------------------------------------------------- 1 | from enum import Enum 2 | 3 | 4 | class TestResult(Enum): 5 | SUCCEEDED = "succeeded" 6 | FAILED = "failed" 7 | UNSUPPORTED = "unsupported" 8 | 9 | def symbol(self): 10 | if self == TestResult.SUCCEEDED: 11 | return "✓" 12 | elif self == TestResult.FAILED: 13 | return "✕" 14 | elif self == TestResult.UNSUPPORTED: 15 | return "?" 16 | -------------------------------------------------------------------------------- /unique_random_slugs.py: -------------------------------------------------------------------------------- 1 | from random_slugs import generate_slug as original_generate_slug 2 | 3 | _used_slugs = set() 4 | 5 | 6 | def generate_slug(): 7 | for _ in range(100000): 8 | slug = original_generate_slug() 9 | if slug not in _used_slugs: 10 | _used_slugs.add(slug) 11 | return slug 12 | raise ValueError("Unable to generate unique slug after 100k attempts") 13 | -------------------------------------------------------------------------------- /web/cleanup.sh: -------------------------------------------------------------------------------- 1 | #!/bin/bash 2 | 3 | # This script is used to delete old logs from the log directory. 4 | # It removes log directories it deletes from logs.json. 
5 | 6 | die() { 7 | echo "$0 " 8 | exit 1 9 | } 10 | 11 | if [ -z "$1" ] || [ -z "$2" ]; then 12 | die 13 | fi 14 | 15 | LOGDIR=$1 16 | AGE=$2 17 | 18 | find "$LOGDIR" -maxdepth 1 -type d -mtime "+$AGE" | while read -r line; do 19 | DIR=$(basename "$line") 20 | echo "Deleting $DIR" 21 | jq ". - [ \"$DIR\" ]" "$LOGDIR/logs.json" | sponge "$LOGDIR/logs.json" 22 | rm -rf "$line" 23 | done 24 | -------------------------------------------------------------------------------- /LICENSE.md: -------------------------------------------------------------------------------- 1 | Copyright 2019 Jana Iyengar, Marten Seemann 2 | 3 | Licensed under the Apache License, Version 2.0 (the "License"); 4 | you may not use this file except in compliance with the License. 5 | You may obtain a copy of the License at 6 | 7 | http://www.apache.org/licenses/LICENSE-2.0 8 | 9 | Unless required by applicable law or agreed to in writing, software 10 | distributed under the License is distributed on an "AS IS" BASIS, 11 | WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 12 | See the License for the specific language governing permissions and 13 | limitations under the License. 14 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | # To get started with Dependabot version updates, you'll need to specify which 2 | # package ecosystems to update and where the package manifests are located. 
3 | # Please see the documentation for all configuration options: 4 | # https://docs.github.com/code-security/dependabot/dependabot-version-updates/configuration-options-for-the-dependabot.yml-file 5 | 6 | version: 2 7 | updates: 8 | - package-ecosystem: "github-actions" 9 | directory: "/" 10 | schedule: 11 | interval: "weekly" 12 | - package-ecosystem: "pip" 13 | directory: "/" 14 | schedule: 15 | interval: "weekly" 16 | -------------------------------------------------------------------------------- /web/Caddyfile: -------------------------------------------------------------------------------- 1 | interop.seemann.io:443 2 | 3 | # The website must be mounted at /var/www/web. 4 | # The log directory must be mounted at /var/www/logs. 5 | root /logs/* /var/www/ 6 | root * /var/www/web 7 | 8 | # Allow external sites to download our JSON files. 9 | @json { 10 | path *.json 11 | } 12 | handle @json { 13 | header Access-Control-Allow-Origin "*" 14 | } 15 | 16 | # Allow external sites to download our qlog files. 17 | @qlog { 18 | path *.qlog 19 | } 20 | handle @qlog { 21 | header Access-Control-Allow-Origin "*" 22 | } 23 | 24 | # Allow external sites to download our sqlog files. 25 | @sqlog { 26 | path *.sqlog 27 | } 28 | handle @sqlog { 29 | header Access-Control-Allow-Origin "*" 30 | } 31 | 32 | file_server browse 33 | -------------------------------------------------------------------------------- /.github/workflows/check.yml: -------------------------------------------------------------------------------- 1 | on: [push, pull_request] 2 | 3 | jobs: 4 | check: 5 | runs-on: ubuntu-latest 6 | steps: 7 | - uses: actions/checkout@v6 8 | - uses: actions/setup-python@v6 9 | - name: install tools 10 | run: pip install flake8 black 11 | - name: enforce coding styles using flake8 12 | run: flake8 . 13 | - name: run Black linter 14 | if: success() || failure() # run this step even if the previous one failed 15 | run: black --check --diff . 
16 | - name: check that implementations.json is valid 17 | if: success() || failure() # run this step even if the previous one failed 18 | run: python implementations.py 19 | - uses: ludeeus/action-shellcheck@master 20 | -------------------------------------------------------------------------------- /implementations.py: -------------------------------------------------------------------------------- 1 | import json 2 | from enum import Enum 3 | 4 | IMPLEMENTATIONS = {} 5 | 6 | 7 | class Role(Enum): 8 | BOTH = "both" 9 | SERVER = "server" 10 | CLIENT = "client" 11 | 12 | 13 | with open("implementations.json", "r") as f: 14 | data = json.load(f) 15 | for name, val in data.items(): 16 | IMPLEMENTATIONS[name] = {"image": val["image"], "url": val["url"]} 17 | role = val["role"] 18 | if role == "server": 19 | IMPLEMENTATIONS[name]["role"] = Role.SERVER 20 | elif role == "client": 21 | IMPLEMENTATIONS[name]["role"] = Role.CLIENT 22 | elif role == "both": 23 | IMPLEMENTATIONS[name]["role"] = Role.BOTH 24 | else: 25 | raise Exception("unknown role: " + role) 26 | -------------------------------------------------------------------------------- /.github/FUNDING.yml: -------------------------------------------------------------------------------- 1 | # These are supported funding model platforms 2 | 3 | github: [marten-seemann] # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2] 4 | patreon: # Replace with a single Patreon username 5 | open_collective: # Replace with a single Open Collective username 6 | ko_fi: # Replace with a single Ko-fi username 7 | tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel 8 | community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry 9 | liberapay: # Replace with a single Liberapay username 10 | issuehunt: # Replace with a single IssueHunt username 11 | otechie: # Replace with a single Otechie username 12 | lfx_crowdfunding: # Replace with a single LFX 
Crowdfunding project-name e.g., cloud-foundry 13 | custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2'] 14 | -------------------------------------------------------------------------------- /web/styles.css: -------------------------------------------------------------------------------- 1 | body.loading table.result, 2 | body.loading .navbar-nav .start-time, 3 | body.loading .navbar-nav .duration, 4 | body.loading .navbar-nav .end-time { 5 | opacity: 0.3; 6 | } 7 | 8 | .btn-xs { 9 | margin: 0 .1rem .1rem 0; 10 | padding: .01rem .025rem .01rem .025rem; 11 | font-size: .7rem; 12 | border-width: 2px; 13 | border-radius: 4px; 14 | font-weight: bold; 15 | } 16 | 17 | .btn-highlight { 18 | background-color: yellow !important; 19 | color: black !important; 20 | } 21 | 22 | .hover-xy { 23 | background-color: #ecf0f1; 24 | color: #e74c3c; 25 | } 26 | 27 | .table th { 28 | vertical-align: middle; 29 | } 30 | tr.hover-xy > th, th.hover-xy { 31 | background-color: #cfd9db; 32 | color: #e74c3c; 33 | } 34 | 35 | .sponsor { 36 | width: max-content; 37 | 38 | .btn { 39 | color: black; 40 | text-decoration: none; 41 | margin-bottom: 5px; 42 | 43 | i { 44 | margin-right: 5px; 45 | } 46 | } 47 | } 48 | -------------------------------------------------------------------------------- /pull.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import os 3 | import sys 4 | 5 | from implementations import IMPLEMENTATIONS 6 | 7 | print("Pulling the simulator...") 8 | os.system("docker pull martenseemann/quic-network-simulator") 9 | 10 | print("\nPulling the iperf endpoint...") 11 | os.system("docker pull martenseemann/quic-interop-iperf-endpoint") 12 | 13 | 14 | def get_args(): 15 | parser = argparse.ArgumentParser() 16 | parser.add_argument("-i", "--implementations", help="implementations to pull") 17 | return parser.parse_args() 18 | 19 | 20 | implementations = {} 21 | if get_args().implementations: 22 | 
for s in get_args().implementations.split(","): 23 | if s not in [n for n, _ in IMPLEMENTATIONS.items()]: 24 | sys.exit("implementation " + s + " not found.") 25 | implementations[s] = IMPLEMENTATIONS[s] 26 | else: 27 | implementations = IMPLEMENTATIONS 28 | 29 | for name, value in implementations.items(): 30 | print("\nPulling " + name + "...") 31 | os.system("docker pull " + value["image"]) 32 | -------------------------------------------------------------------------------- /.github/workflows/deploy.yml: -------------------------------------------------------------------------------- 1 | name: Deploy website 2 | 3 | on: 4 | push: 5 | branches: 6 | - master 7 | 8 | jobs: 9 | deploy: 10 | runs-on: ubuntu-latest 11 | steps: 12 | - uses: actions/checkout@v6 13 | - name: Upload website to interop.seemann.io 14 | uses: burnett01/rsync-deployments@33214bd98ba4ac2be90f5976672b3f030fce9ce4 # v7.1.0 15 | with: 16 | switches: -avzr --delete 17 | path: web/ 18 | remote_path: ${{ secrets.INTEROP_SEEMANN_IO_WEBSITE_DIR }} 19 | remote_host: interop.seemann.io 20 | remote_user: ${{ secrets.INTEROP_SEEMANN_IO_USER }} 21 | remote_key: ${{ secrets.INTEROP_SEEMANN_IO_SSH_KEY }} 22 | - name: Restart server 23 | uses: appleboy/ssh-action@823bd89e131d8d508129f9443cad5855e9ba96f0 # v1.2.4 24 | with: 25 | host: interop.seemann.io 26 | username: ${{ secrets.INTEROP_SEEMANN_IO_USER }} 27 | key: ${{ secrets.INTEROP_SEEMANN_IO_SSH_KEY }} 28 | script: service website restart 29 | -------------------------------------------------------------------------------- /.github/workflows/cleanup.yml: -------------------------------------------------------------------------------- 1 | name: cleanup 2 | on: 3 | schedule: 4 | - cron: "0 */8 * * *" # every 8h 5 | 6 | jobs: 7 | cleanup: 8 | runs-on: ubuntu-latest 9 | steps: 10 | - name: Delete old logs on the server 11 | uses: appleboy/ssh-action@823bd89e131d8d508129f9443cad5855e9ba96f0 # v1.2.4 12 | with: 13 | host: interop.seemann.io 14 | username: ${{ 
secrets.INTEROP_SEEMANN_IO_USER }} 15 | key: ${{ secrets.INTEROP_SEEMANN_IO_SSH_KEY }} 16 | script: | 17 | delete_oldest_folder() { 18 | OLDEST_DIR=$(find "${{ vars.LOG_DIR }}" -mindepth 1 -maxdepth 1 -type d -printf '%T+ %p\n' | sort | head -n 1 | cut -d" " -f2-) 19 | if [[ -n "$OLDEST_DIR" ]]; then 20 | echo "Deleting oldest directory: $OLDEST_DIR" 21 | rm -rf "$OLDEST_DIR" 22 | fi 23 | } 24 | 25 | # Loop until enough space is available or no directories left to delete 26 | while true; do 27 | AVAILABLE_SPACE_GB=$(df -BG "${{ vars.LOG_DIR }}" | tail -n 1 | awk '{print $4}' | sed 's/G//') 28 | echo "Available Space: $AVAILABLE_SPACE_GB GB" 29 | 30 | if [[ "$AVAILABLE_SPACE_GB" -lt 50 ]]; then 31 | echo "Less than 50 GB available. Trying to clean up..." 32 | delete_oldest_folder 33 | else 34 | echo "Enough space available." 35 | break 36 | fi 37 | done 38 | 39 | TEMP_FILE=$(mktemp) 40 | find "${{ vars.LOG_DIR }}" -mindepth 1 -maxdepth 1 -type d -not -name 'lost+found' -exec basename {} \; | sort > "$TEMP_FILE" 41 | jq -R -s 'split("\n") | map(select(. 
!= ""))' "$TEMP_FILE" > "${{ vars.LOG_DIR }}/logs.json" 42 | rm -f "$TEMP_FILE" 43 | -------------------------------------------------------------------------------- /certs.sh: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env bash 2 | 3 | set -e 4 | 5 | if [ -z "$1" ] || [ -z "$2" ]; then 6 | echo "$0 " 7 | exit 1 8 | fi 9 | 10 | CERTDIR=$1 11 | CHAINLEN=$2 12 | 13 | mkdir -p "$CERTDIR" || true 14 | 15 | # Generate Root CA and certificate 16 | openssl ecparam -name prime256v1 -genkey -out "$CERTDIR"/ca_0.key 17 | openssl req -x509 -sha256 -nodes -days 10 -key "$CERTDIR"/ca_0.key \ 18 | -out "$CERTDIR"/cert_0.pem \ 19 | -subj "/O=interop runner Root Certificate Authority/" \ 20 | -config cert_config.txt \ 21 | -extensions v3_ca \ 22 | 2>/dev/null 23 | 24 | # Inflate certificate for the amplification test 25 | fakedns="" 26 | if [ "$CHAINLEN" != "1" ]; then 27 | for i in $(seq 1 20); do 28 | fakedns="$fakedns,DNS:$(LC_CTYPE=C tr -dc '[:alnum:]' /dev/null 43 | 44 | # Sign the certificate 45 | j=$((i - 1)) 46 | if [[ $i < "$CHAINLEN" ]]; then 47 | openssl x509 -req -sha256 -days 10 -in "$CERTDIR"/cert.csr -out "$CERTDIR"/cert_"$i".pem \ 48 | -CA "$CERTDIR"/cert_"$j".pem -CAkey "$CERTDIR"/ca_"$j".key -CAcreateserial \ 49 | -extfile cert_config.txt \ 50 | -extensions v3_ca \ 51 | 2>/dev/null 52 | else 53 | openssl x509 -req -sha256 -days 10 -in "$CERTDIR"/cert.csr -out "$CERTDIR"/cert_"$i".pem \ 54 | -CA "$CERTDIR"/cert_"$j".pem -CAkey "$CERTDIR"/ca_"$j".key -CAcreateserial \ 55 | -extfile <(printf "subjectAltName=DNS:server,DNS:server4,DNS:server6,DNS:server46%s" "$fakedns") \ 56 | 2>/dev/null 57 | fi 58 | done 59 | 60 | mv "$CERTDIR"/cert_0.pem "$CERTDIR"/ca.pem 61 | cp "$CERTDIR"/ca_"$CHAINLEN".key "$CERTDIR"/priv.key 62 | 63 | # combine certificates 64 | for i in $(seq "$CHAINLEN" -1 1); do 65 | cat "$CERTDIR"/cert_"$i".pem >>"$CERTDIR"/cert.pem 66 | rm "$CERTDIR"/cert_"$i".pem "$CERTDIR"/ca_"$i".key 67 | done 68 | 
rm -f "$CERTDIR"/*.srl "$CERTDIR"/ca_0.key "$CERTDIR"/cert.csr 69 | -------------------------------------------------------------------------------- /.github/workflows/aggregate.py: -------------------------------------------------------------------------------- 1 | import argparse 2 | import json 3 | import sys 4 | 5 | 6 | def get_args(): 7 | parser = argparse.ArgumentParser() 8 | parser.add_argument( 9 | "-s", "--server", help="server implementations (comma-separated)" 10 | ) 11 | parser.add_argument( 12 | "-c", "--client", help="client implementations (comma-separated)" 13 | ) 14 | parser.add_argument("-t", "--start-time", help="start time") 15 | parser.add_argument("-l", "--log-dir", help="log directory") 16 | parser.add_argument("-o", "--output", help="output file (stdout if not set)") 17 | return parser.parse_args() 18 | 19 | 20 | servers = get_args().server.split(",") 21 | clients = get_args().client.split(",") 22 | result = { 23 | "servers": servers, 24 | "clients": clients, 25 | "log_dir": get_args().log_dir, 26 | "start_time": int(get_args().start_time), 27 | "results": [], 28 | "measurements": [], 29 | "tests": {}, 30 | "urls": {}, 31 | } 32 | 33 | 34 | def parse(server: str, client: str, cat: str): 35 | filename = server + "_" + client + "_" + cat + ".json" 36 | try: 37 | with open(filename) as f: 38 | data = json.load(f) 39 | except IOError: 40 | print("Warning: Couldn't open file " + filename) 41 | result[cat].append([]) 42 | return 43 | parse_data(server, client, cat, data) 44 | 45 | 46 | def parse_data(server: str, client: str, cat: str, data: object): 47 | if len(data["servers"]) != 1: 48 | sys.exit("expected exactly one server") 49 | if data["servers"][0] != server: 50 | sys.exit("inconsistent server") 51 | if len(data["clients"]) != 1: 52 | sys.exit("expected exactly one client") 53 | if data["clients"][0] != client: 54 | sys.exit("inconsistent client") 55 | if "end_time" not in result or data["end_time"] > result["end_time"]: 56 | 
result["end_time"] = data["end_time"] 57 | result[cat].append(data[cat][0]) 58 | result["quic_draft"] = data["quic_draft"] 59 | result["quic_version"] = data["quic_version"] 60 | result["urls"].update(data["urls"]) 61 | result["tests"].update(data["tests"]) 62 | 63 | 64 | for client in clients: 65 | for server in servers: 66 | parse(server, client, "results") 67 | parse(server, client, "measurements") 68 | 69 | if get_args().output: 70 | f = open(get_args().output, "w") 71 | json.dump(result, f) 72 | f.close() 73 | else: 74 | print(json.dumps(result)) 75 | -------------------------------------------------------------------------------- /implementations.json: -------------------------------------------------------------------------------- 1 | { 2 | "quic-go": { 3 | "image": "martenseemann/quic-go-interop:latest", 4 | "url": "https://github.com/quic-go/quic-go", 5 | "role": "both" 6 | }, 7 | "ngtcp2": { 8 | "image": "ghcr.io/ngtcp2/ngtcp2-interop:latest", 9 | "url": "https://github.com/ngtcp2/ngtcp2", 10 | "role": "both" 11 | }, 12 | "mvfst": { 13 | "image": "ghcr.io/facebook/proxygen/mvfst-interop:latest", 14 | "url": "https://github.com/facebookincubator/mvfst", 15 | "role": "both" 16 | }, 17 | "quiche": { 18 | "image": "cloudflare/quiche-qns:latest", 19 | "url": "https://github.com/cloudflare/quiche", 20 | "role": "both" 21 | }, 22 | "kwik": { 23 | "image": "peterdoornbosch/kwik_n_flupke-interop", 24 | "url": "https://github.com/ptrd/kwik", 25 | "role": "both" 26 | }, 27 | "picoquic": { 28 | "image": "privateoctopus/picoquic:latest", 29 | "url": "https://github.com/private-octopus/picoquic", 30 | "role": "both" 31 | }, 32 | "aioquic": { 33 | "image": "aiortc/aioquic-qns:latest", 34 | "url": "https://github.com/aiortc/aioquic", 35 | "role": "both" 36 | }, 37 | "neqo": { 38 | "image": "ghcr.io/mozilla/neqo-qns:latest", 39 | "url": "https://github.com/mozilla/neqo", 40 | "role": "both" 41 | }, 42 | "nginx": { 43 | "image": "ghcr.io/nginx/nginx-quic-qns:latest", 44 | 
"url": "https://quic.nginx.org/", 45 | "role": "server" 46 | }, 47 | "msquic": { 48 | "image": "ghcr.io/microsoft/msquic/qns:main", 49 | "url": "https://github.com/microsoft/msquic", 50 | "role": "both" 51 | }, 52 | "chrome": { 53 | "image": "martenseemann/chrome-quic-interop-runner", 54 | "url": "https://github.com/quic-interop/chrome-quic-interop-runner", 55 | "role": "client" 56 | }, 57 | "xquic": { 58 | "image": "ghcr.io/alibaba/xquic/xquic-interop:latest", 59 | "url": "https://github.com/alibaba/xquic", 60 | "role": "both" 61 | }, 62 | "lsquic": { 63 | "image": "litespeedtech/lsquic-qir:latest", 64 | "url": "https://github.com/litespeedtech/lsquic", 65 | "role": "both" 66 | }, 67 | "haproxy": { 68 | "image": "haproxytech/haproxy-qns:latest", 69 | "url": "https://github.com/haproxy/haproxy", 70 | "role": "server" 71 | }, 72 | "quinn": { 73 | "image": "stammw/quinn-interop:latest", 74 | "url": "https://github.com/quinn-rs/quinn", 75 | "role": "both" 76 | }, 77 | "s2n-quic": { 78 | "image": "ghcr.io/aws/s2n-quic/s2n-quic-qns:latest", 79 | "url": "https://github.com/aws/s2n-quic", 80 | "role": "both" 81 | }, 82 | "go-x-net": { 83 | "image": "us-central1-docker.pkg.dev/golang-interop-testing/quic/go-x-net:latest", 84 | "url": "https://pkg.go.dev/golang.org/x/net/internal/quic", 85 | "role": "both" 86 | } 87 | } 88 | -------------------------------------------------------------------------------- /docker-compose.yml: -------------------------------------------------------------------------------- 1 | services: 2 | sim: 3 | image: martenseemann/quic-network-simulator 4 | container_name: sim 5 | hostname: sim 6 | stdin_open: true 7 | tty: true 8 | environment: 9 | - WAITFORSERVER=$WAITFORSERVER 10 | - SCENARIO=$SCENARIO 11 | cap_add: 12 | - NET_ADMIN 13 | - NET_RAW 14 | expose: 15 | - "57832" 16 | networks: 17 | leftnet: 18 | ipv4_address: 193.167.0.2 19 | ipv6_address: fd00:cafe:cafe:0::2 20 | interface_name: eth0 21 | rightnet: 22 | ipv4_address: 193.167.100.2 23 | 
ipv6_address: fd00:cafe:cafe:100::2 24 | interface_name: eth1 25 | extra_hosts: 26 | - "server:193.167.100.100" 27 | 28 | server: 29 | image: $SERVER 30 | container_name: server 31 | hostname: server 32 | stdin_open: true 33 | tty: true 34 | volumes: 35 | - $WWW:/www:ro 36 | - $CERTS:/certs:ro 37 | environment: 38 | - CRON=$CRON 39 | - ROLE=server 40 | - SERVER_PARAMS=$SERVER_PARAMS 41 | - SSLKEYLOGFILE=/logs/keys.log 42 | - QLOGDIR=/logs/qlog/ 43 | - TESTCASE=$TESTCASE_SERVER 44 | depends_on: 45 | - sim 46 | cap_add: 47 | - NET_ADMIN 48 | ulimits: 49 | memlock: 67108864 50 | networks: 51 | rightnet: 52 | ipv4_address: 193.167.100.100 53 | ipv6_address: fd00:cafe:cafe:100::100 54 | interface_name: eth0 55 | extra_hosts: 56 | - "server4:193.167.100.100" 57 | - "server6:fd00:cafe:cafe:100::100" 58 | 59 | client: 60 | image: $CLIENT 61 | container_name: client 62 | hostname: client 63 | stdin_open: true 64 | tty: true 65 | volumes: 66 | - $DOWNLOADS:/downloads:delegated 67 | - $CERTS:/certs:ro 68 | environment: 69 | - CRON=$CRON 70 | - ROLE=client 71 | - CLIENT_PARAMS=$CLIENT_PARAMS 72 | - SSLKEYLOGFILE=/logs/keys.log 73 | - QLOGDIR=/logs/qlog/ 74 | - TESTCASE=$TESTCASE_CLIENT 75 | - REQUESTS=$REQUESTS 76 | depends_on: 77 | - sim 78 | cap_add: 79 | - NET_ADMIN 80 | ulimits: 81 | memlock: 67108864 82 | networks: 83 | leftnet: 84 | ipv4_address: 193.167.0.100 85 | ipv6_address: fd00:cafe:cafe:0::100 86 | interface_name: eth0 87 | extra_hosts: 88 | - "server4:193.167.100.100" 89 | - "server6:fd00:cafe:cafe:100::100" 90 | - "server46:193.167.100.100" 91 | - "server46:fd00:cafe:cafe:100::100" 92 | 93 | iperf_server: 94 | image: martenseemann/quic-interop-iperf-endpoint 95 | container_name: iperf_server 96 | stdin_open: true 97 | tty: true 98 | environment: 99 | - ROLE=server 100 | - CLIENT=client4 101 | - IPERF_CONGESTION=$IPERF_CONGESTION 102 | depends_on: 103 | - sim 104 | cap_add: 105 | - NET_ADMIN 106 | networks: 107 | rightnet: 108 | ipv4_address: 193.167.100.110 109 
| ipv6_address: fd00:cafe:cafe:100::110 110 | extra_hosts: 111 | - "client4:193.167.0.90" 112 | - "client6:fd00:cafe:cafe:0::100" 113 | - "client46:193.167.0.90" 114 | - "client46:fd00:cafe:cafe:0::100" 115 | 116 | iperf_client: 117 | image: martenseemann/quic-interop-iperf-endpoint 118 | container_name: iperf_client 119 | stdin_open: true 120 | tty: true 121 | environment: 122 | - ROLE=client 123 | - IPERF_CONGESTION=$IPERF_CONGESTION 124 | depends_on: 125 | - sim 126 | cap_add: 127 | - NET_ADMIN 128 | networks: 129 | leftnet: 130 | ipv4_address: 193.167.0.90 131 | ipv6_address: fd00:cafe:cafe:0::90 132 | extra_hosts: 133 | - "server4:193.167.100.110" 134 | - "server6:fd00:cafe:cafe:100::110" 135 | - "server46:193.167.100.110" 136 | - "server46:fd00:cafe:cafe:100::110" 137 | 138 | networks: 139 | leftnet: 140 | driver: bridge 141 | driver_opts: 142 | com.docker.network.bridge.enable_ip_masquerade: 'false' 143 | enable_ipv6: true 144 | ipam: 145 | config: 146 | - subnet: 193.167.0.0/24 147 | - subnet: fd00:cafe:cafe:0::/64 148 | rightnet: 149 | driver: bridge 150 | driver_opts: 151 | com.docker.network.bridge.enable_ip_masquerade: 'false' 152 | enable_ipv6: true 153 | ipam: 154 | config: 155 | - subnet: 193.167.100.0/24 156 | - subnet: fd00:cafe:cafe:100::/64 157 | 158 | -------------------------------------------------------------------------------- /web/index.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | QUIC Interop Runner 8 | 9 | 10 | 11 | 12 | 13 | 14 | 15 | Fork me on GitHub 16 | 17 | 48 | 49 |
50 | 51 |

About

52 |

53 | This page documents the current interop status of various QUIC client and server implementations that have chosen to participate in this automated testing. It is updated several times per day; older results can be accessed via the "Run" selector in the title bar. 54 |

55 | 56 |

57 | In the following tables, results for client implementations are shown horizontally, results for server implementations are shown vertically. The results were obtained with QUIC version . 58 |

59 | 60 |

61 | It is straightforward to add your implementation to this automated testing effort; please see these simple instructions. 62 |

63 | 64 | 74 | 75 |
76 | 77 |

Results Filter

78 | 79 |
80 |
Client:
81 |
82 |
83 | 84 |
85 |
Server:
86 |
87 |
88 | 89 |
90 |
Test:
91 |
92 |
93 | 94 |

Interop Status

95 |
96 | 97 |

Measurement Results

98 |
99 |
100 | 101 | 102 | 103 | 104 | 105 | 106 | 107 | -------------------------------------------------------------------------------- /run.py: -------------------------------------------------------------------------------- 1 | #!/usr/bin/env python3 2 | 3 | import argparse 4 | import sys 5 | from typing import List, Tuple 6 | 7 | import testcases 8 | from implementations import IMPLEMENTATIONS, Role 9 | from interop import InteropRunner 10 | from testcases import MEASUREMENTS, TESTCASES 11 | 12 | implementations = { 13 | name: {"image": value["image"], "url": value["url"]} 14 | for name, value in IMPLEMENTATIONS.items() 15 | } 16 | client_implementations = [ 17 | name 18 | for name, value in IMPLEMENTATIONS.items() 19 | if value["role"] == Role.BOTH or value["role"] == Role.CLIENT 20 | ] 21 | server_implementations = [ 22 | name 23 | for name, value in IMPLEMENTATIONS.items() 24 | if value["role"] == Role.BOTH or value["role"] == Role.SERVER 25 | ] 26 | 27 | 28 | def main(): 29 | def get_args(): 30 | parser = argparse.ArgumentParser() 31 | parser.add_argument( 32 | "-d", 33 | "--debug", 34 | action="store_const", 35 | const=True, 36 | default=False, 37 | help="turn on debug logs", 38 | ) 39 | parser.add_argument( 40 | "-s", "--server", help="server implementations (comma-separated)" 41 | ) 42 | parser.add_argument( 43 | "-c", "--client", help="client implementations (comma-separated)" 44 | ) 45 | parser.add_argument( 46 | "-t", 47 | "--test", 48 | help="test cases (comma-separatated). Valid test cases are: " 49 | + ", ".join([x.name() for x in TESTCASES + MEASUREMENTS]), 50 | ) 51 | parser.add_argument( 52 | "-r", 53 | "--replace", 54 | help="replace path of implementation. 
Example: -r myquicimpl=dockertagname", 55 | ) 56 | parser.add_argument( 57 | "-l", 58 | "--log-dir", 59 | help="log directory", 60 | default="", 61 | ) 62 | parser.add_argument( 63 | "-f", "--save-files", help="save downloaded files if a test fails" 64 | ) 65 | parser.add_argument( 66 | "-j", "--json", help="output the matrix to file in json format" 67 | ) 68 | parser.add_argument( 69 | "-m", 70 | "--markdown", 71 | help="output the matrix in Markdown format", 72 | action="store_const", 73 | const=True, 74 | default=False, 75 | ) 76 | parser.add_argument( 77 | "-i", 78 | "--must-include", 79 | help="implementation that must be included", 80 | ) 81 | parser.add_argument( 82 | "-n", 83 | "--no-auto-unsupported", 84 | help="implementations for which auto-marking as unsupported when all tests fail should be skipped", 85 | ) 86 | return parser.parse_args() 87 | 88 | replace_arg = get_args().replace 89 | if replace_arg: 90 | for s in replace_arg.split(","): 91 | pair = s.split("=") 92 | if len(pair) != 2: 93 | sys.exit("Invalid format for replace") 94 | name, image = pair[0], pair[1] 95 | if name not in IMPLEMENTATIONS: 96 | sys.exit("Implementation " + name + " not found.") 97 | implementations[name]["image"] = image 98 | 99 | def get_impls(arg, availableImpls, role) -> List[str]: 100 | if not arg: 101 | return availableImpls 102 | impls = [] 103 | for s in arg.split(","): 104 | if s not in availableImpls: 105 | sys.exit(role + " implementation " + s + " not found.") 106 | impls.append(s) 107 | return impls 108 | 109 | def get_impl_pairs(clients, servers, must_include) -> List[Tuple[str, str]]: 110 | impls = [] 111 | for client in clients: 112 | for server in servers: 113 | if ( 114 | must_include is None 115 | or client == must_include 116 | or server == must_include 117 | ): 118 | impls.append((client, server)) 119 | return impls 120 | 121 | def get_tests_and_measurements( 122 | arg, 123 | ) -> Tuple[List[testcases.TestCase], List[testcases.TestCase]]: 124 | if arg is 
None: 125 | return TESTCASES, MEASUREMENTS 126 | elif arg == "onlyTests": 127 | return TESTCASES, [] 128 | elif arg == "onlyMeasurements": 129 | return [], MEASUREMENTS 130 | elif not arg: 131 | return [], [] 132 | tests = [] 133 | measurements = [] 134 | for t in arg.split(","): 135 | if t in [tc.name() for tc in TESTCASES]: 136 | tests += [tc for tc in TESTCASES if tc.name() == t] 137 | elif t in [tc.name() for tc in MEASUREMENTS]: 138 | measurements += [tc for tc in MEASUREMENTS if tc.name() == t] 139 | else: 140 | print( 141 | ( 142 | "Test case {} not found.\n" 143 | "Available testcases: {}\n" 144 | "Available measurements: {}" 145 | ).format( 146 | t, 147 | ", ".join([t.name() for t in TESTCASES]), 148 | ", ".join([t.name() for t in MEASUREMENTS]), 149 | ) 150 | ) 151 | sys.exit() 152 | return tests, measurements 153 | 154 | t = get_tests_and_measurements(get_args().test) 155 | clients = get_impls(get_args().client, client_implementations, "Client") 156 | servers = get_impls(get_args().server, server_implementations, "Server") 157 | # If there is only one client or server, we should not automatically mark tests as unsupported 158 | no_auto_unsupported = set() 159 | for kind in [clients, servers]: 160 | if len(kind) == 1: 161 | no_auto_unsupported.add(kind[0]) 162 | return InteropRunner( 163 | implementations=implementations, 164 | client_server_pairs=get_impl_pairs(clients, servers, get_args().must_include), 165 | tests=t[0], 166 | measurements=t[1], 167 | output=get_args().json, 168 | markdown=get_args().markdown, 169 | debug=get_args().debug, 170 | log_dir=get_args().log_dir, 171 | save_files=get_args().save_files, 172 | no_auto_unsupported=( 173 | no_auto_unsupported 174 | if get_args().no_auto_unsupported is None 175 | else get_impls( 176 | get_args().no_auto_unsupported, clients + servers, "Client/Server" 177 | ) 178 | ), 179 | ).run() 180 | 181 | 182 | if __name__ == "__main__": 183 | sys.exit(main()) 184 | 
-------------------------------------------------------------------------------- /trace.py: -------------------------------------------------------------------------------- 1 | import datetime 2 | import logging 3 | from enum import Enum 4 | from typing import List, Optional, Tuple 5 | 6 | import pyshark 7 | 8 | IP4_CLIENT = "193.167.0.100" 9 | IP4_SERVER = "193.167.100.100" 10 | IP6_CLIENT = "fd00:cafe:cafe:0::100" 11 | IP6_SERVER = "fd00:cafe:cafe:100::100" 12 | 13 | 14 | QUIC_V2 = hex(0x6B3343CF) 15 | 16 | 17 | class Direction(Enum): 18 | ALL = 0 19 | FROM_CLIENT = 1 20 | FROM_SERVER = 2 21 | INVALID = 3 22 | 23 | 24 | class PacketType(Enum): 25 | INITIAL = 1 26 | HANDSHAKE = 2 27 | ZERORTT = 3 28 | RETRY = 4 29 | ONERTT = 5 30 | VERSIONNEGOTIATION = 6 31 | INVALID = 7 32 | 33 | 34 | WIRESHARK_PACKET_TYPES = { 35 | PacketType.INITIAL: "0", 36 | PacketType.ZERORTT: "1", 37 | PacketType.HANDSHAKE: "2", 38 | PacketType.RETRY: "3", 39 | } 40 | 41 | 42 | WIRESHARK_PACKET_TYPES_V2 = { 43 | PacketType.INITIAL: "1", 44 | PacketType.ZERORTT: "2", 45 | PacketType.HANDSHAKE: "3", 46 | PacketType.RETRY: "0", 47 | } 48 | 49 | 50 | def get_direction(p) -> Direction: 51 | if (hasattr(p, "ip") and p.ip.src == IP4_CLIENT) or ( 52 | hasattr(p, "ipv6") and p.ipv6.src == IP6_CLIENT 53 | ): 54 | return Direction.FROM_CLIENT 55 | 56 | if (hasattr(p, "ip") and p.ip.src == IP4_SERVER) or ( 57 | hasattr(p, "ipv6") and p.ipv6.src == IP6_SERVER 58 | ): 59 | return Direction.FROM_SERVER 60 | 61 | return Direction.INVALID 62 | 63 | 64 | def get_packet_type(p) -> PacketType: 65 | if p.quic.header_form == "0": 66 | return PacketType.ONERTT 67 | if p.quic.version == "0x00000000": 68 | return PacketType.VERSIONNEGOTIATION 69 | if p.quic.version == QUIC_V2: 70 | for t, num in WIRESHARK_PACKET_TYPES_V2.items(): 71 | if p.quic.long_packet_type_v2 == num: 72 | return t 73 | return PacketType.INVALID 74 | for t, num in WIRESHARK_PACKET_TYPES.items(): 75 | if p.quic.long_packet_type == num: 76 | 
return t 77 | return PacketType.INVALID 78 | 79 | 80 | class TraceAnalyzer: 81 | _filename = "" 82 | 83 | def __init__(self, filename: str, keylog_file: Optional[str] = None): 84 | self._filename = filename 85 | self._keylog_file = keylog_file 86 | 87 | def _get_direction_filter(self, d: Direction) -> str: 88 | f = "(quic && !icmp) && " 89 | if d == Direction.FROM_CLIENT: 90 | return ( 91 | f + "(ip.src==" + IP4_CLIENT + " || ipv6.src==" + IP6_CLIENT + ") && " 92 | ) 93 | elif d == Direction.FROM_SERVER: 94 | return ( 95 | f + "(ip.src==" + IP4_SERVER + " || ipv6.src==" + IP6_SERVER + ") && " 96 | ) 97 | else: 98 | return f 99 | 100 | def _get_packets(self, f: str) -> List: 101 | override_prefs = {} 102 | if self._keylog_file is not None: 103 | override_prefs["tls.keylog_file"] = self._keylog_file 104 | cap = pyshark.FileCapture( 105 | self._filename, 106 | display_filter=f, 107 | override_prefs=override_prefs, 108 | disable_protocol="http3", # see https://github.com/quic-interop/quic-interop-runner/pull/179 109 | decode_as={"udp.port==443": "quic"}, 110 | ) 111 | packets = [] 112 | # If the pcap has been cut short in the middle of the packet, pyshark will crash. 113 | # See https://github.com/KimiNewt/pyshark/issues/390. 
114 | try: 115 | for p in cap: 116 | if "quic" not in p: 117 | logging.info("Captured packet without quic layer: %r", p) 118 | continue 119 | packets.append(p) 120 | except Exception as e: 121 | logging.debug(e) 122 | cap.close() 123 | 124 | if self._keylog_file is not None: 125 | for p in packets: 126 | if hasattr(p["quic"], "decryption_failed"): 127 | logging.info("At least one QUIC packet could not be decrypted") 128 | logging.debug(p) 129 | break 130 | return packets 131 | 132 | def get_raw_packets(self, direction: Direction = Direction.ALL) -> List: 133 | packets = [] 134 | for packet in self._get_packets(self._get_direction_filter(direction) + "quic"): 135 | packets.append(packet) 136 | return packets 137 | 138 | def get_1rtt(self, direction: Direction = Direction.ALL) -> List: 139 | """Get all QUIC packets, one or both directions.""" 140 | packets, _, _ = self.get_1rtt_sniff_times(direction) 141 | return packets 142 | 143 | def get_1rtt_sniff_times( 144 | self, direction: Direction = Direction.ALL 145 | ) -> Tuple[List, datetime.datetime, datetime.datetime]: 146 | """Get all QUIC packets, one or both directions, and first and last sniff times.""" 147 | packets = [] 148 | first, last = 0, 0 149 | for packet in self._get_packets( 150 | self._get_direction_filter(direction) + "quic.header_form==0" 151 | ): 152 | for layer in packet.layers: 153 | if ( 154 | layer.layer_name == "quic" 155 | and not hasattr(layer, "long_packet_type") 156 | and not hasattr(layer, "long_packet_type_v2") 157 | ): 158 | if first == 0: 159 | first = packet.sniff_time 160 | last = packet.sniff_time 161 | packets.append(layer) 162 | return packets, first, last 163 | 164 | def get_vnp(self, direction: Direction = Direction.ALL) -> List: 165 | return self._get_packets( 166 | self._get_direction_filter(direction) + "quic.version==0" 167 | ) 168 | 169 | def _get_long_header_packets( 170 | self, packet_type: PacketType, direction: Direction 171 | ) -> List: 172 | packets = [] 173 | for packet 
in self._get_packets( 174 | self._get_direction_filter(direction) 175 | + "(quic.long.packet_type || quic.long.packet_type_v2)" 176 | ): 177 | for layer in packet.layers: 178 | if layer.layer_name == "quic" and ( 179 | ( 180 | hasattr(layer, "long_packet_type") 181 | and layer.long_packet_type 182 | == WIRESHARK_PACKET_TYPES[packet_type] 183 | ) 184 | or ( 185 | hasattr(layer, "long_packet_type_v2") 186 | and layer.long_packet_type_v2 187 | == WIRESHARK_PACKET_TYPES_V2[packet_type] 188 | ) 189 | ): 190 | packets.append(layer) 191 | return packets 192 | 193 | def get_initial(self, direction: Direction = Direction.ALL) -> List: 194 | """Get all Initial packets.""" 195 | return self._get_long_header_packets(PacketType.INITIAL, direction) 196 | 197 | def get_retry(self, direction: Direction = Direction.ALL) -> List: 198 | """Get all Retry packets.""" 199 | return self._get_long_header_packets(PacketType.RETRY, direction) 200 | 201 | def get_handshake(self, direction: Direction = Direction.ALL) -> List: 202 | """Get all Handshake packets.""" 203 | return self._get_long_header_packets(PacketType.HANDSHAKE, direction) 204 | 205 | def get_0rtt(self) -> List: 206 | """Get all 0-RTT packets.""" 207 | return self._get_long_header_packets(PacketType.ZERORTT, Direction.FROM_CLIENT) 208 | -------------------------------------------------------------------------------- /.github/workflows/interop.yml: -------------------------------------------------------------------------------- 1 | name: interop 2 | on: 3 | schedule: 4 | # Every 8h, at 15 minutes past the hour 5 | # This makes sure that the cleanup cron job can run first. 
6 | - cron: "15 */8 * * *" 7 | 8 | jobs: 9 | config: 10 | runs-on: ubuntu-latest 11 | outputs: 12 | logname: ${{ steps.set-logname.outputs.logname }} 13 | starttime: ${{ steps.set-starttime.outputs.starttime }} 14 | servers: ${{ steps.set-servers.outputs.servers }} 15 | clients: ${{ steps.set-clients.outputs.clients }} 16 | images: ${{ steps.set-images.outputs.images }} 17 | steps: 18 | - name: Set log name 19 | id: set-logname 20 | run: | 21 | LOGNAME=$(date -u +"%Y-%m-%dT%H:%M") 22 | echo $LOGNAME 23 | echo "logname=$LOGNAME" >> $GITHUB_OUTPUT 24 | - name: Save start time 25 | id: set-starttime 26 | run: | 27 | STARTTIME=$(date +%s) 28 | echo $STARTTIME 29 | echo "starttime=$STARTTIME" >> $GITHUB_OUTPUT 30 | - uses: actions/checkout@v6 31 | - uses: actions/setup-python@v6 32 | with: 33 | python-version: 3.8 34 | - name: Determine servers 35 | id: set-servers 36 | run: | 37 | SERVERS=$(jq -c 'with_entries(select(.value.role == "server" or .value.role == "both")) | keys_unsorted' implementations.json) 38 | echo $SERVERS 39 | echo "servers=$SERVERS" >> $GITHUB_OUTPUT 40 | - name: Determine clients 41 | id: set-clients 42 | run: | 43 | CLIENTS=$(jq -c 'with_entries(select(.value.role == "client" or .value.role == "both")) | keys_unsorted' implementations.json) 44 | echo $CLIENTS 45 | echo "clients=$CLIENTS" >> $GITHUB_OUTPUT 46 | - name: Determine Docker images 47 | id: set-images 48 | run: | 49 | IMAGES=$(jq -c 'keys_unsorted' implementations.json) 50 | echo $IMAGES 51 | echo "images=$IMAGES" >> $GITHUB_OUTPUT 52 | docker-pull-tools: 53 | runs-on: ubuntu-latest 54 | strategy: 55 | matrix: 56 | image: [ 'quic-network-simulator', 'quic-interop-iperf-endpoint' ] 57 | steps: 58 | - uses: actions/checkout@v6 59 | - name: Pull 60 | run: | 61 | URL="martenseemann/${{ matrix.image }}" 62 | docker pull $URL 63 | echo "URL=$URL" >> $GITHUB_ENV 64 | - name: Docker inspect 65 | run: docker image inspect $URL 66 | - name: Save Docker image 67 | run: | 68 | docker save $URL | 
gzip --best > ${{ matrix.image }}.tar.gz 69 | du -sh ${{ matrix.image }}.tar.gz 70 | - name: Upload result 71 | uses: actions/upload-artifact@v6 72 | with: 73 | name: images-${{ matrix.image }} 74 | path: ${{ matrix.image }}.tar.gz 75 | if-no-files-found: error 76 | docker-pull-images: 77 | needs: [ config ] 78 | runs-on: ubuntu-latest 79 | strategy: 80 | matrix: 81 | image: ${{ fromJson(needs.config.outputs.images) }} 82 | name: Pull ${{ matrix.image }} 83 | steps: 84 | - uses: actions/checkout@v6 85 | - name: Run docker pull 86 | run: | 87 | URL=$(jq -r '.["${{ matrix.image }}"].image' implementations.json) 88 | echo $URL 89 | docker pull $URL 90 | echo "URL=$URL" >> $GITHUB_ENV 91 | - name: Docker inspect 92 | run: docker image inspect $URL 93 | - name: Save Docker image 94 | run: | 95 | docker save $URL | gzip --best > ${{ matrix.image }}.tar.gz 96 | du -sh ${{ matrix.image }}.tar.gz 97 | - name: Upload result 98 | uses: actions/upload-artifact@v6 99 | with: 100 | name: image-${{ matrix.image }} 101 | path: ${{ matrix.image }}.tar.gz 102 | if-no-files-found: error 103 | tests: 104 | needs: [ config, docker-pull-tools, docker-pull-images ] 105 | runs-on: ubuntu-latest 106 | continue-on-error: true 107 | timeout-minutes: 45 108 | strategy: 109 | fail-fast: false 110 | matrix: 111 | server: ${{ fromJson(needs.config.outputs.servers) }} 112 | client: ${{ fromJson(needs.config.outputs.clients) }} 113 | name: (${{ matrix.server }} - ${{ matrix.client }}) 114 | steps: 115 | - uses: actions/checkout@v6 116 | # Remove this when GitHub runner gets docker engine >= v28.1.0. 
117 | - uses: docker/setup-docker-action@v4 118 | with: 119 | version: version=v28.3.0 120 | - uses: actions/setup-python@v6 121 | with: 122 | python-version: 3.8 123 | - name: Enable IPv6 support 124 | run: sudo modprobe ip6table_filter 125 | - run: docker image ls 126 | - name: Download quic-network-simulator image 127 | uses: actions/download-artifact@v7 128 | with: 129 | name: images-quic-network-simulator 130 | - name: Download quic-interop-iperf-endpoint image 131 | uses: actions/download-artifact@v7 132 | with: 133 | name: images-quic-interop-iperf-endpoint 134 | - name: Download ${{ matrix.server }} Docker image 135 | uses: actions/download-artifact@v7 136 | with: 137 | name: image-${{ matrix.server }} 138 | - name: Download ${{ matrix.client }} Docker image 139 | if: ${{ matrix.server != matrix.client }} 140 | uses: actions/download-artifact@v7 141 | with: 142 | name: image-${{ matrix.client }} 143 | - name: Load docker images 144 | run: | 145 | docker load --input quic-network-simulator.tar.gz 146 | docker load --input quic-interop-iperf-endpoint.tar.gz 147 | docker load --input ${{ matrix.server }}.tar.gz 148 | docker load --input ${{ matrix.client }}.tar.gz 149 | - run: docker image ls 150 | - name: Install Wireshark 151 | run: | 152 | sudo add-apt-repository ppa:wireshark-dev/nightly 153 | sudo apt-get update 154 | sudo apt-get install -y --no-install-recommends tshark 155 | - name: Install Python packages 156 | run: | 157 | pip install -U pip 158 | pip install -r requirements.txt 159 | - name: Run tests 160 | env: 161 | CRON: "true" 162 | run: | 163 | (python run.py --client ${{ matrix.client }} --server ${{ matrix.server }} --log-dir logs --json ${{ matrix.server }}_${{ matrix.client }}_results.json -t onlyTests || true) | tee output.txt 164 | mkdir -p logs/${{ matrix.server }}_${{ matrix.client }} 165 | mv output.txt logs/${{ matrix.server }}_${{ matrix.client }}/ 166 | - name: Run measurements 167 | env: 168 | CRON: "true" 169 | run: | 170 | python 
run.py --client ${{ matrix.client }} --server ${{ matrix.server }} --log-dir logs_measurement --json ${{ matrix.server }}_${{ matrix.client }}_measurements.json -t onlyMeasurements || true 171 | if [ ! -d "logs_measurement" ]; then exit 0; fi 172 | find logs_measurement -depth -name "sim" -type d -exec rm -r "{}" \; 173 | find logs_measurement -depth -name "client" -type d -exec rm -r "{}" \; 174 | find logs_measurement -depth -name "server" -type d -exec rm -r "{}" \; 175 | mv logs_measurement/${{ matrix.server }}_${{ matrix.client }}/* logs/${{ matrix.server }}_${{ matrix.client }}/ 176 | - name: Upload logs to interop.seemann.io 177 | uses: burnett01/rsync-deployments@33214bd98ba4ac2be90f5976672b3f030fce9ce4 # v7.1.0 178 | if: ${{ always() && github.event_name == 'schedule' }} 179 | with: 180 | switches: -avzr --relative 181 | path: logs/./${{ matrix.server }}_${{ matrix.client }}/ 182 | remote_path: ${{ vars.LOG_DIR }}/${{ needs.config.outputs.logname }} 183 | remote_host: interop.seemann.io 184 | remote_user: ${{ secrets.INTEROP_SEEMANN_IO_USER }} 185 | remote_key: ${{ secrets.INTEROP_SEEMANN_IO_SSH_KEY }} 186 | - name: Upload result 187 | uses: actions/upload-artifact@v6 188 | with: 189 | name: results-${{ matrix.server }}-${{ matrix.client }} 190 | if-no-files-found: error 191 | path: | 192 | ${{ matrix.server }}_${{ matrix.client }}_results.json 193 | ${{ matrix.server }}_${{ matrix.client }}_measurements.json 194 | aggregate: 195 | needs: [ config, tests ] 196 | runs-on: ubuntu-latest 197 | if: always() 198 | env: 199 | LOGNAME: ${{ needs.config.outputs.logname }} 200 | steps: 201 | - uses: actions/checkout@v6 202 | - uses: actions/setup-python@v6 203 | with: 204 | python-version: 3.8 205 | - name: Download results 206 | uses: actions/download-artifact@v7 207 | with: 208 | pattern: results-* 209 | - name: Aggregate results 210 | run: | 211 | mv results-*/*.json . 
212 | python .github/workflows/aggregate.py \ 213 | --start-time ${{ needs.config.outputs.starttime }} \ 214 | --server ${{ join(fromJson(needs.config.outputs.servers), ',') }} \ 215 | --client ${{ join(fromJson(needs.config.outputs.clients), ',') }} \ 216 | --log-dir=$LOGNAME \ 217 | --output result.json 218 | - name: Print result 219 | run: jq '.' result.json 220 | - name: Upload result to artifacts 221 | uses: actions/upload-artifact@v6 222 | with: 223 | name: result-aggregated 224 | path: result.json 225 | - name: Upload logs to interop.seemann.io 226 | uses: burnett01/rsync-deployments@33214bd98ba4ac2be90f5976672b3f030fce9ce4 # v7.1.0 227 | if: ${{ github.event_name == 'schedule' }} 228 | with: 229 | switches: -avzr 230 | path: result.json 231 | remote_path: ${{ vars.LOG_DIR }}/${{ needs.config.outputs.logname }}/ 232 | remote_host: interop.seemann.io 233 | remote_user: ${{ secrets.INTEROP_SEEMANN_IO_USER }} 234 | remote_key: ${{ secrets.INTEROP_SEEMANN_IO_SSH_KEY }} 235 | - name: Point interop.seemann.io to the latest result 236 | uses: appleboy/ssh-action@823bd89e131d8d508129f9443cad5855e9ba96f0 # v1.2.4 237 | if: ${{ github.event_name == 'schedule' }} 238 | with: 239 | host: interop.seemann.io 240 | username: ${{ secrets.INTEROP_SEEMANN_IO_USER }} 241 | key: ${{ secrets.INTEROP_SEEMANN_IO_SSH_KEY }} 242 | envs: LOGNAME 243 | script: | 244 | cd ${{ vars.LOG_DIR }} 245 | jq '. += [ "${{ needs.config.outputs.logname }}" ]' logs.json | sponge logs.json 246 | rm latest || true 247 | ln -s $LOGNAME latest 248 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Interop Test Runner 2 | 3 | The Interop Test Runner aims to automatically generate an interop matrix by running multiple **test cases** using different QUIC implementations. 
4 | 5 | * Research Article: [Automating QUIC Interoperability Testing](https://dl.acm.org/doi/10.1145/3405796.3405826) 6 | * IETF Blog Post: [Automating interoperability testing to improve open standards for the Internet](https://www.ietf.org/blog/quic-automated-interop-testing/) 7 | 8 | ## Requirements 9 | 10 | The Interop Runner is written in Python 3. You'll need to install the 11 | following software to run the interop test: 12 | 13 | * Python3 modules. Run the following command: 14 | 15 | ```bash 16 | pip3 install -r requirements.txt 17 | ``` 18 | 19 | * [Docker](https://docs.docker.com/engine/install/) and [docker compose](https://docs.docker.com/compose/). 20 | 21 | * [Development version of Wireshark](https://www.wireshark.org/download.html) (version 4.5.0 or newer). 22 | 23 | ## Running the Interop Runner 24 | 25 | Run the interop tests: 26 | 27 | ```bash 28 | python3 run.py 29 | ``` 30 | 31 | ## IPv6 support 32 | 33 | To enable IPv6 support for the simulator on Linux, the `ip6table_filter` kernel module needs to be loaded on the host. If it isn't loaded on your machine, you'll need to run `sudo modprobe ip6table_filter`. 34 | 35 | ## Building a QUIC endpoint 36 | 37 | To include your QUIC implementation in the Interop Runner, create a Docker image following the instructions for [setting up an endpoint in the quic-network-simulator](https://github.com/quic-interop/quic-network-simulator), publish it on [Docker Hub](https://hub.docker.com) and add it to [implementations.json](implementations.json). Once your implementation is ready to interop, please send us a PR with this addition. Read on for more instructions on what to do within the Docker image. 38 | 39 | Typically, a test case will require a server to serve files from a directory, and a client to download files. Different test cases will specify the behavior to be tested. For example, the Retry test case expects the server to use a Retry before accepting the connection from the client. 
All configuration information from the test framework to your implementation is fed into the Docker image using environment variables. The test case is passed into your Docker container using the `TESTCASE` environment variable. If your implementation doesn't support a test case, it MUST exit with status code 127. This will allow us to add new test cases in the future, and correctly report test failures and successes, even if some implementations have not yet implemented support for this new test case. 40 | 41 | The Interop Runner mounts the directory `/www` into your server Docker container. This directory will contain one or more randomly generated files. Your server implementation is expected to run on port 443 and serve files from this directory. 42 | Equivalently, the Interop Runner mounts `/downloads` into your client Docker container. The directory is initially empty, and your client implementation is expected to store downloaded files into this directory. The URLs of the files to download are passed to the client using the environment variable `REQUESTS`, which contains one or more URLs, separated by a space. 43 | 44 | After the transfer is completed, the client container is expected to exit with exit status 0. If an error occurred during the transfer, the client is expected to exit with exit status 1. 45 | After completion of the test case, the Interop Runner will verify that the client downloaded the files it was expected to transfer, and that the file contents match. Additionally, for certain test cases, the Interop Runner will use the pcap of the transfer to verify that the implementations fulfilled the requirements of the test (for example, for the Retry test case, the pcap should show that a Retry packet was sent, and that the client used the Token provided in that packet). 46 | 47 | The Interop Runner generates a key and a certificate chain and mounts it into `/certs`. 
The server needs to load its private key from `priv.key`, and the certificate chain from `cert.pem`. 48 | 49 | ### Examples 50 | 51 | If you're not familiar with Docker, it might be helpful to have a look at the Dockerfiles and scripts that other implementations use: 52 | 53 | * quic-go: [Dockerfile](https://github.com/quic-go/quic-go/blob/master/interop/Dockerfile), [run_endpoint.sh](https://github.com/quic-go/quic-go/blob/master/interop/run_endpoint.sh) and [CI config](https://github.com/quic-go/quic-go/blob/master/.github/workflows/build-interop-docker.yml) 54 | * quicly: [Dockerfile](https://github.com/h2o/quicly/blob/master/misc/quic-interop-runner/Dockerfile) and [run_endpoint.sh](https://github.com/h2o/quicly/blob/master/misc/quic-interop-runner/run_endpoint.sh) and [run_endpoint.sh](https://github.com/cloudflare/quiche/blob/master/tools/qns/run_endpoint.sh) 55 | * quiche: [Dockerfile](https://github.com/cloudflare/quiche/blob/master/Dockerfile) 56 | * neqo: [Dockerfile](https://github.com/mozilla/neqo/blob/main/qns/Dockerfile) and [interop.sh](https://github.com/mozilla/neqo/blob/main/qns/interop.sh) 57 | * msquic: [Dockerfile](https://github.com/microsoft/msquic/blob/master/Dockerfile), [run_endpoint.sh](https://github.com/microsoft/msquic/blob/master/scripts/run_endpoint.sh) and [CI config](https://github.com/microsoft/msquic/blob/master/.azure/azure-pipelines.docker.yml) 58 | 59 | Implementers: Please feel free to add links to your implementation here! 60 | 61 | Note that the [online interop](https://interop.seemann.io/) runner requires `linux/amd64` architecture, so if you build on a different architecture (e.g. "Apple silicon"), you would need to use `--platform linux/amd64` with `docker build` to create a compatible image. 62 | Even better, and the recommended approach, is to use a multi-platform build to provide both `amd64` and `arm64` images, so everybody can run the interop locally with your implementation. 
To build the multi-platform image, you can use the `docker buildx` command: 63 | 64 | ```bash 65 | docker buildx create --use 66 | docker buildx build --pull --push --platform linux/amd64,linux/arm64 -t your-image-name . 67 | ``` 68 | 69 | ## Logs 70 | 71 | To facilitate debugging, the Interop Runner saves the log files to the logs directory. This directory is overwritten every time the Interop Runner is executed. 72 | 73 | The log files are saved to a directory named `#server_#client/#testcase`. `output.txt` contains the console output of the interop test runner (which might contain information why a test case failed). The server and client logs are saved in the `server` and `client` directory, respectively. The `sim` directory contains pcaps recorded by the simulator. 74 | 75 | If implementations wish to export the TLS secrets, they are encouraged to do so in the format in the [NSS Key Log format](https://developer.mozilla.org/en-US/docs/Mozilla/Projects/NSS/Key_Log_Format). The interop runner sets the SSLKEYLOGFILE environment variable to a file in the logs directory. In the future, the interop runner might use those files to decode the traces. 76 | 77 | Implementations that implement [qlog](https://github.com/quiclog/internet-drafts) should export the log files to the directory specified by the `QLOGDIR` environment variable. 78 | 79 | ## Test cases 80 | 81 | The Interop Runner implements the following test cases. Unless noted otherwise, test cases use HTTP/0.9 for file transfers. More test cases will be added in the future, to test more protocol features. The name in parentheses is the value of the `TESTCASE` environment variable passed into your Docker container. 82 | 83 | * **Version Negotiation** (`versionnegotiation`): Tests that a server sends a Version Negotiation packet in response to an unknown QUIC version number. 
The client should start a connection using an unsupported version number (it can use a reserved version number to do so), and should abort the connection attempt when receiving the Version Negotiation packet. 84 | Currently disabled due to #20. 85 | 86 | * **Handshake** (`handshake`): Tests the successful completion of the handshake. The client is expected to establish a single QUIC connection to the server and download one or multiple small files. Servers should not send a Retry packet in this test case. 87 | 88 | * **Transfer** (`transfer`): Tests both flow control and stream multiplexing. The client should use small initial flow control windows for both stream- and connection-level flow control, such that during the transfer of files on the order of 1 MB the flow control window needs to be increased. The client is expected to establish a single QUIC connection, and use multiple streams to concurrently download the files. 89 | 90 | * **ChaCha20** (`chacha20`): In this test, client and server are expected to offer **only** ChaCha20 as a ciphersuite. The client then downloads the files. 91 | 92 | * **KeyUpdate** (`keyupdate`, only for the client): The client is expected to make sure that a key update happens early in the connection (during the first MB transferred). It doesn't matter which peer actually initiated the update. 93 | 94 | * **Retry** (`retry`): Tests that the server can generate a Retry, and that the client can act upon it (i.e. use the Token provided in the Retry packet in the Initial packet). 95 | 96 | * **Resumption** (`resumption`): Tests QUIC session resumption (without 0-RTT). The client is expected to establish a connection and download the first file. The server is expected to provide the client with a session ticket that allows it to resume the connection. 
After downloading the first file, the client has to close the connection, establish a resumed connection using the session ticket, and use this connection to download the remaining file(s). 97 | 98 | * **0-RTT** (`zerortt`): Tests QUIC 0-RTT. The client is expected to establish a connection and download the first file. The server is expected to provide the client with a session ticket that allows it to establish a 0-RTT connection on the next connection attempt. After downloading the first file, the client has to close the connection, establish and request the remaining file(s) in 0-RTT. 99 | 100 | * **HTTP3** (`http3`): Tests a simple HTTP/3 connection. The client is expected to download multiple files using HTTP/3. Files should be requested and transferred in parallel. 101 | 102 | * **Handshake Loss** (`multiconnect`): Tests resilience of the handshake to high loss. The client is expected to establish multiple connections, sequential or in parallel, and use each connection to download a single file. 103 | 104 | * **V2** (`v2`): In this test, client starts connecting server in QUIC v1 with `version_information` transport parameter that includes QUIC v2 (`0x6b3343cf`) in `other_versions` field. Server should select QUIC v2 in compatible version negotiation. Client is expected to download one small file in QUIC v2. 105 | 106 | * **Port Rebinding** (`rebind-port`): In this test case, a NAT is simulated that changes the client port (as observed by the server) after the handshake. Server should perform path validation. 107 | 108 | * **Address Rebinding** (`rebind-addr`): In this test case, a NAT is simulated that changes the client IP address (as observed by the server) after the handshake. Server should perform path validation. 109 | 110 | * **Connection Migration** (`connectionmigration`): In this test case, the server is expected to provide its preferred addresses to the client during the handshake. 
The client is expected to perform active migration to one of those addresses. 111 | -------------------------------------------------------------------------------- /web/script.js: -------------------------------------------------------------------------------- 1 | /* globals document, window, console, URLSearchParams, XMLHttpRequest, $, history */ 2 | 3 | (function() { 4 | "use strict"; 5 | const map = { client: {}, server: {}, test: {} }; 6 | const color_type = { succeeded: "success", unsupported: "secondary disabled", failed: "danger"}; 7 | 8 | // see https://stackoverflow.com/a/43466724/ 9 | function formatTime(seconds) { 10 | return [ 11 | parseInt(seconds / 60 / 60), 12 | parseInt(seconds / 60 % 60), 13 | parseInt(seconds % 60) 14 | ].join(":").replace(/\b(\d)\b/g, "0$1"); 15 | } 16 | 17 | function getLogLink(log_dir, server, client, test, text, res) { 18 | var ttip = "Test: " + test + "
" + 19 | "Client: " + client + "
" + 20 | "Server: " + server + "
" + 21 | "Result: " + res + ""; 22 | 23 | var a = document.createElement("a"); 24 | a.className = "btn btn-xs btn-" + color_type[res] + " " + res + " test-" + text.toLowerCase(); 25 | var ttip_target = a; 26 | if (res !== "unsupported") { 27 | a.href = "logs/" + log_dir + "/" + server + "_" + client + "/" + test; 28 | a.target = "_blank"; 29 | ttip += "

(Click for logs.)"; 30 | } else { 31 | var s = document.createElement("span"); 32 | s.className = "d-inline-block"; 33 | s.tabIndex = 0; 34 | a.style = "pointer-events: none;"; 35 | s.appendChild(a); 36 | ttip_target = s; 37 | } 38 | ttip_target.title = ttip; 39 | $(ttip_target).attr("data-toggle", "tooltip").attr("data-placement", "bottom").attr("data-html", true).tooltip(); 40 | $(ttip_target).click(function() { $(this).blur(); }); 41 | a.appendChild(document.createTextNode(text)); 42 | return ttip_target; 43 | } 44 | 45 | function makeClickable(e, url) { 46 | e.title = url; 47 | $(e).attr("role", "button").attr("data-href", url).attr("data-toggle", "tooltip").tooltip(); 48 | e.onclick = function(e) { window.open(e.target.getAttribute("data-href")); }; 49 | } 50 | 51 | function makeColumnHeaders(t, result) { 52 | for(var i = 0; i <= result.servers.length; i++) 53 | t.appendChild(document.createElement("colgroup")); 54 | var thead = t.createTHead(); 55 | var row = thead.insertRow(0); 56 | var cell = document.createElement("th"); 57 | row.appendChild(cell); 58 | cell.scope = "col"; 59 | cell.className = "table-light client-any"; 60 | for(var i = 0; i < result.servers.length; i++) { 61 | cell = document.createElement("th"); 62 | row.appendChild(cell); 63 | cell.scope = "col"; 64 | cell.className = "table-light server-" + result.servers[i]; 65 | if (result.hasOwnProperty("urls")) 66 | makeClickable(cell, result.urls[result.servers[i]]); 67 | cell.innerHTML = result.servers[i]; 68 | } 69 | } 70 | 71 | function makeRowHeader(tbody, result, i) { 72 | var row = tbody.insertRow(i); 73 | var cell = document.createElement("th"); 74 | cell.scope = "row"; 75 | cell.className = "table-light client-" + result.clients[i]; 76 | if (result.hasOwnProperty("urls")) 77 | makeClickable(cell, result.urls[result.clients[i]]); 78 | cell.innerHTML = result.clients[i]; 79 | row.appendChild(cell); 80 | return row; 81 | } 82 | 83 | function fillInteropTable(result) { 84 | var index = 0; 85 | 
var appendResult = function(el, res, i, j) { 86 | result.results[index].forEach(function(item) { 87 | if(item.result !== res) return; 88 | el.appendChild(getLogLink(result.log_dir, result.servers[j], result.clients[i], item.name, item.abbr, res)); 89 | }); 90 | }; 91 | 92 | var t = document.getElementById("interop"); 93 | t.innerHTML = ""; 94 | makeColumnHeaders(t, result); 95 | var tbody = t.createTBody(); 96 | for(var i = 0; i < result.clients.length; i++) { 97 | var row = makeRowHeader(tbody, result, i); 98 | for(var j = 0; j < result.servers.length; j++) { 99 | var cell = row.insertCell(j+1); 100 | cell.className = "server-" + result.servers[j] + " client-" + result.clients[i]; 101 | appendResult(cell, "succeeded", i, j); 102 | appendResult(cell, "unsupported", i, j); 103 | appendResult(cell, "failed", i, j); 104 | index++; 105 | } 106 | } 107 | } 108 | 109 | function fillMeasurementTable(result) { 110 | var t = document.getElementById("measurements"); 111 | t.innerHTML = ""; 112 | makeColumnHeaders(t, result); 113 | var tbody = t.createTBody(); 114 | var index = 0; 115 | for(var i = 0; i < result.clients.length; i++) { 116 | var row = makeRowHeader(tbody, result, i); 117 | for(var j = 0; j < result.servers.length; j++) { 118 | var res = result.measurements[index]; 119 | var cell = row.insertCell(j+1); 120 | cell.className = "server-" + result.servers[j] + " client-" + result.clients[i]; 121 | for(var k = 0; k < res.length; k++) { 122 | var measurement = res[k]; 123 | var link = getLogLink(result.log_dir, result.servers[j], result.clients[i], measurement.name, measurement.abbr, measurement.result); 124 | if (measurement.result === "succeeded") 125 | link.innerHTML += ": " + measurement.details; 126 | cell.appendChild(link); 127 | } 128 | index++; 129 | } 130 | } 131 | } 132 | 133 | function dateToString(date) { 134 | return date.toLocaleDateString("en-US", { timeZone: 'UTC' }) + " " + date.toLocaleTimeString("en-US", { timeZone: 'UTC', timeZoneName: 'short' }); 
// Closes dateToString() — its body is on the preceding (untouched) lines.
}

// Create one filter button for a client / server / test column.
// `type` is one of the keys of the `map` lookup declared earlier in this
// script; `text` becomes the label and part of the element id; `tooltip`
// (optional) is HTML shown below the button via Bootstrap's tooltip plugin.
function makeButton(type, text, tooltip) {
    var b = document.createElement("button");
    b.innerHTML = text;
    b.id = type + "-" + text.toLowerCase();
    if (tooltip) {
        b.title = tooltip;
        // Bootstrap tooltip with HTML content, placed below the button.
        $(b).attr("data-toggle", "tooltip").attr("data-placement", "bottom").attr("data-html", true).tooltip();
    }
    b.type = "button";
    b.className = type + " btn btn-light";
    $(b).click(clickButton);
    return b;
}

// Toggle the highlight class on elements matching a per-result counter.
// The hovered element's id has the form "<type>-<name>-<result>"; the
// selector built here is ".<type>-<name>.<result>".
function toggleHighlight(e) {
    const comp = e.target.id.split("-");
    const which = "." + comp[0] + "-" + comp[1] + "." + comp[2];
    $(which).toggleClass("btn-highlight");
}

// Sync button active states (and, below, table-cell visibility) with the
// current URL query parameters (?client=...&server=...&test=...).
function setButtonState() {
    var params = new URLSearchParams(history.state ? history.state.path : window.location.search);
    var show = {};
    Object.keys(map).forEach(type => {
        // Selected names come from the query string; repeated params and
        // comma-separated values are both accepted.
        map[type] = params.getAll(type).map(x => x.toLowerCase().split(",")).flat();
        // An empty selection means "show everything of this type".
        if (map[type].length === 0)
            map[type] = $("#" + type + " :button").get().map(x => x.id.replace(type + "-", ""));
        $("#" + type + " :button").removeClass("active font-weight-bold").addClass("text-muted font-weight-light").filter((i, e) => map[type].includes(e.id.replace(type + "-", ""))).addClass("active font-weight-bold").removeClass("text-muted font-weight-light");
        // Build CSS class selectors; this statement continues on the next
        // (untouched) line of the file.
        show[type] = map[type].map(e => "."
+ type + "-" + e); 166 | }); 167 | 168 | $(".result td").add(".result th").add(".result td a").hide(); 169 | 170 | const show_classes = show.client.map(el1 => show.server.map(el2 => el1 + el2)).flat().join(); 171 | $(".client-any," + show_classes).show(); 172 | 173 | $(".result " + show.client.map(e => "th" + e).join()).show(); 174 | $(".result " + show.server.map(e => "th" + e).join()).show(); 175 | $(".measurement," + show.test.join()).show(); 176 | 177 | $("#test :button").each((i, e) => { 178 | $(e).find("span,br").remove(); 179 | var count = { succeeded: 0, unsupported: 0, failed: 0}; 180 | Object.keys(count).map(c => count[c] = $(".btn." + e.id + "." + c + ":visible").length); 181 | Object.keys(count).map(c => { 182 | e.appendChild(document.createElement("br")); 183 | var b = document.createElement("span"); 184 | b.innerHTML = count[c]; 185 | b.className = "btn btn-xs btn-" + color_type[c]; 186 | if (e.classList.contains("active") === false) 187 | b.className += " disabled"; 188 | b.id = e.id + "-" + c; 189 | $(b).hover(toggleHighlight, toggleHighlight); 190 | e.appendChild(b); 191 | }); 192 | }); 193 | } 194 | 195 | function clickButton(e) { 196 | function toggle(array, value) { 197 | var index = array.indexOf(value); 198 | if (index === -1) 199 | array.push(value); 200 | else 201 | array.splice(index, 1); 202 | } 203 | 204 | var b = $(e.target).closest(":button")[0]; 205 | b.blur(); 206 | const type = [...b.classList].filter(x => Object.keys(map).includes(x))[0]; 207 | const which = b.id.replace(type + "-", ""); 208 | 209 | var params = new URLSearchParams(history.state ? 
history.state.path : window.location.search); 210 | if (params.has(type) && params.get(type)) 211 | map[type] = params.get(type).split(","); 212 | else 213 | map[type] = $("#" + type + " :button").get().map(e => e.id.replace(type + "-", "")); 214 | 215 | toggle(map[type], which); 216 | params.set(type, map[type]); 217 | if (map[type].length === $("#" + type + " :button").length) 218 | params.delete(type); 219 | 220 | const comp = decodeURIComponent(params.toString()); 221 | var refresh = window.location.protocol + "//" + window.location.host + window.location.pathname + (comp ? "?" + comp : ""); 222 | window.history.pushState(null, null, refresh); 223 | 224 | setButtonState(); 225 | return false; 226 | } 227 | 228 | function makeTooltip(name, desc) { 229 | return "" + name + "" + (desc === undefined ? "" : "
" + desc); 230 | } 231 | 232 | function process(result) { 233 | var startTime = new Date(1000*result.start_time); 234 | var endTime = new Date(1000*result.end_time); 235 | var duration = result.end_time - result.start_time; 236 | document.getElementById("lastrun-start").innerHTML = dateToString(startTime); 237 | document.getElementById("lastrun-end").innerHTML = dateToString(endTime); 238 | document.getElementById("duration").innerHTML = formatTime(duration); 239 | document.getElementById("quic-vers").innerHTML = 240 | "" + result.quic_version + " (\"draft-" + result.quic_draft + "\")"; 241 | 242 | fillInteropTable(result); 243 | fillMeasurementTable(result); 244 | 245 | $("#client").add("#server").add("#test").empty(); 246 | $("#client").append(result.clients.map(e => makeButton("client", e))); 247 | $("#server").append(result.servers.map(e => makeButton("server", e))); 248 | if (result.hasOwnProperty("tests")) 249 | $("#test").append(Object.keys(result.tests).map(e => makeButton("test", e, makeTooltip(result.tests[e].name, result.tests[e].desc)))); 250 | else { 251 | // TODO: this else can eventually be removed, when all past runs have the test descriptions in the json 252 | const tcases = result.results.concat(result.measurements).flat().map(x => [x.abbr, x.name]).filter((e, i, a) => a.map(x => x[0]).indexOf(e[0]) === i); 253 | $("#test").append(tcases.map(e => makeButton("test", e[0], makeTooltip(e[1])))); 254 | } 255 | setButtonState(); 256 | 257 | $("table.result").delegate("td", "mouseover mouseleave", function(e) { 258 | const t = $(this).closest("table.result"); 259 | if (e.type === "mouseover") { 260 | $(this).parent().addClass("hover-xy"); 261 | t.children("colgroup").eq($(this).index()).addClass("hover-xy"); 262 | t.find("th").eq($(this).index()).addClass("hover-xy"); 263 | } else { 264 | $(this).parent().removeClass("hover-xy"); 265 | t.children("colgroup").eq($(this).index()).removeClass("hover-xy"); 266 | 
t.find("th").eq($(this).index()).removeClass("hover-xy"); 267 | } 268 | }); 269 | } 270 | 271 | function load(dir) { 272 | document.getElementsByTagName("body")[0].classList.add("loading"); 273 | document.getElementById("run-selection-msg").innerHTML = ""; 274 | var xhr = new XMLHttpRequest(); 275 | xhr.responseType = 'json'; 276 | xhr.open('GET', 'logs/' + dir + '/result.json'); 277 | xhr.onreadystatechange = function() { 278 | if(xhr.readyState !== XMLHttpRequest.DONE) return; 279 | if(xhr.status !== 200) { 280 | console.log("Received status: ", xhr.status); 281 | var run = dir.replace("logs_", ""); 282 | var errMsg = 'Error: could not locate result for "' + run + '" run'; 283 | document.getElementById("run-selection-msg").innerHTML = errMsg; 284 | var refresh = window.location.protocol + "//" + window.location.host + window.location.pathname + "?run=" + run; 285 | window.history.pushState(null, null, refresh); 286 | return; 287 | } 288 | var result = xhr.response; 289 | var selectedRun = result.log_dir.replace("logs_", ""); 290 | var refresh = window.location.protocol + "//" + window.location.host + window.location.pathname + "?run=" + selectedRun; 291 | window.history.pushState(null, null, refresh); 292 | process(result); 293 | document.getElementsByTagName("body")[0].classList.remove("loading"); 294 | }; 295 | xhr.send(); 296 | } 297 | 298 | var selectedRun = null; 299 | var queryParams = (new URL(document.location)).searchParams; 300 | if (queryParams.has("run") === true) { 301 | // if the request used a specific run (like ?run=123), then 302 | // load that specifc one 303 | selectedRun = queryParams.get("run") 304 | load("logs_" + selectedRun); 305 | } else { 306 | load("latest"); 307 | } 308 | 309 | // enable loading of old runs 310 | var xhr = new XMLHttpRequest(); 311 | xhr.responseType = 'json'; 312 | xhr.open('GET', 'logs/logs.json'); 313 | xhr.onreadystatechange = function() { 314 | if(xhr.readyState !== XMLHttpRequest.DONE) return; 315 | if(xhr.status 
!== 200) { 316 | console.log("Received status: ", xhr.status); 317 | return; 318 | } 319 | var s = document.createElement("select"); 320 | xhr.response.reverse().forEach(function(el) { 321 | var opt = document.createElement("option"); 322 | opt.innerHTML = el.replace("logs_", ""); 323 | opt.value = el; 324 | s.appendChild(opt); 325 | }); 326 | s.addEventListener("change", function(ev) { 327 | load(ev.currentTarget.value); 328 | }); 329 | document.getElementById("available-runs").appendChild(s); 330 | if (selectedRun != null) { 331 | // just set the selected run, no need to trigger "change" 332 | // event here 333 | s.value = "logs_" + selectedRun; 334 | } 335 | }; 336 | xhr.send(); 337 | })(); 338 | -------------------------------------------------------------------------------- /interop.py: -------------------------------------------------------------------------------- 1 | import json 2 | import logging 3 | import os 4 | from unique_random_slugs import generate_slug 5 | import re 6 | import shutil 7 | import statistics 8 | import subprocess 9 | import sys 10 | import tempfile 11 | from datetime import datetime 12 | from typing import Callable, List, Tuple 13 | 14 | import prettytable 15 | from termcolor import colored 16 | 17 | import testcases 18 | from result import TestResult 19 | from testcases import Perspective 20 | 21 | 22 | class MeasurementResult: 23 | result = TestResult 24 | details = str 25 | 26 | 27 | class LogFileFormatter(logging.Formatter): 28 | def format(self, record): 29 | msg = super(LogFileFormatter, self).format(record) 30 | # remove color control characters 31 | return re.compile(r"\x1B[@-_][0-?]*[ -/]*[@-~]").sub("", msg) 32 | 33 | 34 | class InteropRunner: 35 | _start_time = 0 36 | test_results = {} 37 | measurement_results = {} 38 | compliant = {} 39 | _implementations = {} 40 | _client_server_pairs = [] 41 | _tests = [] 42 | _measurements = [] 43 | _output = "" 44 | _markdown = False 45 | _log_dir = "" 46 | _save_files = False 47 | 
_no_auto_unsupported = [] 48 | 49 | def __init__( 50 | self, 51 | implementations: dict, 52 | client_server_pairs: List[Tuple[str, str]], 53 | tests: List[testcases.TestCase], 54 | measurements: List[testcases.Measurement], 55 | output: str, 56 | markdown: bool, 57 | debug: bool, 58 | save_files=False, 59 | log_dir="", 60 | no_auto_unsupported=[], 61 | ): 62 | logger = logging.getLogger() 63 | logger.setLevel(logging.DEBUG) 64 | console = logging.StreamHandler(stream=sys.stderr) 65 | if debug: 66 | console.setLevel(logging.DEBUG) 67 | else: 68 | console.setLevel(logging.INFO) 69 | logger.addHandler(console) 70 | self._start_time = datetime.now() 71 | self._tests = tests 72 | self._measurements = measurements 73 | self._client_server_pairs = client_server_pairs 74 | self._implementations = implementations 75 | self._output = output 76 | self._markdown = markdown 77 | self._log_dir = log_dir 78 | self._save_files = save_files 79 | self._no_auto_unsupported = no_auto_unsupported 80 | if len(self._log_dir) == 0: 81 | self._log_dir = "logs_{:%Y-%m-%dT%H:%M:%S}".format(self._start_time) 82 | if os.path.exists(self._log_dir): 83 | sys.exit("Log dir " + self._log_dir + " already exists.") 84 | logging.info("Saving logs to %s.", self._log_dir) 85 | for client, server in client_server_pairs: 86 | for test in self._tests: 87 | self.test_results.setdefault(server, {}).setdefault( 88 | client, {} 89 | ).setdefault(test, {}) 90 | for measurement in measurements: 91 | self.measurement_results.setdefault(server, {}).setdefault( 92 | client, {} 93 | ).setdefault(measurement, {}) 94 | 95 | def _is_unsupported(self, lines: List[str]) -> bool: 96 | return any("exited with code 127" in str(line) for line in lines) or any( 97 | "exit status 127" in str(line) for line in lines 98 | ) 99 | 100 | def _check_impl_is_compliant(self, name: str) -> bool: 101 | """check if an implementation return UNSUPPORTED for unknown test cases""" 102 | if name in self.compliant: 103 | logging.debug( 104 | 
"%s already tested for compliance: %s", name, str(self.compliant) 105 | ) 106 | return self.compliant[name] 107 | 108 | client_log_dir = tempfile.TemporaryDirectory(dir="/tmp", prefix="logs_client_") 109 | www_dir = tempfile.TemporaryDirectory(dir="/tmp", prefix="compliance_www_") 110 | certs_dir = tempfile.TemporaryDirectory(dir="/tmp", prefix="compliance_certs_") 111 | downloads_dir = tempfile.TemporaryDirectory( 112 | dir="/tmp", prefix="compliance_downloads_" 113 | ) 114 | 115 | testcases.generate_cert_chain(certs_dir.name) 116 | 117 | # check that the client is capable of returning UNSUPPORTED 118 | logging.debug("Checking compliance of %s client", name) 119 | cmd = ( 120 | "CERTS=" + certs_dir.name + " " 121 | "TESTCASE_CLIENT=" + generate_slug() + " " 122 | "SERVER_LOGS=/dev/null " 123 | "CLIENT_LOGS=" + client_log_dir.name + " " 124 | "WWW=" + www_dir.name + " " 125 | "DOWNLOADS=" + downloads_dir.name + " " 126 | 'SCENARIO="simple-p2p --delay=15ms --bandwidth=10Mbps --queue=25" ' 127 | "CLIENT=" + self._implementations[name]["image"] + " " 128 | "SERVER=" 129 | + self._implementations[name]["image"] 130 | + " " # only needed so docker compose doesn't complain 131 | "docker compose --env-file empty.env up --timeout 0 --abort-on-container-exit -V sim client" 132 | ) 133 | output = subprocess.run( 134 | cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT 135 | ) 136 | if not self._is_unsupported(output.stdout.splitlines()): 137 | logging.error("%s client not compliant.", name) 138 | logging.debug("%s", output.stdout.decode("utf-8", errors="replace")) 139 | self.compliant[name] = False 140 | return False 141 | logging.debug("%s client compliant.", name) 142 | 143 | # check that the server is capable of returning UNSUPPORTED 144 | logging.debug("Checking compliance of %s server", name) 145 | server_log_dir = tempfile.TemporaryDirectory(dir="/tmp", prefix="logs_server_") 146 | cmd = ( 147 | "CERTS=" + certs_dir.name + " " 148 | "TESTCASE_SERVER=" + 
generate_slug() + " " 149 | "SERVER_LOGS=" + server_log_dir.name + " " 150 | "CLIENT_LOGS=/dev/null " 151 | "WWW=" + www_dir.name + " " 152 | "DOWNLOADS=" + downloads_dir.name + " " 153 | "CLIENT=" 154 | + self._implementations[name]["image"] 155 | + " " # only needed so docker compose doesn't complain 156 | "SERVER=" + self._implementations[name]["image"] + " " 157 | "docker compose --env-file empty.env up -V server" 158 | ) 159 | output = subprocess.run( 160 | cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT 161 | ) 162 | if not self._is_unsupported(output.stdout.splitlines()): 163 | logging.error("%s server not compliant.", name) 164 | logging.debug("%s", output.stdout.decode("utf-8", errors="replace")) 165 | self.compliant[name] = False 166 | return False 167 | logging.debug("%s server compliant.", name) 168 | 169 | # remember compliance test outcome 170 | self.compliant[name] = True 171 | return True 172 | 173 | def _postprocess_results(self): 174 | clients = list(set(client for client, _ in self._client_server_pairs)) 175 | servers = list(set(server for _, server in self._client_server_pairs)) 176 | questionable = [TestResult.FAILED, TestResult.UNSUPPORTED] 177 | # If a client failed a test against all servers, make the test unsupported for the client 178 | if len(servers) > 1: 179 | for c in set(clients) - set(self._no_auto_unsupported): 180 | for t in self._tests: 181 | if all(self.test_results[s][c][t] in questionable for s in servers): 182 | print( 183 | f"Client {c} failed or did not support test {t.name()} " 184 | + 'against all servers, marking the entire test as "unsupported"' 185 | ) 186 | for s in servers: 187 | self.test_results[s][c][t] = TestResult.UNSUPPORTED 188 | # If a server failed a test against all clients, make the test unsupported for the server 189 | if len(clients) > 1: 190 | for s in set(servers) - set(self._no_auto_unsupported): 191 | for t in self._tests: 192 | if all(self.test_results[s][c][t] in questionable for c 
in clients): 193 | print( 194 | f"Server {s} failed or did not support test {t.name()} " 195 | + 'against all clients, marking the entire test as "unsupported"' 196 | ) 197 | for c in clients: 198 | self.test_results[s][c][t] = TestResult.UNSUPPORTED 199 | 200 | def _print_results(self): 201 | """print the interop table""" 202 | logging.info("Run took %s", datetime.now() - self._start_time) 203 | 204 | def get_letters(result): 205 | return ( 206 | result.symbol() 207 | + "(" 208 | + ",".join( 209 | [test.abbreviation() for test in cell if cell[test] is result] 210 | ) 211 | + ")" 212 | ) 213 | 214 | if len(self._tests) > 0: 215 | t = prettytable.PrettyTable() 216 | if self._markdown: 217 | t.set_style(prettytable.MARKDOWN) 218 | else: 219 | t.hrules = prettytable.ALL 220 | t.vrules = prettytable.ALL 221 | rows = {} 222 | columns = {} 223 | for client, server in self._client_server_pairs: 224 | columns[server] = {} 225 | row = rows.setdefault(client, {}) 226 | cell = self.test_results[server][client] 227 | br = "
" if self._markdown else "\n" 228 | res = colored(get_letters(TestResult.SUCCEEDED), "green") + br 229 | res += colored(get_letters(TestResult.UNSUPPORTED), "grey") + br 230 | res += colored(get_letters(TestResult.FAILED), "red") 231 | row[server] = res 232 | 233 | t.field_names = [""] + [column for column, _ in columns.items()] 234 | for client, results in rows.items(): 235 | row = [client] 236 | for server, _ in columns.items(): 237 | row += [results.setdefault(server, "")] 238 | t.add_row(row) 239 | print(t) 240 | 241 | if len(self._measurements) > 0: 242 | t = prettytable.PrettyTable() 243 | if self._markdown: 244 | t.set_style(prettytable.MARKDOWN) 245 | else: 246 | t.hrules = prettytable.ALL 247 | t.vrules = prettytable.ALL 248 | rows = {} 249 | columns = {} 250 | for client, server in self._client_server_pairs: 251 | columns[server] = {} 252 | row = rows.setdefault(client, {}) 253 | cell = self.measurement_results[server][client] 254 | results = [] 255 | for measurement in self._measurements: 256 | res = cell[measurement] 257 | if not hasattr(res, "result"): 258 | continue 259 | if res.result == TestResult.SUCCEEDED: 260 | results.append( 261 | colored( 262 | measurement.abbreviation() + ": " + res.details, 263 | "green", 264 | ) 265 | ) 266 | elif res.result == TestResult.UNSUPPORTED: 267 | results.append(colored(measurement.abbreviation(), "grey")) 268 | elif res.result == TestResult.FAILED: 269 | results.append(colored(measurement.abbreviation(), "red")) 270 | row[server] = "\n".join(results) 271 | t.field_names = [""] + [column for column, _ in columns.items()] 272 | for client, results in rows.items(): 273 | row = [client] 274 | for server, _ in columns.items(): 275 | row += [results.setdefault(server, "")] 276 | t.add_row(row) 277 | print(t) 278 | 279 | def _export_results(self): 280 | if not self._output: 281 | return 282 | clients = list(set(client for client, _ in self._client_server_pairs)) 283 | servers = list(set(server for _, server in 
self._client_server_pairs)) 284 | out = { 285 | "start_time": self._start_time.timestamp(), 286 | "end_time": datetime.now().timestamp(), 287 | "log_dir": self._log_dir, 288 | "servers": servers, 289 | "clients": clients, 290 | "urls": {x: self._implementations[x]["url"] for x in clients + servers}, 291 | "tests": { 292 | x.abbreviation(): { 293 | "name": x.name(), 294 | "desc": x.desc(), 295 | } 296 | for x in self._tests + self._measurements 297 | }, 298 | "quic_draft": testcases.QUIC_DRAFT, 299 | "quic_version": testcases.QUIC_VERSION, 300 | "results": [], 301 | "measurements": [], 302 | } 303 | 304 | for client in clients: 305 | for server in servers: 306 | results = [] 307 | for test in self._tests: 308 | r = None 309 | if hasattr(self.test_results[server][client][test], "value"): 310 | r = self.test_results[server][client][test].value 311 | results.append( 312 | { 313 | "abbr": test.abbreviation(), 314 | "name": test.name(), # TODO: remove 315 | "result": r, 316 | } 317 | ) 318 | out["results"].append(results) 319 | 320 | measurements = [] 321 | for measurement in self._measurements: 322 | res = self.measurement_results[server][client][measurement] 323 | if not hasattr(res, "result"): 324 | continue 325 | measurements.append( 326 | { 327 | "name": measurement.name(), # TODO: remove 328 | "abbr": measurement.abbreviation(), 329 | "result": res.result.value, 330 | "details": res.details, 331 | } 332 | ) 333 | out["measurements"].append(measurements) 334 | 335 | f = open(self._output, "w") 336 | json.dump(out, f) 337 | f.close() 338 | 339 | def _copy_logs(self, container: str, dir: tempfile.TemporaryDirectory): 340 | cmd = ( 341 | "docker cp \"$(docker ps -a --format '{{.ID}} {{.Names}}' | awk '/^.* " 342 | + container 343 | + "$/ {print $1}')\":/logs/. 
" 344 | + dir.name 345 | ) 346 | r = subprocess.run( 347 | cmd, 348 | shell=True, 349 | stdout=subprocess.PIPE, 350 | stderr=subprocess.STDOUT, 351 | ) 352 | if r.returncode != 0: 353 | logging.info( 354 | "Copying logs from %s failed: %s", 355 | container, 356 | r.stdout.decode("utf-8", errors="replace"), 357 | ) 358 | 359 | def _run_testcase( 360 | self, server: str, client: str, test: Callable[[], testcases.TestCase] 361 | ) -> TestResult: 362 | return self._run_test(server, client, None, test)[0] 363 | 364 | def _run_test( 365 | self, 366 | server: str, 367 | client: str, 368 | log_dir_prefix: None, 369 | test: Callable[[], testcases.TestCase], 370 | ) -> Tuple[TestResult, float]: 371 | start_time = datetime.now() 372 | sim_log_dir = tempfile.TemporaryDirectory(dir="/tmp", prefix="logs_sim_") 373 | server_log_dir = tempfile.TemporaryDirectory(dir="/tmp", prefix="logs_server_") 374 | client_log_dir = tempfile.TemporaryDirectory(dir="/tmp", prefix="logs_client_") 375 | log_file = tempfile.NamedTemporaryFile(dir="/tmp", prefix="output_log_") 376 | log_handler = logging.FileHandler(log_file.name) 377 | log_handler.setLevel(logging.DEBUG) 378 | 379 | formatter = LogFileFormatter("%(asctime)s %(message)s") 380 | log_handler.setFormatter(formatter) 381 | logging.getLogger().addHandler(log_handler) 382 | 383 | testcase = test( 384 | sim_log_dir=sim_log_dir, 385 | client_keylog_file=client_log_dir.name + "/keys.log", 386 | server_keylog_file=server_log_dir.name + "/keys.log", 387 | ) 388 | print( 389 | "Server: " 390 | + server 391 | + ". Client: " 392 | + client 393 | + ". 
Running test case: " 394 | + str(testcase) 395 | ) 396 | 397 | reqs = " ".join([testcase.urlprefix() + p for p in testcase.get_paths()]) 398 | logging.debug("Requests: %s", reqs) 399 | params = ( 400 | "WAITFORSERVER=server:443 " 401 | "CERTS=" + testcase.certs_dir() + " " 402 | "TESTCASE_SERVER=" + testcase.testname(Perspective.SERVER) + " " 403 | "TESTCASE_CLIENT=" + testcase.testname(Perspective.CLIENT) + " " 404 | "WWW=" + testcase.www_dir() + " " 405 | "DOWNLOADS=" + testcase.download_dir() + " " 406 | "SERVER_LOGS=" + server_log_dir.name + " " 407 | "CLIENT_LOGS=" + client_log_dir.name + " " 408 | 'SCENARIO="{}" ' 409 | "CLIENT=" + self._implementations[client]["image"] + " " 410 | "SERVER=" + self._implementations[server]["image"] + " " 411 | 'REQUESTS="' + reqs + '" ' 412 | ).format(testcase.scenario()) 413 | params += " ".join(testcase.additional_envs()) 414 | containers = "sim client server " + " ".join(testcase.additional_containers()) 415 | cmd = ( 416 | params 417 | + " docker compose --env-file empty.env up --abort-on-container-exit --timeout 1 " 418 | + containers 419 | ) 420 | logging.debug("Command: %s", cmd) 421 | 422 | status = TestResult.FAILED 423 | output = "" 424 | expired = False 425 | try: 426 | r = subprocess.run( 427 | cmd, 428 | shell=True, 429 | stdout=subprocess.PIPE, 430 | stderr=subprocess.STDOUT, 431 | timeout=testcase.timeout(), 432 | ) 433 | output = r.stdout 434 | except subprocess.TimeoutExpired as ex: 435 | output = ex.stdout 436 | expired = True 437 | 438 | logging.debug("%s", output.decode("utf-8", errors="replace")) 439 | 440 | if expired: 441 | logging.debug("Test failed: took longer than %ds.", testcase.timeout()) 442 | r = subprocess.run( 443 | "docker compose --env-file empty.env stop " + containers, 444 | shell=True, 445 | stdout=subprocess.PIPE, 446 | stderr=subprocess.STDOUT, 447 | timeout=60, 448 | ) 449 | logging.debug("%s", r.stdout.decode("utf-8", errors="replace")) 450 | 451 | # copy the pcaps from the simulator 
452 | self._copy_logs("sim", sim_log_dir) 453 | self._copy_logs("client", client_log_dir) 454 | self._copy_logs("server", server_log_dir) 455 | 456 | if not expired: 457 | lines = output.splitlines() 458 | if self._is_unsupported(lines): 459 | status = TestResult.UNSUPPORTED 460 | elif any("client exited with code 0" in str(line) for line in lines): 461 | try: 462 | status = testcase.check() 463 | except FileNotFoundError as e: 464 | logging.error(f"testcase.check() threw FileNotFoundError: {e}") 465 | status = TestResult.FAILED 466 | 467 | # save logs 468 | logging.getLogger().removeHandler(log_handler) 469 | log_handler.close() 470 | if status == TestResult.FAILED or status == TestResult.SUCCEEDED: 471 | log_dir = self._log_dir + "/" + server + "_" + client + "/" + str(testcase) 472 | if log_dir_prefix: 473 | log_dir += "/" + log_dir_prefix 474 | shutil.copytree(server_log_dir.name, log_dir + "/server") 475 | shutil.copytree(client_log_dir.name, log_dir + "/client") 476 | shutil.copytree(sim_log_dir.name, log_dir + "/sim") 477 | shutil.copyfile(log_file.name, log_dir + "/output.txt") 478 | if self._save_files and status == TestResult.FAILED: 479 | shutil.copytree(testcase.www_dir(), log_dir + "/www") 480 | try: 481 | shutil.copytree(testcase.download_dir(), log_dir + "/downloads") 482 | except Exception as exception: 483 | logging.info("Could not copy downloaded files: %s", exception) 484 | 485 | testcase.cleanup() 486 | server_log_dir.cleanup() 487 | client_log_dir.cleanup() 488 | sim_log_dir.cleanup() 489 | logging.debug( 490 | "Test: %s took %ss, status: %s", 491 | str(testcase), 492 | (datetime.now() - start_time).total_seconds(), 493 | str(status), 494 | ) 495 | 496 | # measurements also have a value 497 | if hasattr(testcase, "result"): 498 | value = testcase.result() 499 | else: 500 | value = None 501 | 502 | return status, value 503 | 504 | def _run_measurement( 505 | self, server: str, client: str, test: Callable[[], testcases.Measurement] 506 | ) -> 
MeasurementResult: 507 | values = [] 508 | for i in range(0, test.repetitions()): 509 | result, value = self._run_test(server, client, "%d" % (i + 1), test) 510 | if result != TestResult.SUCCEEDED: 511 | res = MeasurementResult() 512 | res.result = result 513 | res.details = "" 514 | return res 515 | values.append(value) 516 | 517 | logging.debug(values) 518 | res = MeasurementResult() 519 | res.result = TestResult.SUCCEEDED 520 | res.details = "{:.0f} (± {:.0f}) {}".format( 521 | statistics.mean(values), statistics.stdev(values), test.unit() 522 | ) 523 | return res 524 | 525 | def run(self): 526 | """run the interop test suite and output the table""" 527 | 528 | nr_failed = 0 529 | for client, server in self._client_server_pairs: 530 | logging.debug( 531 | "Running with server %s (%s) and client %s (%s)", 532 | server, 533 | self._implementations[server]["image"], 534 | client, 535 | self._implementations[client]["image"], 536 | ) 537 | if not ( 538 | self._check_impl_is_compliant(server) 539 | and self._check_impl_is_compliant(client) 540 | ): 541 | logging.info("Not compliant, skipping") 542 | continue 543 | 544 | # run the test cases 545 | for testcase in self._tests: 546 | status = self._run_testcase(server, client, testcase) 547 | self.test_results[server][client][testcase] = status 548 | if status == TestResult.FAILED: 549 | nr_failed += 1 550 | 551 | # run the measurements 552 | for measurement in self._measurements: 553 | res = self._run_measurement(server, client, measurement) 554 | self.measurement_results[server][client][measurement] = res 555 | 556 | self._postprocess_results() 557 | self._print_results() 558 | self._export_results() 559 | return nr_failed 560 | -------------------------------------------------------------------------------- /testcases.py: -------------------------------------------------------------------------------- 1 | import abc 2 | import filecmp 3 | import logging 4 | import os 5 | import random 6 | from unique_random_slugs 
import generate_slug
import re
import shutil
import string
import subprocess
import sys
import tempfile
from datetime import timedelta
from enum import Enum, IntEnum
from trace import (
    QUIC_V2,
    Direction,
    PacketType,
    TraceAnalyzer,
    get_direction,
    get_packet_type,
)
from typing import List, Tuple

from Crypto.Cipher import AES

from result import TestResult

# Byte-size helpers used by the test case definitions.
KB = 1 << 10
MB = 1 << 20

QUIC_DRAFT = 34  # draft-34
QUIC_VERSION = hex(0x1)


class Perspective(Enum):
    """Role of an endpoint in a test run."""

    SERVER = "server"
    CLIENT = "client"


class ECN(IntEnum):
    """Two-bit ECN codepoint values."""

    NONE = 0
    ECT1 = 1
    ECT0 = 2
    CE = 3


def random_string(length: int):
    """Generate a random string of fixed length"""
    letters = string.ascii_lowercase
    return "".join(random.choice(letters) for i in range(length))


def generate_cert_chain(directory: str, length: int = 1):
    """Call ./certs.sh to generate a certificate chain in `directory`.

    Terminates the process via sys.exit(1) if certificate creation fails.
    """
    cmd = "./certs.sh " + directory + " " + str(length)
    r = subprocess.run(
        cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT
    )
    logging.debug("%s", r.stdout.decode("utf-8"))
    if r.returncode != 0:
        logging.info("Unable to create certificates")
        sys.exit(1)


class TestCase(abc.ABC):
    # Class-level defaults; the per-instance values are assigned in
    # __init__ and in the lazy *_dir() accessors defined further down.
    _files = []
    _www_dir = None
    _client_keylog_file = None
    _server_keylog_file = None
    _download_dir = None
    _sim_log_dir = None
    _cert_dir = None
    _cached_server_trace = None
    _cached_client_trace = None

    def __init__(
        self,
        sim_log_dir: tempfile.TemporaryDirectory,
        client_keylog_file: str,
        server_keylog_file: str,
    ):
        """Store the simulator log dir and the client/server key log paths."""
        self._server_keylog_file = server_keylog_file
        self._client_keylog_file = client_keylog_file
        self._files = []
        self._sim_log_dir = sim_log_dir

    @abc.abstractmethod
    def name(self):
        """Short name of the test case; also its string representation."""
        pass

    @abc.abstractmethod
    def desc(self):
        """A one-sentence, human-readable description of the test case."""
        pass

    def __str__(self):
        return self.name()

    def testname(self, p: Perspective):
        """The name of testcase presented to the endpoint Docker images"""
        return self.name()

    @staticmethod
    def scenario() -> str:
        """Scenario for the ns3 simulator"""
        return "simple-p2p --delay=15ms --bandwidth=10Mbps --queue=25"

    @staticmethod
    def timeout() -> int:
        """timeout in s"""
        return 60

    @staticmethod
    def urlprefix() -> str:
        """URL prefix"""
        return "https://server4:443/"

    @staticmethod
    def additional_envs() -> List[str]:
        """Extra environment variable assignments passed to the containers."""
        return [""]

    @staticmethod
    def additional_containers() -> List[str]:
        """Extra docker-compose services required by this test case."""
        return [""]

    def www_dir(self):
        """Return the server's web root (created lazily), with a trailing slash."""
        if not self._www_dir:
            self._www_dir = tempfile.TemporaryDirectory(dir="/tmp", prefix="www_")
        return self._www_dir.name + "/"

    def download_dir(self):
        """Return the client's download directory (created lazily), with a trailing slash."""
        if not self._download_dir:
            self._download_dir = tempfile.TemporaryDirectory(
                dir="/tmp", prefix="download_"
            )
        return self._download_dir.name + "/"

    def certs_dir(self):
        """Return a directory containing a freshly generated cert chain (created lazily)."""
        if not self._cert_dir:
            self._cert_dir = tempfile.TemporaryDirectory(dir="/tmp", prefix="certs_")
            generate_cert_chain(self._cert_dir.name)
        return self._cert_dir.name + "/"

    def _is_valid_keylog(self, filename) -> bool:
        """Check that the file exists, is non-empty and looks like an NSS key log."""
        if not os.path.isfile(filename) or os.path.getsize(filename) == 0:
            return False
        with open(filename, "r") as file:
            if not re.search(
                r"^SERVER_HANDSHAKE_TRAFFIC_SECRET", file.read(), re.MULTILINE
            ):
                logging.info("Key log file %s is using incorrect format.", filename)
                return False
        return True

    def _keylog_file(self) -> str:
        """Return a usable key log file (client's preferred), or None if neither is valid."""
        if self._is_valid_keylog(self._client_keylog_file):
            logging.debug("Using the client's key log file.")
            return self._client_keylog_file
        elif self._is_valid_keylog(self._server_keylog_file):
            logging.debug("Using the server's key log file.")
            return self._server_keylog_file
        # Falls through and implicitly returns None when no valid key log exists.
        logging.debug("No key log file found.")

    def _inject_keylog_if_possible(self, trace: str):
        """
        Inject the keylog file into the pcap file if it is available and valid.
        """
        keylog = self._keylog_file()
        if keylog is None:
            return

        with tempfile.NamedTemporaryFile() as tmp:
            # editcap writes the pcap with embedded TLS secrets to tmp; the
            # original trace is only overwritten if editcap succeeded.
            r = subprocess.run(
                f"editcap --inject-secrets tls,{keylog} {trace} {tmp.name}",
                shell=True,
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT,
            )
            logging.debug("%s", r.stdout.decode("utf-8"))
            if r.returncode != 0:
                return
            shutil.copy(tmp.name, trace)

    def _client_trace(self):
        """Lazily build (and cache) the analyzer for the client-side pcap."""
        if self._cached_client_trace is None:
            trace = self._sim_log_dir.name + "/trace_node_left.pcap"
            self._inject_keylog_if_possible(trace)
            self._cached_client_trace = TraceAnalyzer(trace, self._keylog_file())
        return self._cached_client_trace

    def _server_trace(self):
        """Lazily build (and cache) the analyzer for the server-side pcap."""
        if self._cached_server_trace is None:
            trace = self._sim_log_dir.name + "/trace_node_right.pcap"
            self._inject_keylog_if_possible(trace)
            self._cached_server_trace = TraceAnalyzer(trace, self._keylog_file())
        return self._cached_server_trace

    def _generate_random_file(self, size: int, filename: str = None) -> str:
        """Create a file of `size` pseudorandom bytes in the www dir; return its name."""
        if filename is None:
            filename = generate_slug()
        # see https://www.stefanocappellini.it/generate-pseudorandom-bytes-with-python/ for benchmarks
        enc = AES.new(os.urandom(32), AES.MODE_OFB, b"a" * 16)
        f = open(self.www_dir() + filename, "wb")
        f.write(enc.encrypt(b" " * size))
        f.close()
        logging.debug("Generated random file: %s of size: %d", filename, size)
        return filename

    def _retry_sent(self) -> bool:
        """True if the client-side trace contains at least one Retry packet."""
        return len(self._client_trace().get_retry()) > 0

    def _check_version_and_files(self) -> bool:
        """Verify exactly one QUIC version (the expected one) was used, then compare files."""
        versions = [hex(int(v, 0)) for v in self._get_versions()]
        if len(versions) != 1:
            logging.info("Expected exactly one version. Got %s", versions)
            return False
        if QUIC_VERSION not in versions:
            logging.info("Wrong version. Expected %s, got %s", QUIC_VERSION, versions)
            return False
        return self._check_files()

    def _check_files(self) -> bool:
        """Compare the downloaded files against the served originals (names, sizes, contents)."""
        if len(self._files) == 0:
            raise Exception("No test files generated.")
        files = [
            n
            for n in os.listdir(self.download_dir())
            if os.path.isfile(os.path.join(self.download_dir(), n))
        ]
        too_many = [f for f in files if f not in self._files]
        if len(too_many) != 0:
            logging.info("Found unexpected downloaded files: %s", too_many)
        too_few = [f for f in self._files if f not in files]
        if len(too_few) != 0:
            logging.info("Missing files: %s", too_few)
        if len(too_many) != 0 or len(too_few) != 0:
            return False
        for f in self._files:
            fp = self.download_dir() + f
            if not os.path.isfile(fp):
                logging.info("File %s does not exist.", fp)
                return False
            try:
                # Compare sizes first: cheaper than a byte-wise comparison and
                # produces a more informative log message on mismatch.
                size = os.path.getsize(self.www_dir() + f)
                downloaded_size = os.path.getsize(fp)
                if size != downloaded_size:
                    logging.info(
                        "File size of %s doesn't match. Original: %d bytes, downloaded: %d bytes.",
                        fp,
                        size,
                        downloaded_size,
                    )
                    return False
                if not filecmp.cmp(self.www_dir() + f, fp, shallow=False):
                    logging.info("File contents of %s do not match.", fp)
                    return False
            except Exception as exception:
                logging.info(
                    "Could not compare files %s and %s: %s",
                    self.www_dir() + f,
                    fp,
                    exception,
                )
                return False
        logging.debug("Check of downloaded files succeeded.")
        return True

    def _count_handshakes(self) -> int:
        """Count the number of QUIC handshakes"""
        tr = self._server_trace()
        # Determine the number of handshakes by looking at Initial packets.
        # This is easier, since the SCID of Initial packets doesn't change.
        return len(set([p.scid for p in tr.get_initial(Direction.FROM_SERVER)]))

    def _get_versions(self) -> set:
        """Get the QUIC versions"""
        tr = self._server_trace()
        return set([p.version for p in tr.get_initial(Direction.FROM_SERVER)])

    def _payload_size(self, packets: List) -> int:
        """Get the sum of the payload sizes of all packets"""
        size = 0
        for p in packets:
            # Long-header packets expose payload / remaining_payload; short-header
            # (1-RTT) packets expose protected_payload. The fields appear to be
            # colon-separated byte strings (tshark export format), so the number
            # of ':'-separated elements is the byte count — TODO confirm.
            if hasattr(p, "long_packet_type") or hasattr(p, "long_packet_type_v2"):
                if hasattr(p, "payload"):  # when keys are available
                    size += len(p.payload.split(":"))
                else:
                    size += len(p.remaining_payload.split(":"))
            else:
                if hasattr(p, "protected_payload"):
                    size += len(p.protected_payload.split(":"))
        return size

    def cleanup(self):
        """Delete the temporary www and download directories."""
        if self._www_dir:
            self._www_dir.cleanup()
            self._www_dir = None
        if self._download_dir:
            self._download_dir.cleanup()
            self._download_dir = None

    @abc.abstractmethod
    def get_paths(self):
        """Return the list of URL paths the client should request."""
        pass

    @abc.abstractmethod
    def check(self) -> TestResult:
        """Evaluate the test run. Subclasses call super().check() first, which
        materializes (and caches) both traces, including keylog injection."""
        self._client_trace()
        self._server_trace()
        pass
class Measurement(TestCase):
    """A test case that additionally produces a numeric result (e.g. goodput)."""

    @abc.abstractmethod
    def result(self) -> float:
        """The measured value of a single run."""
        pass

    @staticmethod
    @abc.abstractmethod
    def unit() -> str:
        """Unit of the measured value (for display)."""
        pass

    @staticmethod
    @abc.abstractmethod
    def repetitions() -> int:
        """How many times the measurement is repeated."""
        pass


class TestCaseVersionNegotiation(TestCase):
    """Check that the server answers an unknown version with a Version Negotiation
    packet whose SCID echoes the DCID of the client's Initial."""

    @staticmethod
    def name():
        return "versionnegotiation"

    @staticmethod
    def abbreviation():
        return "V"

    @staticmethod
    def desc():
        return "A version negotiation packet is elicited and acted on."

    def get_paths(self):
        return [""]

    def check(self) -> TestResult:
        super().check()
        tr = self._client_trace()
        initials = tr.get_initial(Direction.FROM_CLIENT)
        # Only the DCID of the first Initial matters here.
        dcid = ""
        for p in initials:
            dcid = p.dcid
            break
        if dcid == "":
            logging.info("Didn't find an Initial / a DCID.")
            return TestResult.FAILED
        vnps = tr.get_vnp()
        for p in vnps:
            if p.scid == dcid:
                return TestResult.SUCCEEDED
        logging.info("Didn't find a Version Negotiation Packet with matching SCID.")
        return TestResult.FAILED


class TestCaseHandshake(TestCase):
    """Basic handshake: one connection, a 1 KB download, no Retry expected."""

    @staticmethod
    def name():
        return "handshake"

    @staticmethod
    def abbreviation():
        return "H"

    @staticmethod
    def desc():
        return "Handshake completes successfully."

    def get_paths(self):
        self._files = [self._generate_random_file(1 * KB)]
        return self._files

    def check(self) -> TestResult:
        super().check()
        if not self._check_version_and_files():
            return TestResult.FAILED
        if self._retry_sent():
            logging.info("Didn't expect a Retry to be sent.")
            return TestResult.FAILED
        num_handshakes = self._count_handshakes()
        if num_handshakes != 1:
            logging.info("Expected exactly 1 handshake. Got: %d", num_handshakes)
            return TestResult.FAILED
        return TestResult.SUCCEEDED


class TestCaseLongRTT(TestCaseHandshake):
    """Handshake with a 750 ms one-way delay; the client is expected to
    retransmit its ClientHello at least once."""

    @staticmethod
    def abbreviation():
        return "LR"

    @staticmethod
    def name():
        return "longrtt"

    @staticmethod
    def testname(p: Perspective):
        return "handshake"

    @staticmethod
    def desc():
        return "Handshake completes when RTT is long."

    @staticmethod
    def scenario() -> str:
        """Scenario for the ns3 simulator"""
        return "simple-p2p --delay=750ms --bandwidth=10Mbps --queue=25"

    def check(self) -> TestResult:
        super().check()
        num_handshakes = self._count_handshakes()
        if num_handshakes != 1:
            logging.info("Expected exactly 1 handshake. Got: %d", num_handshakes)
            return TestResult.FAILED
        if not self._check_version_and_files():
            return TestResult.FAILED
        num_ch = 0
        for p in self._client_trace().get_initial(Direction.FROM_CLIENT):
            if hasattr(p, "tls_handshake_type"):
                if p.tls_handshake_type == "1":
                    num_ch += 1
            # Retransmitted ClientHello does not have
            # tls_handshake_type attribute. See
            # https://gitlab.com/wireshark/wireshark/-/issues/18696
            # for details.
            elif hasattr(p, "retransmission") or hasattr(p, "overlap"):
                num_ch += 1
        if num_ch < 2:
            logging.info("Expected at least 2 ClientHellos. Got: %d", num_ch)
            return TestResult.FAILED
        return TestResult.SUCCEEDED


class TestCaseTransfer(TestCase):
    """Plain multi-file transfer over a single connection."""

    @staticmethod
    def name():
        return "transfer"

    @staticmethod
    def abbreviation():
        return "DC"

    @staticmethod
    def desc():
        return "Stream data is being sent and received correctly. Connection close completes with a zero error code."
453 | 454 | def get_paths(self): 455 | self._files = [ 456 | self._generate_random_file(2 * MB), 457 | self._generate_random_file(3 * MB), 458 | self._generate_random_file(5 * MB), 459 | ] 460 | return self._files 461 | 462 | def check(self) -> TestResult: 463 | super().check() 464 | num_handshakes = self._count_handshakes() 465 | if num_handshakes != 1: 466 | logging.info("Expected exactly 1 handshake. Got: %d", num_handshakes) 467 | return TestResult.FAILED 468 | if not self._check_version_and_files(): 469 | return TestResult.FAILED 470 | return TestResult.SUCCEEDED 471 | 472 | 473 | class TestCaseChaCha20(TestCase): 474 | @staticmethod 475 | def name(): 476 | return "chacha20" 477 | 478 | @staticmethod 479 | def testname(p: Perspective): 480 | return "chacha20" 481 | 482 | @staticmethod 483 | def abbreviation(): 484 | return "C20" 485 | 486 | @staticmethod 487 | def desc(): 488 | return "Handshake completes using ChaCha20." 489 | 490 | def get_paths(self): 491 | self._files = [self._generate_random_file(3 * MB)] 492 | return self._files 493 | 494 | def check(self) -> TestResult: 495 | super().check() 496 | num_handshakes = self._count_handshakes() 497 | if num_handshakes != 1: 498 | logging.info("Expected exactly 1 handshake. Got: %d", num_handshakes) 499 | return TestResult.FAILED 500 | ciphersuites = [] 501 | for p in self._client_trace().get_initial(Direction.FROM_CLIENT): 502 | if hasattr(p, "tls_handshake_ciphersuite"): 503 | ciphersuites.append(p.tls_handshake_ciphersuite) 504 | if len(set(ciphersuites)) != 1 or ( 505 | ciphersuites[0] != "4867" and ciphersuites[0] != "0x1303" 506 | ): 507 | logging.info( 508 | "Expected only ChaCha20 cipher suite to be offered. 
Got: %s", 509 | set(ciphersuites), 510 | ) 511 | return TestResult.FAILED 512 | if not self._check_version_and_files(): 513 | return TestResult.FAILED 514 | return TestResult.SUCCEEDED 515 | 516 | 517 | class TestCaseMultiplexing(TestCase): 518 | @staticmethod 519 | def name(): 520 | return "multiplexing" 521 | 522 | @staticmethod 523 | def testname(p: Perspective): 524 | return "transfer" 525 | 526 | @staticmethod 527 | def abbreviation(): 528 | return "M" 529 | 530 | @staticmethod 531 | def desc(): 532 | return "Thousands of files are transferred over a single connection, and server increased stream limits to accomodate client requests." 533 | 534 | def get_paths(self): 535 | for _ in range(1, 2000): 536 | self._files.append(self._generate_random_file(32)) 537 | return self._files 538 | 539 | def check(self) -> TestResult: 540 | super().check() 541 | if not self._keylog_file(): 542 | logging.info("Can't check test result. SSLKEYLOG required.") 543 | return TestResult.UNSUPPORTED 544 | num_handshakes = self._count_handshakes() 545 | if num_handshakes != 1: 546 | logging.info("Expected exactly 1 handshake. 
Got: %d", num_handshakes) 547 | return TestResult.FAILED 548 | if not self._check_version_and_files(): 549 | return TestResult.FAILED 550 | # Check that the server set a bidirectional stream limit <= 1000 551 | checked_stream_limit = False 552 | for p in self._client_trace().get_handshake(Direction.FROM_SERVER): 553 | if hasattr(p, "tls.quic.parameter.initial_max_streams_bidi"): 554 | checked_stream_limit = True 555 | stream_limit = int( 556 | getattr(p, "tls.quic.parameter.initial_max_streams_bidi") 557 | ) 558 | logging.debug("Server set bidirectional stream limit: %d", stream_limit) 559 | if stream_limit > 1000: 560 | logging.info("Server set a stream limit > 1000.") 561 | return TestResult.FAILED 562 | if not checked_stream_limit: 563 | logging.info("Couldn't check stream limit.") 564 | return TestResult.FAILED 565 | return TestResult.SUCCEEDED 566 | 567 | 568 | class TestCaseRetry(TestCase): 569 | @staticmethod 570 | def name(): 571 | return "retry" 572 | 573 | @staticmethod 574 | def abbreviation(): 575 | return "S" 576 | 577 | @staticmethod 578 | def desc(): 579 | return "Server sends a Retry, and a subsequent connection using the Retry token completes successfully." 
    def get_paths(self):
        self._files = [
            self._generate_random_file(10 * KB),
        ]
        return self._files

    def _check_trace(self) -> bool:
        """Verify a Retry was sent and that the client used one of its tokens."""
        # check that (at least) one Retry packet was actually sent
        tr = self._client_trace()
        tokens = []
        retries = tr.get_retry(Direction.FROM_SERVER)
        for p in retries:
            if not hasattr(p, "retry_token"):
                logging.info("Retry packet doesn't have a retry_token")
                logging.info(p)
                return False
            # Strip tshark's ':' byte separators so tokens compare as plain hex.
            tokens += [p.retry_token.replace(":", "")]
        if len(tokens) == 0:
            logging.info("Didn't find any Retry packets.")
            return False

        # check that an Initial packet uses a token sent in the Retry packet(s)
        highest_pn_before_retry = -1
        for p in tr.get_initial(Direction.FROM_CLIENT):
            pn = int(p.packet_number)
            if p.token_length == "0":
                # Initial without a token: sent before the Retry arrived.
                highest_pn_before_retry = max(highest_pn_before_retry, pn)
                continue
            # Packet numbers must continue after the Retry, not restart at 0.
            if pn <= highest_pn_before_retry:
                logging.debug(
                    "Client reset the packet number. Check failed for PN %d", pn
                )
                return False
            token = p.token.replace(":", "")
            if token in tokens:
                logging.debug("Check of Retry succeeded. Token used: %s", token)
                return True
        logging.info("Didn't find any Initial packet using a Retry token.")
        return False

    def check(self) -> TestResult:
        """Pass if the transfer succeeded and the Retry/token exchange was observed."""
        super().check()
        num_handshakes = self._count_handshakes()
        if num_handshakes != 1:
            logging.info("Expected exactly 1 handshake. Got: %d", num_handshakes)
            return TestResult.FAILED
        if not self._check_version_and_files():
            return TestResult.FAILED
        if not self._check_trace():
            return TestResult.FAILED
        return TestResult.SUCCEEDED


class TestCaseResumption(TestCase):
    """Two connections: the second must resume the TLS session (no Certificate)."""

    @staticmethod
    def name():
        return "resumption"

    @staticmethod
    def abbreviation():
        return "R"

    @staticmethod
    def desc():
        return "Connection is established using TLS Session Resumption."

    def get_paths(self):
        self._files = [
            self._generate_random_file(5 * KB),
            self._generate_random_file(10 * KB),
        ]
        return self._files

    def check(self) -> TestResult:
        """Pass if exactly two handshakes occurred, the first carried a
        Certificate message and the second (resumed) one did not."""
        super().check()
        if not self._keylog_file():
            logging.info("Can't check test result. SSLKEYLOG required.")
            return TestResult.UNSUPPORTED
        num_handshakes = self._count_handshakes()
        if num_handshakes != 2:
            logging.info("Expected exactly 2 handshake. Got: %d", num_handshakes)
            return TestResult.FAILED

        # Packets are attributed to the two handshakes via the server's SCID:
        # assumes the first SCID in the list belongs to the first handshake and
        # the last SCID to the second — TODO confirm this holds for all traces.
        handshake_packets = self._client_trace().get_handshake(Direction.FROM_SERVER)
        cids = [p.scid for p in handshake_packets]
        first_handshake_has_cert = False
        for p in handshake_packets:
            if p.scid == cids[0]:
                if hasattr(p, "tls_handshake_certificates_length"):
                    first_handshake_has_cert = True
            elif p.scid == cids[len(cids) - 1]:  # second handshake
                if hasattr(p, "tls_handshake_certificates_length"):
                    logging.info(
                        "Server sent a Certificate message in the second handshake."
                    )
                    return TestResult.FAILED
            else:
                logging.info(
                    "Found handshake packet that neither belongs to the first nor the second handshake."
                )
                return TestResult.FAILED
        if not first_handshake_has_cert:
            logging.info(
                "Didn't find a Certificate message in the first handshake. That's weird."
            )
            return TestResult.FAILED
        if not self._check_version_and_files():
            return TestResult.FAILED
        return TestResult.SUCCEEDED


class TestCaseZeroRTT(TestCase):
    """Second connection sends the requests in 0-RTT data."""

    NUM_FILES = 40
    FILESIZE = 32  # in bytes
    FILENAMELEN = 250

    @staticmethod
    def name():
        return "zerortt"

    @staticmethod
    def abbreviation():
        return "Z"

    @staticmethod
    def desc():
        return "0-RTT data is being sent and acted on."

    def get_paths(self):
        # Long filenames make the requests large, so that request bytes
        # clearly dominate the 0-RTT vs 1-RTT size comparison in check().
        for _ in range(self.NUM_FILES):
            filename = random_string(self.FILENAMELEN)
            self._files.append(self._generate_random_file(self.FILESIZE, filename))
        return self._files

    def check(self) -> TestResult:
        """Pass if some 0-RTT data was sent and at most half of the request
        bytes ended up in 1-RTT packets."""
        super().check()
        num_handshakes = self._count_handshakes()
        if num_handshakes != 2:
            logging.info("Expected exactly 2 handshakes. Got: %d", num_handshakes)
            return TestResult.FAILED
        if not self._check_version_and_files():
            return TestResult.FAILED
        tr = self._client_trace()
        zeroRTTSize = self._payload_size(tr.get_0rtt())
        oneRTTSize = self._payload_size(tr.get_1rtt(Direction.FROM_CLIENT))
        logging.debug("0-RTT size: %d", zeroRTTSize)
        logging.debug("1-RTT size: %d", oneRTTSize)
        if zeroRTTSize == 0:
            logging.info("Client didn't send any 0-RTT data.")
            return TestResult.FAILED
        if oneRTTSize > 0.5 * self.FILENAMELEN * self.NUM_FILES:
            logging.info("Client sent too much data in 1-RTT packets.")
            return TestResult.FAILED
        return TestResult.SUCCEEDED


class TestCaseHTTP3(TestCase):
    """Transfer using HTTP/3 semantics instead of the raw hq protocol."""

    @staticmethod
    def name():
        return "http3"

    @staticmethod
    def abbreviation():
        return "3"

    @staticmethod
    def desc():
        return "An H3 transaction succeeded."
    def get_paths(self):
        self._files = [
            self._generate_random_file(5 * KB),
            self._generate_random_file(10 * KB),
            self._generate_random_file(500 * KB),
        ]
        return self._files

    def check(self) -> TestResult:
        """Pass if exactly one handshake occurred and all files transferred intact."""
        super().check()
        num_handshakes = self._count_handshakes()
        if num_handshakes != 1:
            logging.info("Expected exactly 1 handshake. Got: %d", num_handshakes)
            return TestResult.FAILED
        if not self._check_version_and_files():
            return TestResult.FAILED
        return TestResult.SUCCEEDED


class TestCaseAmplificationLimit(TestCase):
    """Force a large server first flight (long cert chain) while dropping the
    client's follow-up packets, then verify the server never exceeded the
    3x anti-amplification limit before the client's address was validated."""

    @staticmethod
    def name():
        return "amplificationlimit"

    @staticmethod
    def testname(p: Perspective):
        return "transfer"

    @staticmethod
    def abbreviation():
        return "A"

    @staticmethod
    def desc():
        return "The server obeys the 3x amplification limit."

    def certs_dir(self):
        # Overridden to generate a chain of length 9, inflating the server's
        # first flight well beyond what a single Initial allows.
        if not self._cert_dir:
            self._cert_dir = tempfile.TemporaryDirectory(dir="/tmp", prefix="certs_")
            generate_cert_chain(self._cert_dir.name, 9)
        return self._cert_dir.name + "/"

    @staticmethod
    def scenario() -> str:
        """Scenario for the ns3 simulator"""
        # Let the ClientHello pass, but drop a bunch of retransmissions afterwards.
        return "droplist --delay=15ms --bandwidth=10Mbps --queue=25 --drops_to_server=2,3,4,5,6,7"

    def get_paths(self):
        self._files = [self._generate_random_file(5 * KB)]
        return self._files

    def check(self) -> TestResult:
        """Replay the packet sequence and track a running send allowance:
        each client Initial adds 3x (4x with tolerance) its size; every server
        packet consumes its size. Success once the client sends a Handshake
        packet (address validated); failure if the allowance is exceeded."""
        super().check()
        if not self._keylog_file():
            logging.info("Can't check test result. SSLKEYLOG required.")
            return TestResult.UNSUPPORTED
        num_handshakes = self._count_handshakes()
        if num_handshakes != 1:
            logging.info("Expected exactly 1 handshake. Got: %d", num_handshakes)
            return TestResult.FAILED
        if not self._check_version_and_files():
            return TestResult.FAILED
        # Check the highest offset of CRYPTO frames sent by the server.
        # This way we can make sure that it actually used the provided cert chain.
        max_handshake_offset = 0
        for p in self._server_trace().get_handshake(Direction.FROM_SERVER):
            if hasattr(p, "crypto_offset"):
                max_handshake_offset = max(
                    max_handshake_offset, int(p.crypto_offset) + int(p.crypto_length)
                )
        if max_handshake_offset < 7500:
            logging.info(
                "Server sent too little Handshake CRYPTO data (%d bytes). Not using the provided cert chain?",
                max_handshake_offset,
            )
            return TestResult.FAILED
        logging.debug(
            "Server sent %d bytes in Handshake CRYPTO frames.", max_handshake_offset
        )

        # Check that the server didn't send more than 3-4x what the client sent.
        allowed = 0
        allowed_with_tolerance = 0
        client_sent, server_sent = 0, 0  # only for debug messages
        res = TestResult.FAILED
        # Messages are buffered so they can be emitted at DEBUG on success
        # and at INFO on failure.
        log_output = []
        for p in self._server_trace().get_raw_packets():
            direction = get_direction(p)
            packet_type = get_packet_type(p)
            if packet_type == PacketType.VERSIONNEGOTIATION:
                logging.info("Didn't expect a Version Negotiation packet.")
                return TestResult.FAILED
            packet_size = int(p.udp.length) - 8  # subtract the UDP header length
            if packet_type == PacketType.INVALID:
                logging.debug("Couldn't determine packet type.")
                return TestResult.FAILED
            if direction == Direction.FROM_CLIENT:
                if packet_type is PacketType.HANDSHAKE:
                    # A client Handshake packet proves address validation
                    # completed; the amplification limit no longer applies.
                    res = TestResult.SUCCEEDED
                    break
                if packet_type is PacketType.INITIAL:
                    client_sent += packet_size
                    allowed += 3 * packet_size
                    allowed_with_tolerance += 4 * packet_size
                    log_output.append(
                        "Received a {} byte Initial packet from the client. Amplification limit: {}".format(
                            packet_size, 3 * client_sent
                        )
                    )
            elif direction == Direction.FROM_SERVER:
                server_sent += packet_size
                log_output.append(
                    "Received a {} byte Handshake packet from the server. Total: {}".format(
                        packet_size, server_sent
                    )
                )
                if packet_size >= allowed_with_tolerance:
                    log_output.append("Server violated the amplification limit.")
                    break
                if packet_size > allowed:
                    log_output.append(
                        "Server violated the amplification limit, but stayed within 3-4x amplification. Letting it slide."
                    )
                allowed_with_tolerance -= packet_size
                allowed -= packet_size
            else:
                logging.debug("Couldn't determine sender of packet.")
                return TestResult.FAILED

        log_level = logging.DEBUG
        if res == TestResult.FAILED:
            log_level = logging.INFO
        for msg in log_output:
            logging.log(log_level, msg)
        return res


class TestCaseBlackhole(TestCase):
    """Transfer survives a periodic total network outage."""

    @staticmethod
    def name():
        return "blackhole"

    @staticmethod
    def testname(p: Perspective):
        return "transfer"

    @staticmethod
    def abbreviation():
        return "B"

    @staticmethod
    def desc():
        return "Transfer succeeds despite underlying network blacking out for a few seconds."

    @staticmethod
    def scenario() -> str:
        """Scenario for the ns3 simulator"""
        return "blackhole --delay=15ms --bandwidth=10Mbps --queue=25 --on=5s --off=2s"

    def get_paths(self):
        self._files = [self._generate_random_file(10 * MB)]
        return self._files

    def check(self) -> TestResult:
        """Pass if exactly one handshake occurred and the file transferred intact."""
        super().check()
        num_handshakes = self._count_handshakes()
        if num_handshakes != 1:
            logging.info("Expected exactly 1 handshake. Got: %d", num_handshakes)
            return TestResult.FAILED
        if not self._check_version_and_files():
            return TestResult.FAILED
        return TestResult.SUCCEEDED


class TestCaseKeyUpdate(TestCaseHandshake):
    """Both endpoints must send 1-RTT packets with key phase 1, i.e. perform
    (or respond to) a key update."""

    @staticmethod
    def name():
        return "keyupdate"

    @staticmethod
    def testname(p: Perspective):
        # Only the client actively triggers the update; the server runs a
        # plain transfer and must follow along.
        if p is Perspective.CLIENT:
            return "keyupdate"
        return "transfer"

    @staticmethod
    def abbreviation():
        return "U"

    @staticmethod
    def desc():
        return "One of the two endpoints updates keys and the peer responds correctly."

    def get_paths(self):
        self._files = [self._generate_random_file(3 * MB)]
        return self._files

    def check(self) -> TestResult:
        """Count 1-RTT packets per key phase on both sides; pass if both the
        client and the server sent at least one key-phase-1 packet."""
        super().check()
        if not self._keylog_file():
            logging.info("Can't check test result. SSLKEYLOG required.")
            return TestResult.UNSUPPORTED

        num_handshakes = self._count_handshakes()
        if num_handshakes != 1:
            logging.info("Expected exactly 1 handshake. Got: %d", num_handshakes)
            return TestResult.FAILED
        if not self._check_version_and_files():
            return TestResult.FAILED

        client = {0: 0, 1: 0}
        server = {0: 0, 1: 0}
        try:

            def _get_key_phase(pkt) -> int:
                kp: str = pkt.key_phase.raw_value
                # when key_phase bit is set in a QUIC packet, certain versions
                # of wireshark (4.0.11, for example) have been seen to return the string value
                # "1" and certain other versions of wireshark return the string value "True".
                # here we deal with such values and return the integer value 1 for either of those.
                return 1 if kp in ["1", "True"] else 0

            for p in self._client_trace().get_1rtt(Direction.FROM_CLIENT):
                key_phase = _get_key_phase(p)
                client[key_phase] += 1
            for p in self._server_trace().get_1rtt(Direction.FROM_SERVER):
                key_phase = _get_key_phase(p)
                server[key_phase] += 1
        except Exception:
            logging.info(
                "Failed to read key phase bits. Potentially incorrect SSLKEYLOG?"
            )
            return TestResult.FAILED

        # Both counts must be non-zero for the product to be positive.
        succeeded = client[1] * server[1] > 0

        log_level = logging.INFO
        if succeeded:
            log_level = logging.DEBUG

        logging.log(
            log_level,
            "Client sent %d key phase 0 and %d key phase 1 packets.",
            client[0],
            client[1],
        )
        logging.log(
            log_level,
            "Server sent %d key phase 0 and %d key phase 1 packets.",
            server[0],
            server[1],
        )
        if not succeeded:
            logging.info(
                "Expected to see packets sent with key phase 1 from both client and server."
            )
            return TestResult.FAILED
        return TestResult.SUCCEEDED


class TestCaseHandshakeLoss(TestCase):
    """Many connections in sequence, each under heavy packet loss."""

    # Number of sequential connections (one file per connection).
    _num_runs = 50

    @staticmethod
    def name():
        return "handshakeloss"

    @staticmethod
    def testname(p: Perspective):
        return "multiconnect"

    @staticmethod
    def abbreviation():
        return "L1"

    @staticmethod
    def desc():
        return "Handshake completes under extreme packet loss."
1029 | 1030 | @staticmethod 1031 | def timeout() -> int: 1032 | return 300 1033 | 1034 | @staticmethod 1035 | def scenario() -> str: 1036 | """Scenario for the ns3 simulator""" 1037 | return "drop-rate --delay=15ms --bandwidth=10Mbps --queue=25 --rate_to_server=30 --rate_to_client=30 --burst_to_server=3 --burst_to_client=3" 1038 | 1039 | def get_paths(self): 1040 | for _ in range(self._num_runs): 1041 | self._files.append(self._generate_random_file(1 * KB)) 1042 | return self._files 1043 | 1044 | def check(self) -> TestResult: 1045 | super().check() 1046 | num_handshakes = self._count_handshakes() 1047 | if num_handshakes != self._num_runs: 1048 | logging.info( 1049 | "Expected %d handshakes. Got: %d", self._num_runs, num_handshakes 1050 | ) 1051 | return TestResult.FAILED 1052 | if not self._check_version_and_files(): 1053 | return TestResult.FAILED 1054 | return TestResult.SUCCEEDED 1055 | 1056 | 1057 | class TestCaseTransferLoss(TestCase): 1058 | @staticmethod 1059 | def name(): 1060 | return "transferloss" 1061 | 1062 | @staticmethod 1063 | def testname(p: Perspective): 1064 | return "transfer" 1065 | 1066 | @staticmethod 1067 | def abbreviation(): 1068 | return "L2" 1069 | 1070 | @staticmethod 1071 | def desc(): 1072 | return "Transfer completes under moderate packet loss." 1073 | 1074 | @staticmethod 1075 | def scenario() -> str: 1076 | """Scenario for the ns3 simulator""" 1077 | return "drop-rate --delay=15ms --bandwidth=10Mbps --queue=25 --rate_to_server=2 --rate_to_client=2 --burst_to_server=3 --burst_to_client=3" 1078 | 1079 | def get_paths(self): 1080 | # At a packet loss rate of 2% and a MTU of 1500 bytes, we can expect 27 dropped packets. 1081 | self._files = [self._generate_random_file(2 * MB)] 1082 | return self._files 1083 | 1084 | def check(self) -> TestResult: 1085 | super().check() 1086 | num_handshakes = self._count_handshakes() 1087 | if num_handshakes != 1: 1088 | logging.info("Expected exactly 1 handshake. 
Got: %d", num_handshakes) 1089 | return TestResult.FAILED 1090 | if not self._check_version_and_files(): 1091 | return TestResult.FAILED 1092 | return TestResult.SUCCEEDED 1093 | 1094 | 1095 | class TestCaseHandshakeCorruption(TestCaseHandshakeLoss): 1096 | @staticmethod 1097 | def name(): 1098 | return "handshakecorruption" 1099 | 1100 | @staticmethod 1101 | def abbreviation(): 1102 | return "C1" 1103 | 1104 | @staticmethod 1105 | def desc(): 1106 | return "Handshake completes under extreme packet corruption." 1107 | 1108 | @staticmethod 1109 | def scenario() -> str: 1110 | """Scenario for the ns3 simulator""" 1111 | return "corrupt-rate --delay=15ms --bandwidth=10Mbps --queue=25 --rate_to_server=30 --rate_to_client=30 --burst_to_server=3 --burst_to_client=3" 1112 | 1113 | 1114 | class TestCaseTransferCorruption(TestCaseTransferLoss): 1115 | @staticmethod 1116 | def name(): 1117 | return "transfercorruption" 1118 | 1119 | @staticmethod 1120 | def abbreviation(): 1121 | return "C2" 1122 | 1123 | @staticmethod 1124 | def desc(): 1125 | return "Transfer completes under moderate packet corruption." 1126 | 1127 | @staticmethod 1128 | def scenario() -> str: 1129 | """Scenario for the ns3 simulator""" 1130 | return "corrupt-rate --delay=15ms --bandwidth=10Mbps --queue=25 --rate_to_server=2 --rate_to_client=2 --burst_to_server=3 --burst_to_client=3" 1131 | 1132 | 1133 | class TestCaseECN(TestCaseHandshake): 1134 | @staticmethod 1135 | def name(): 1136 | return "ecn" 1137 | 1138 | @staticmethod 1139 | def abbreviation(): 1140 | return "E" 1141 | 1142 | def get_paths(self): 1143 | # Transfer a bit more data, so that QUIC implementations that do ECN validation after the 1144 | # handshake have a chance to ACK some ECN marked packets. 
1145 | self._files = [self._generate_random_file(100 * KB)] 1146 | return self._files 1147 | 1148 | def _count_ecn(self, tr): 1149 | ecn = [0] * (max(ECN) + 1) 1150 | for p in tr: 1151 | e = int(getattr(p["ip"], "dsfield.ecn")) 1152 | ecn[e] += 1 1153 | for e in ECN: 1154 | logging.debug("%s %d", e, ecn[e]) 1155 | return ecn 1156 | 1157 | def _check_ecn_any(self, e) -> bool: 1158 | return e[ECN.ECT0] != 0 or e[ECN.ECT1] != 0 1159 | 1160 | def _check_ecn_marks(self, e) -> bool: 1161 | return e[ECN.CE] == 0 and self._check_ecn_any(e) 1162 | 1163 | def _check_ack_ecn(self, tr) -> bool: 1164 | # NOTE: We only check whether the trace contains any ACK-ECN information, not whether it is valid 1165 | for p in tr: 1166 | if hasattr(p["quic"], "ack.ect0_count"): 1167 | return True 1168 | return False 1169 | 1170 | def check(self) -> TestResult: 1171 | super().check() 1172 | if not self._keylog_file(): 1173 | logging.info("Can't check test result. SSLKEYLOG required.") 1174 | return TestResult.UNSUPPORTED 1175 | 1176 | result = super(TestCaseECN, self).check() 1177 | if result != TestResult.SUCCEEDED: 1178 | return result 1179 | 1180 | tr_client = self._client_trace()._get_packets( 1181 | self._client_trace()._get_direction_filter(Direction.FROM_CLIENT) + " quic" 1182 | ) 1183 | ecn = self._count_ecn(tr_client) 1184 | ecn_client_any_marked = self._check_ecn_any(ecn) 1185 | ecn_client_all_ok = self._check_ecn_marks(ecn) 1186 | ack_ecn_client_ok = self._check_ack_ecn(tr_client) 1187 | 1188 | tr_server = self._server_trace()._get_packets( 1189 | self._server_trace()._get_direction_filter(Direction.FROM_SERVER) + " quic" 1190 | ) 1191 | ecn = self._count_ecn(tr_server) 1192 | ecn_server_any_marked = self._check_ecn_any(ecn) 1193 | ecn_server_all_ok = self._check_ecn_marks(ecn) 1194 | ack_ecn_server_ok = self._check_ack_ecn(tr_server) 1195 | 1196 | if ecn_client_any_marked is False: 1197 | logging.info("Client did not mark any packets ECT(0) or ECT(1)") 1198 | else: 1199 | if 
ack_ecn_server_ok is False: 1200 | logging.info("Server did not send any ACK-ECN frames") 1201 | elif ecn_client_all_ok is False: 1202 | logging.info( 1203 | "Not all client packets were consistently marked with ECT(0) or ECT(1)" 1204 | ) 1205 | 1206 | if ecn_server_any_marked is False: 1207 | logging.info("Server did not mark any packets ECT(0) or ECT(1)") 1208 | else: 1209 | if ack_ecn_client_ok is False: 1210 | logging.info("Client did not send any ACK-ECN frames") 1211 | elif ecn_server_all_ok is False: 1212 | logging.info( 1213 | "Not all server packets were consistently marked with ECT(0) or ECT(1)" 1214 | ) 1215 | 1216 | if ( 1217 | ecn_client_all_ok 1218 | and ecn_server_all_ok 1219 | and ack_ecn_client_ok 1220 | and ack_ecn_server_ok 1221 | ): 1222 | return TestResult.SUCCEEDED 1223 | return TestResult.FAILED 1224 | 1225 | 1226 | class TestCasePortRebinding(TestCaseTransfer): 1227 | @staticmethod 1228 | def name(): 1229 | return "rebind-port" 1230 | 1231 | @staticmethod 1232 | def abbreviation(): 1233 | return "BP" 1234 | 1235 | @staticmethod 1236 | def testname(p: Perspective): 1237 | return "transfer" 1238 | 1239 | @staticmethod 1240 | def desc(): 1241 | return "Transfer completes under frequent port rebindings on the client side." 
1242 | 1243 | def get_paths(self): 1244 | self._files = [ 1245 | self._generate_random_file(10 * MB), 1246 | ] 1247 | return self._files 1248 | 1249 | @staticmethod 1250 | def scenario() -> str: 1251 | """Scenario for the ns3 simulator""" 1252 | return "rebind --delay=15ms --bandwidth=10Mbps --queue=25 --first-rebind=1s --rebind-freq=5s" 1253 | 1254 | @staticmethod 1255 | def _addr(p: List, which: str) -> str: 1256 | return ( 1257 | getattr(p["ipv6"], which) 1258 | if "IPV6" in str(p.layers) 1259 | else getattr(p["ip"], which) 1260 | ) 1261 | 1262 | @staticmethod 1263 | def _path(p: List) -> Tuple[str, int, str, int]: 1264 | return ( 1265 | (TestCasePortRebinding._addr(p, "src"), int(getattr(p["udp"], "srcport"))), 1266 | (TestCasePortRebinding._addr(p, "dst"), int(getattr(p["udp"], "dstport"))), 1267 | ) 1268 | 1269 | def check(self) -> TestResult: 1270 | super().check() 1271 | if not self._keylog_file(): 1272 | logging.info("Can't check test result. SSLKEYLOG required.") 1273 | return TestResult.UNSUPPORTED 1274 | 1275 | result = super(TestCasePortRebinding, self).check() 1276 | if result != TestResult.SUCCEEDED: 1277 | return result 1278 | 1279 | tr_server = self._server_trace()._get_packets( 1280 | self._server_trace()._get_direction_filter(Direction.FROM_SERVER) + " quic" 1281 | ) 1282 | 1283 | cur = None 1284 | last = None 1285 | paths = set() 1286 | challenges = set() 1287 | for p in tr_server: 1288 | cur = self._path(p) 1289 | if last is None: 1290 | last = cur 1291 | continue 1292 | 1293 | if last != cur and cur not in paths: 1294 | paths.add(last) 1295 | last = cur 1296 | # Packet on new path, should have a PATH_CHALLENGE frame 1297 | if hasattr(p["quic"], "path_challenge.data") is False: 1298 | logging.info( 1299 | "First server packet on new path %s did not contain a PATH_CHALLENGE frame", 1300 | cur, 1301 | ) 1302 | logging.info(p["quic"]) 1303 | return TestResult.FAILED 1304 | else: 1305 | challenges.add(getattr(p["quic"], "path_challenge.data")) 1306 
| paths.add(cur) 1307 | 1308 | logging.info("Server saw these paths used: %s", paths) 1309 | if len(paths) <= 1: 1310 | logging.info("Server saw only a single path in use; test broken?") 1311 | return TestResult.FAILED 1312 | 1313 | tr_client = self._client_trace()._get_packets( 1314 | self._client_trace()._get_direction_filter(Direction.FROM_CLIENT) + " quic" 1315 | ) 1316 | 1317 | responses = list( 1318 | set( 1319 | getattr(p["quic"], "path_response.data") 1320 | for p in tr_client 1321 | if hasattr(p["quic"], "path_response.data") 1322 | ) 1323 | ) 1324 | 1325 | unresponded = [c for c in challenges if c not in responses] 1326 | if unresponded != []: 1327 | logging.info("PATH_CHALLENGE without a PATH_RESPONSE: %s", unresponded) 1328 | return TestResult.FAILED 1329 | 1330 | return TestResult.SUCCEEDED 1331 | 1332 | 1333 | class TestCaseAddressRebinding(TestCasePortRebinding): 1334 | @staticmethod 1335 | def name(): 1336 | return "rebind-addr" 1337 | 1338 | @staticmethod 1339 | def abbreviation(): 1340 | return "BA" 1341 | 1342 | @staticmethod 1343 | def testname(p: Perspective): 1344 | return "transfer" 1345 | 1346 | @staticmethod 1347 | def desc(): 1348 | return "Transfer completes under frequent IP address and port rebindings on the client side." 1349 | 1350 | @staticmethod 1351 | def scenario() -> str: 1352 | """Scenario for the ns3 simulator""" 1353 | return ( 1354 | super(TestCaseAddressRebinding, TestCaseAddressRebinding).scenario() 1355 | + " --rebind-addr" 1356 | ) 1357 | 1358 | def check(self) -> TestResult: 1359 | super().check() 1360 | if not self._keylog_file(): 1361 | logging.info("Can't check test result. 
SSLKEYLOG required.") 1362 | return TestResult.UNSUPPORTED 1363 | 1364 | tr_server = self._server_trace()._get_packets( 1365 | self._server_trace()._get_direction_filter(Direction.FROM_SERVER) + " quic" 1366 | ) 1367 | 1368 | ips = set() 1369 | for p in tr_server: 1370 | ip_vers = "ip" 1371 | if "IPV6" in str(p.layers): 1372 | ip_vers = "ipv6" 1373 | ips.add(getattr(p[ip_vers], "dst")) 1374 | 1375 | logging.info("Server saw these client addresses: %s", ips) 1376 | if len(ips) <= 1: 1377 | logging.info( 1378 | "Server saw only a single client IP address in use; test broken?" 1379 | ) 1380 | return TestResult.FAILED 1381 | 1382 | result = super(TestCaseAddressRebinding, self).check() 1383 | if result != TestResult.SUCCEEDED: 1384 | return result 1385 | 1386 | return TestResult.SUCCEEDED 1387 | 1388 | 1389 | class TestCaseIPv6(TestCaseTransfer): 1390 | @staticmethod 1391 | def name(): 1392 | return "ipv6" 1393 | 1394 | @staticmethod 1395 | def abbreviation(): 1396 | return "6" 1397 | 1398 | @staticmethod 1399 | def testname(p: Perspective): 1400 | return "transfer" 1401 | 1402 | @staticmethod 1403 | def urlprefix() -> str: 1404 | return "https://server6:443/" 1405 | 1406 | @staticmethod 1407 | def desc(): 1408 | return "A transfer across an IPv6-only network succeeded." 
1409 | 1410 | def get_paths(self): 1411 | self._files = [ 1412 | self._generate_random_file(5 * KB), 1413 | self._generate_random_file(10 * KB), 1414 | ] 1415 | return self._files 1416 | 1417 | def check(self) -> TestResult: 1418 | super().check() 1419 | result = super(TestCaseIPv6, self).check() 1420 | if result != TestResult.SUCCEEDED: 1421 | return result 1422 | 1423 | tr_server = self._server_trace()._get_packets( 1424 | self._server_trace()._get_direction_filter(Direction.FROM_SERVER) 1425 | + " quic && ip" 1426 | ) 1427 | 1428 | if tr_server: 1429 | logging.info("Packet trace contains %s IPv4 packets.", len(tr_server)) 1430 | return TestResult.FAILED 1431 | return TestResult.SUCCEEDED 1432 | 1433 | 1434 | class TestCaseConnectionMigration(TestCasePortRebinding): 1435 | @staticmethod 1436 | def name(): 1437 | return "connectionmigration" 1438 | 1439 | @staticmethod 1440 | def abbreviation(): 1441 | return "CM" 1442 | 1443 | @staticmethod 1444 | def testname(p: Perspective): 1445 | if p is Perspective.SERVER: 1446 | # Server needs to send preferred addresses 1447 | return "connectionmigration" 1448 | return "transfer" 1449 | 1450 | @staticmethod 1451 | def desc(): 1452 | return "A transfer succeeded during which the client performed an active migration." 
1453 | 1454 | @staticmethod 1455 | def scenario() -> str: 1456 | return super(TestCaseTransfer, TestCaseTransfer).scenario() 1457 | 1458 | @staticmethod 1459 | def urlprefix() -> str: 1460 | """URL prefix""" 1461 | return "https://server46:443/" 1462 | 1463 | def get_paths(self): 1464 | self._files = [ 1465 | self._generate_random_file(2 * MB), 1466 | ] 1467 | return self._files 1468 | 1469 | def check(self) -> TestResult: 1470 | super().check() 1471 | # The parent check() method ensures that the client changed addresses 1472 | # and that PATH_CHALLENGE/RESPONSE frames were sent and received 1473 | result = super(TestCaseConnectionMigration, self).check() 1474 | if result != TestResult.SUCCEEDED: 1475 | return result 1476 | 1477 | tr_client = self._client_trace()._get_packets( 1478 | self._client_trace()._get_direction_filter(Direction.FROM_CLIENT) + " quic" 1479 | ) 1480 | 1481 | last = None 1482 | paths = set() 1483 | dcid = None 1484 | for p in tr_client: 1485 | cur = self._path(p) 1486 | if last is None: 1487 | last = cur 1488 | dcid = getattr(p["quic"], "dcid") 1489 | continue 1490 | 1491 | if last != cur and cur not in paths: 1492 | paths.add(last) 1493 | last = cur 1494 | # packet to different IP/port, should have a new DCID 1495 | if dcid == getattr(p["quic"], "dcid"): 1496 | logging.info( 1497 | "First client packet during active migration to %s used previous DCID %s", 1498 | cur, 1499 | dcid, 1500 | ) 1501 | logging.info(p["quic"]) 1502 | return TestResult.FAILED 1503 | dcid = getattr(p["quic"], "dcid") 1504 | logging.info( 1505 | "DCID changed to %s during active migration to %s", dcid, cur 1506 | ) 1507 | 1508 | return TestResult.SUCCEEDED 1509 | 1510 | 1511 | class TestCaseV2(TestCase): 1512 | @staticmethod 1513 | def name(): 1514 | return "v2" 1515 | 1516 | @staticmethod 1517 | def abbreviation(): 1518 | return "V2" 1519 | 1520 | @staticmethod 1521 | def desc(): 1522 | return "Server should select QUIC v2 in compatible version negotiation." 
1523 | 1524 | def get_paths(self): 1525 | self._files = [self._generate_random_file(1 * KB)] 1526 | return self._files 1527 | 1528 | def check(self) -> TestResult: 1529 | super().check() 1530 | # Client should initially send QUIC v1 packet. It may send 1531 | # QUIC v2 packet. 1532 | versions = self._get_packet_versions( 1533 | self._client_trace().get_initial(Direction.FROM_CLIENT) 1534 | ) 1535 | if QUIC_VERSION not in versions: 1536 | logging.info( 1537 | "Wrong version in client Initial. Expected %s, got %s", 1538 | QUIC_VERSION, 1539 | versions, 1540 | ) 1541 | return TestResult.FAILED 1542 | 1543 | # Server Initial packets should have QUIC v2. It may send 1544 | # QUIC v1 packet before sending CRYPTO frame. 1545 | versions = self._get_packet_versions( 1546 | self._server_trace().get_initial(Direction.FROM_SERVER) 1547 | ) 1548 | if QUIC_V2 not in versions: 1549 | logging.info( 1550 | "Wrong version in server Initial. Expected %s, got %s", 1551 | QUIC_V2, 1552 | versions, 1553 | ) 1554 | return TestResult.FAILED 1555 | 1556 | # Client should use QUIC v2 for all Handshake packets. 1557 | versions = self._get_packet_versions( 1558 | self._client_trace().get_handshake(Direction.FROM_CLIENT) 1559 | ) 1560 | if len(versions) != 1: 1561 | logging.info( 1562 | "Expected exactly one version in client Handshake. Got %s", versions 1563 | ) 1564 | return TestResult.FAILED 1565 | if QUIC_V2 not in versions: 1566 | logging.info( 1567 | "Wrong version in client Handshake. Expected %s, got %s", 1568 | QUIC_V2, 1569 | versions, 1570 | ) 1571 | return TestResult.FAILED 1572 | 1573 | # Server should use QUIC v2 for all Handshake packets. 1574 | versions = self._get_packet_versions( 1575 | self._server_trace().get_handshake(Direction.FROM_SERVER) 1576 | ) 1577 | if len(versions) != 1: 1578 | logging.info( 1579 | "Expected exactly one version in server Handshake. 
Got %s", versions 1580 | ) 1581 | return TestResult.FAILED 1582 | if QUIC_V2 not in versions: 1583 | logging.info( 1584 | "Wrong version in server Handshake. Expected %s, got %s", 1585 | QUIC_V2, 1586 | versions, 1587 | ) 1588 | return TestResult.FAILED 1589 | 1590 | if not self._check_files(): 1591 | return TestResult.FAILED 1592 | 1593 | return TestResult.SUCCEEDED 1594 | 1595 | def _get_packet_versions(self, packets: List) -> set: 1596 | """Get a set of QUIC versions from packets.""" 1597 | return set([hex(int(p.version, 0)) for p in packets]) 1598 | 1599 | 1600 | class MeasurementGoodput(Measurement): 1601 | FILESIZE = 10 * MB 1602 | _result = 0.0 1603 | 1604 | @staticmethod 1605 | def name(): 1606 | return "goodput" 1607 | 1608 | @staticmethod 1609 | def unit() -> str: 1610 | return "kbps" 1611 | 1612 | @staticmethod 1613 | def testname(p: Perspective): 1614 | return "transfer" 1615 | 1616 | @staticmethod 1617 | def abbreviation(): 1618 | return "G" 1619 | 1620 | @staticmethod 1621 | def desc(): 1622 | return "Measures connection goodput over a 10Mbps link." 1623 | 1624 | @staticmethod 1625 | def repetitions() -> int: 1626 | return 5 1627 | 1628 | def get_paths(self): 1629 | self._files = [self._generate_random_file(self.FILESIZE)] 1630 | return self._files 1631 | 1632 | def check(self) -> TestResult: 1633 | super().check() 1634 | num_handshakes = self._count_handshakes() 1635 | if num_handshakes != 1: 1636 | logging.info("Expected exactly 1 handshake. Got: %d", num_handshakes) 1637 | return TestResult.FAILED 1638 | if not self._check_version_and_files(): 1639 | return TestResult.FAILED 1640 | 1641 | packets, first, last = self._client_trace().get_1rtt_sniff_times( 1642 | Direction.FROM_SERVER 1643 | ) 1644 | 1645 | if last - first == 0: 1646 | return TestResult.FAILED 1647 | time = (last - first) / timedelta(milliseconds=1) 1648 | goodput = (8 * self.FILESIZE) / time 1649 | logging.debug( 1650 | "Transfering %d MB took %d ms. 
Goodput: %d kbps", 1651 | self.FILESIZE / MB, 1652 | time, 1653 | goodput, 1654 | ) 1655 | self._result = goodput 1656 | return TestResult.SUCCEEDED 1657 | 1658 | def result(self) -> float: 1659 | return self._result 1660 | 1661 | 1662 | class MeasurementCrossTraffic(MeasurementGoodput): 1663 | FILESIZE = 25 * MB 1664 | 1665 | @staticmethod 1666 | def name(): 1667 | return "crosstraffic" 1668 | 1669 | @staticmethod 1670 | def abbreviation(): 1671 | return "C" 1672 | 1673 | @staticmethod 1674 | def desc(): 1675 | return "Measures goodput over a 10Mbps link when competing with a TCP (cubic) connection." 1676 | 1677 | @staticmethod 1678 | def timeout() -> int: 1679 | return 180 1680 | 1681 | @staticmethod 1682 | def additional_envs() -> List[str]: 1683 | return ["IPERF_CONGESTION=cubic"] 1684 | 1685 | @staticmethod 1686 | def additional_containers() -> List[str]: 1687 | return ["iperf_server", "iperf_client"] 1688 | 1689 | 1690 | TESTCASES = [ 1691 | TestCaseHandshake, 1692 | TestCaseTransfer, 1693 | TestCaseLongRTT, 1694 | TestCaseChaCha20, 1695 | TestCaseMultiplexing, 1696 | TestCaseRetry, 1697 | TestCaseResumption, 1698 | TestCaseZeroRTT, 1699 | TestCaseHTTP3, 1700 | TestCaseBlackhole, 1701 | TestCaseKeyUpdate, 1702 | TestCaseECN, 1703 | TestCaseAmplificationLimit, 1704 | TestCaseHandshakeLoss, 1705 | TestCaseTransferLoss, 1706 | TestCaseHandshakeCorruption, 1707 | TestCaseTransferCorruption, 1708 | TestCaseIPv6, 1709 | TestCaseV2, 1710 | TestCasePortRebinding, 1711 | TestCaseAddressRebinding, 1712 | TestCaseConnectionMigration, 1713 | ] 1714 | 1715 | MEASUREMENTS = [ 1716 | MeasurementGoodput, 1717 | MeasurementCrossTraffic, 1718 | ] 1719 | --------------------------------------------------------------------------------